//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
14 #include "X86ISelLowering.h"
15 #include "MCTargetDesc/X86ShuffleDecode.h"
17 #include "X86CallingConv.h"
18 #include "X86FrameLowering.h"
19 #include "X86InstrBuilder.h"
20 #include "X86IntrinsicsInfo.h"
21 #include "X86MachineFunctionInfo.h"
22 #include "X86TargetMachine.h"
23 #include "X86TargetObjectFile.h"
24 #include "llvm/ADT/SmallBitVector.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/ADT/StringSwitch.h"
29 #include "llvm/Analysis/BlockFrequencyInfo.h"
30 #include "llvm/Analysis/ObjCARCUtil.h"
31 #include "llvm/Analysis/ProfileSummaryInfo.h"
32 #include "llvm/Analysis/VectorUtils.h"
33 #include "llvm/CodeGen/IntrinsicLowering.h"
34 #include "llvm/CodeGen/MachineFrameInfo.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineInstrBuilder.h"
37 #include "llvm/CodeGen/MachineJumpTableInfo.h"
38 #include "llvm/CodeGen/MachineLoopInfo.h"
39 #include "llvm/CodeGen/MachineModuleInfo.h"
40 #include "llvm/CodeGen/MachineRegisterInfo.h"
41 #include "llvm/CodeGen/TargetLowering.h"
42 #include "llvm/CodeGen/WinEHFuncInfo.h"
43 #include "llvm/IR/CallingConv.h"
44 #include "llvm/IR/Constants.h"
45 #include "llvm/IR/DerivedTypes.h"
46 #include "llvm/IR/DiagnosticInfo.h"
47 #include "llvm/IR/EHPersonalities.h"
48 #include "llvm/IR/Function.h"
49 #include "llvm/IR/GlobalAlias.h"
50 #include "llvm/IR/GlobalVariable.h"
51 #include "llvm/IR/IRBuilder.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/PatternMatch.h"
55 #include "llvm/MC/MCAsmInfo.h"
56 #include "llvm/MC/MCContext.h"
57 #include "llvm/MC/MCExpr.h"
58 #include "llvm/MC/MCSymbol.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/ErrorHandling.h"
62 #include "llvm/Support/KnownBits.h"
63 #include "llvm/Support/MathExtras.h"
64 #include "llvm/Target/TargetOptions.h"

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<int> ExperimentalPrefInnermostLoopAlignment(
    "x86-experimental-pref-innermost-loop-alignment", cl::init(4),
    cl::desc(
        "Sets the preferable loop alignment for experiments (as log2 bytes) "
        "for innermost loops only. If specified, this option overrides "
        "alignment set by x86-experimental-pref-loop-alignment."),
    cl::Hidden);

static cl::opt<bool> MulConstantOptimization(
    "mul-constant-optimization", cl::init(true),
    cl::desc("Replace 'mul x, Const' with more effective instructions like "
             "SHIFT, LEA, etc."),
    cl::Hidden);

static cl::opt<bool> ExperimentalUnorderedISEL(
    "x86-experimental-unordered-atomic-isel", cl::init(false),
    cl::desc("Use LoadSDNode and StoreSDNode instead of "
             "AtomicSDNode for unordered atomic loads and "
             "stores respectively."),
    cl::Hidden);

/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
                             const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}

/// Returns true if a CC can dynamically exclude a register from the list of
/// callee-saved-registers (TargetRegisterInfo::getCalleeSavedRegs()) based on
/// the return registers.
static bool shouldDisableRetRegFromCSR(CallingConv::ID CC) {
  switch (CC) {
  default:
    return false;
  case CallingConv::X86_RegCall:
  case CallingConv::PreserveMost:
  case CallingConv::PreserveAll:
    return true;
  }
}

/// Returns true if a CC can dynamically exclude a register from the list of
/// callee-saved-registers (TargetRegisterInfo::getCalleeSavedRegs()) based on
/// the parameters.
static bool shouldDisableArgRegFromCSR(CallingConv::ID CC) {
  return CC == CallingConv::X86_RegCall;
}

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Set up the TargetLowering object.

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
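  // (E.g. a scalar SETCC materializes 0 or 1 in a GPR, while a vector compare
  // such as PCMPGTD produces all-ones or all-zeros lanes.)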

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget.isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget.is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides and use cheaper ones.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget.hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
      addBypassSlowDiv(64, 32);
  }
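  // (addBypassSlowDiv(32, 8) asks CodeGenPrepare to guard each 32-bit divide
  // with a runtime range check and use the much cheaper 8-bit DIV when both
  // operands fit in a byte.)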

  // Setup Windows compiler runtime calls.
  if (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::SDIV_I64, "_alldiv", CallingConv::X86_StdCall },
      { RTLIB::UDIV_I64, "_aulldiv", CallingConv::X86_StdCall },
      { RTLIB::SREM_I64, "_allrem", CallingConv::X86_StdCall },
      { RTLIB::UREM_I64, "_aullrem", CallingConv::X86_StdCall },
      { RTLIB::MUL_I64, "_allmul", CallingConv::X86_StdCall },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  if (Subtarget.getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }

  // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size
  // to 32 bits so the AtomicExpandPass will expand it so we don't need
  // cmpxchg8b.
  // FIXME: Should we be limiting the atomic size on other configs? Default is
  // 1024.
  if (!Subtarget.canUseCMPXCHG8B())
    setMaxAtomicSizeInBitsSupported(32);
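  // (Atomics wider than the supported maximum are rewritten by
  // AtomicExpandPass into __atomic_* libcalls.)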

  setMaxDivRemBitWidthSupported(Subtarget.is64Bit() ? 128 : 64);

  setMaxLargeFPConvertBitWidthSupported(128);

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget.is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) {
    setCondCodeAction(ISD::SETOEQ, VT, Expand);
    setCondCodeAction(ISD::SETUNE, VT, Expand);
  }

  // Integer absolute.
  if (Subtarget.canUseCMOV()) {
    setOperationAction(ISD::ABS            , MVT::i16  , Custom);
    setOperationAction(ISD::ABS            , MVT::i32  , Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS          , MVT::i64  , Custom);
  }

  // Absolute difference.
  for (auto Op : {ISD::ABDS, ISD::ABDU}) {
    setOperationAction(Op                  , MVT::i8   , Custom);
    setOperationAction(Op                  , MVT::i16  , Custom);
    setOperationAction(Op                  , MVT::i32  , Custom);
    if (Subtarget.is64Bit())
      setOperationAction(Op                , MVT::i64  , Custom);
  }

  // Signed saturation subtraction.
  setOperationAction(ISD::SSUBSAT          , MVT::i8   , Custom);
  setOperationAction(ISD::SSUBSAT          , MVT::i16  , Custom);
  setOperationAction(ISD::SSUBSAT          , MVT::i32  , Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SSUBSAT        , MVT::i64  , Custom);

  for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
    // For slow shld targets we only lower for code size.
    LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? Custom : Legal;

    setOperationAction(ShiftOp             , MVT::i8   , Custom);
    setOperationAction(ShiftOp             , MVT::i16  , Custom);
    setOperationAction(ShiftOp             , MVT::i32  , ShiftDoubleAction);
    if (Subtarget.is64Bit())
      setOperationAction(ShiftOp           , MVT::i64  , ShiftDoubleAction);
  }

  if (!Subtarget.useSoftFloat()) {
    // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
    // operation.
    setOperationAction(ISD::UINT_TO_FP,        MVT::i8, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::UINT_TO_FP,        MVT::i16, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD or VCVTUSI2SS/SD for other targets.
    setOperationAction(ISD::UINT_TO_FP,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
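    // (For unsigned i64 the value is FILD'ed as if signed; when the sign bit
    // was set the result is off by 2^64, so a conditional FADD of 0x1p64
    // corrects it.)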

    // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::SINT_TO_FP,        MVT::i8, Promote);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
    // SSE has no i16 to fp conversion, only i32. We promote in the handler
    // to allow f80 to use i16 and f64 to use i16 with sse1 only
    setOperationAction(ISD::SINT_TO_FP,        MVT::i16, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
    // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not
    setOperationAction(ISD::SINT_TO_FP,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::SINT_TO_FP,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);

    // Promote i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::FP_TO_SINT,        MVT::i8, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_SINT,        MVT::i16, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::FP_TO_SINT,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);

    // Handle FP_TO_UINT by promoting the destination to a larger signed
    // conversion.
    setOperationAction(ISD::FP_TO_UINT,        MVT::i8, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_UINT,        MVT::i16, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);

    setOperationAction(ISD::LRINT,  MVT::f32, Custom);
    setOperationAction(ISD::LRINT,  MVT::f64, Custom);
    setOperationAction(ISD::LLRINT, MVT::f32, Custom);
    setOperationAction(ISD::LLRINT, MVT::f64, Custom);

    if (!Subtarget.is64Bit()) {
      setOperationAction(ISD::LRINT,  MVT::i64, Custom);
      setOperationAction(ISD::LLRINT, MVT::i64, Custom);
    }
  }

  if (Subtarget.hasSSE2()) {
    // Custom lowering for saturating float to int conversions.
    // We handle promotion to larger result types manually.
    for (MVT VT : { MVT::i8, MVT::i16, MVT::i32 }) {
      setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
    }
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
    }
  }

  // Handle address space casts between mixed sized pointers.
  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
  setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!Subtarget.hasSSE2()) {
    setOperationAction(ISD::BITCAST        , MVT::f32  , Expand);
    setOperationAction(ISD::BITCAST        , MVT::i32  , Expand);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITCAST      , MVT::f64  , Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST      , MVT::i64  , Expand);
    }
  } else if (!Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST      , MVT::i64  , Custom);

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
  }
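  // (E.g. "x / y" and "x % y" over the same operands become one ISD::SDIVREM
  // node, which selects to a single IDIV; the 16/32/64-bit forms leave the
  // quotient in rAX and the remainder in rDX.)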

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
                   MVT::i8,  MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::BR_CC,     VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);

  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
  setOperationAction(ISD::FREM             , MVT::f128 , Expand);

  if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) {
    setOperationAction(ISD::GET_ROUNDING   , MVT::i32  , Custom);
    setOperationAction(ISD::SET_ROUNDING   , MVT::Other, Custom);
    setOperationAction(ISD::GET_FPENV_MEM  , MVT::Other, Custom);
    setOperationAction(ISD::SET_FPENV_MEM  , MVT::Other, Custom);
    setOperationAction(ISD::RESET_FPENV    , MVT::Other, Custom);
  }

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationPromotedToType(ISD::CTTZ           , MVT::i8   , MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
  // Promoted i16. tzcntw has a false dependency on Intel CPUs. For BSF, we emit
  // a REP prefix to encode it as TZCNT for modern CPUs so it makes sense to
  // promote that too.
  setOperationPromotedToType(ISD::CTTZ           , MVT::i16  , MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i16  , MVT::i32);
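  // (When plain CTTZ is promoted, the legalizer ORs the widened value with
  // 1 << 8 or 1 << 16 so a zero input still yields the original bit width.)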

  if (!Subtarget.hasBMI()) {
    setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Legal);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
    }
  }

  if (Subtarget.hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationPromotedToType(ISD::CTLZ           , MVT::i8   , MVT::i32);
    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
  } else {
    for (auto VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
      if (VT == MVT::i64 && !Subtarget.is64Bit())
        continue;
      setOperationAction(ISD::CTLZ           , VT, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
    }
  }

  for (auto Op : {ISD::FP16_TO_FP, ISD::STRICT_FP16_TO_FP, ISD::FP_TO_FP16,
                  ISD::STRICT_FP_TO_FP16}) {
    // Special handling for half-precision floating point conversions.
    // If we don't have F16C support, then lower half float conversions
    // into library calls.
    setOperationAction(
        Op, MVT::f32,
        (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand);
    // There's never any support for operations beyond MVT::f32.
    setOperationAction(Op, MVT::f64, Expand);
    setOperationAction(Op, MVT::f80, Expand);
    setOperationAction(Op, MVT::f128, Expand);
  }

  for (MVT VT : {MVT::f32, MVT::f64, MVT::f80, MVT::f128}) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::bf16, Expand);
    setTruncStoreAction(VT, MVT::f16, Expand);
    setTruncStoreAction(VT, MVT::bf16, Expand);

    setOperationAction(ISD::BF16_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_BF16, VT, Custom);
  }

  setOperationAction(ISD::PARITY, MVT::i8, Custom);
  setOperationAction(ISD::PARITY, MVT::i16, Custom);
  setOperationAction(ISD::PARITY, MVT::i32, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::PARITY, MVT::i64, Custom);
  if (Subtarget.hasPOPCNT()) {
    setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
    // popcntw is longer to encode than popcntl and also has a false dependency
    // on the dest that popcntl hasn't had since Cannon Lake.
    setOperationPromotedToType(ISD::CTPOP, MVT::i16, MVT::i32);
  } else {
    setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
    else
      setOperationAction(ISD::CTPOP        , MVT::i64  , Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);

  if (!Subtarget.hasMOVBE())
    setOperationAction(ISD::BSWAP          , MVT::i16  , Expand);

  // X86 wants to expand cmov itself.
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
    setOperationAction(ISD::SELECT,         VT, Custom);
    setOperationAction(ISD::SETCC,          VT, Custom);
    setOperationAction(ISD::STRICT_FSETCC,  VT, Custom);
    setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
  }
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC,  VT, Custom);
  }

  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);

  setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
  // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

  // Darwin ABI issue.
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::ConstantPool    , VT, Custom);
    setOperationAction(ISD::JumpTable       , VT, Custom);
    setOperationAction(ISD::GlobalAddress   , VT, Custom);
    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
    setOperationAction(ISD::ExternalSymbol  , VT, Custom);
    setOperationAction(ISD::BlockAddress    , VT, Custom);
  }

  // 64-bit shl, sra, srl (iff 32-bit x86)
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SHL_PARTS, VT, Custom);
    setOperationAction(ISD::SRA_PARTS, VT, Custom);
    setOperationAction(ISD::SRL_PARTS, VT, Custom);
  }

  if (Subtarget.hasSSEPrefetch() || Subtarget.hasThreeDNow())
    setOperationAction(ISD::PREFETCH      , MVT::Other, Custom);

  setOperationAction(ISD::ATOMIC_FENCE  , MVT::Other, Custom);

  // Expand certain atomics
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget.is64Bit())
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);

  if (Subtarget.canUseCMPXCHG16B())
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  // FIXME - use subtarget debug flags
  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  if (Subtarget.isTargetPS())
    setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
  else
    setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  bool Is64Bit = Subtarget.is64Bit();
  setOperationAction(ISD::VAARG,  MVT::Other, Is64Bit ? Custom : Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);

  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);

  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);

  auto setF16Action = [&] (MVT VT, LegalizeAction Action) {
    setOperationAction(ISD::FABS, VT, Action);
    setOperationAction(ISD::FNEG, VT, Action);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FREM, VT, Action);
    setOperationAction(ISD::FMA, VT, Action);
    setOperationAction(ISD::FMINNUM, VT, Action);
    setOperationAction(ISD::FMAXNUM, VT, Action);
    setOperationAction(ISD::FMINIMUM, VT, Action);
    setOperationAction(ISD::FMAXIMUM, VT, Action);
    setOperationAction(ISD::FSIN, VT, Action);
    setOperationAction(ISD::FCOS, VT, Action);
    setOperationAction(ISD::FSINCOS, VT, Action);
    setOperationAction(ISD::FSQRT, VT, Action);
    setOperationAction(ISD::FPOW, VT, Action);
    setOperationAction(ISD::FLOG, VT, Action);
    setOperationAction(ISD::FLOG2, VT, Action);
    setOperationAction(ISD::FLOG10, VT, Action);
    setOperationAction(ISD::FEXP, VT, Action);
    setOperationAction(ISD::FEXP2, VT, Action);
    setOperationAction(ISD::FCEIL, VT, Action);
    setOperationAction(ISD::FFLOOR, VT, Action);
    setOperationAction(ISD::FNEARBYINT, VT, Action);
    setOperationAction(ISD::FRINT, VT, Action);
    setOperationAction(ISD::BR_CC, VT, Action);
    setOperationAction(ISD::SETCC, VT, Action);
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SELECT_CC, VT, Action);
    setOperationAction(ISD::FROUND, VT, Action);
    setOperationAction(ISD::FROUNDEVEN, VT, Action);
    setOperationAction(ISD::FTRUNC, VT, Action);
    setOperationAction(ISD::FLDEXP, VT, Action);
  };

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    // f16, f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f16, Subtarget.hasAVX512() ? &X86::FR16XRegClass
                                                     : &X86::FR16RegClass);
    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                     : &X86::FR32RegClass);
    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
                                                     : &X86::FR64RegClass);

    // Disable f32->f64 extload as we can only generate this in one instruction
    // under optsize. So it's easier to pattern match (fpext (load)) for that
    // case instead of needing to emit 2 instructions for extload in the
    // non-optsize case.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      // Use ANDPD to simulate FABS.
      setOperationAction(ISD::FABS, VT, Custom);

      // Use XORP to simulate FNEG.
      setOperationAction(ISD::FNEG, VT, Custom);

      // Use ANDPD and ORPD to simulate FCOPYSIGN.
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);

      // These might be better off as horizontal vector ops.
      setOperationAction(ISD::FADD, VT, Custom);
      setOperationAction(ISD::FSUB, VT, Custom);

      // We don't support sin/cos/fmod
      setOperationAction(ISD::FSIN   , VT, Expand);
      setOperationAction(ISD::FCOS   , VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }

    // Half type will be promoted by default.
    setF16Action(MVT::f16, Promote);
    setOperationAction(ISD::FADD, MVT::f16, Promote);
    setOperationAction(ISD::FSUB, MVT::f16, Promote);
    setOperationAction(ISD::FMUL, MVT::f16, Promote);
    setOperationAction(ISD::FDIV, MVT::f16, Promote);
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
    setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);

    setOperationAction(ISD::STRICT_FADD, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSUB, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMUL, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FDIV, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMA, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMINNUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMAXNUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMINIMUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMAXIMUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FPOW, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FLDEXP, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FLOG, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FLOG2, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FLOG10, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FEXP, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FEXP2, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FRINT, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);

    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");

    // Lower this to MOVMSK plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

  } else if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1() &&
             (UseX87 || Is64Bit)) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    if (UseX87)
      addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    if (UseX87)
      setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    if (UseX87)
      setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN   , MVT::f32, Expand);
    setOperationAction(ISD::FCOS   , MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    if (UseX87) {
      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (UseX87) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      setOperationAction(ISD::UNDEF,     VT, Expand);
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);

      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN   , VT, Expand);
      setOperationAction(ISD::FCOS   , VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }
  }

  // Expand FP32 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f32)) {
    if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
      addLegalFPImmediate(APFloat(+0.0f)); // FLD0
      addLegalFPImmediate(APFloat(+1.0f)); // FLD1
      addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0f)); // xorps
  }
  // Expand FP64 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f64)) {
    if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
      addLegalFPImmediate(APFloat(+0.0)); // FLD0
      addLegalFPImmediate(APFloat(+1.0)); // FLD1
      addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0)); // xorpd
  }
  // Support fp16 0 immediate.
  if (isTypeLegal(MVT::f16))
    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEhalf()));

  // Handle constrained floating-point operations of scalar.
  setOperationAction(ISD::STRICT_FADD,     MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FADD,     MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB,     MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB,     MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL,     MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL,     MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV,     MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV,     MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSQRT,    MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSQRT,    MVT::f64, Legal);

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // f80 always uses X87.
  if (UseX87) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
      addLegalFPImmediate(TmpFlt);  // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2);  // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
    }

    // Always expand sin/cos functions even though x87 has an instruction.
    setOperationAction(ISD::FSIN   , MVT::f80, Expand);
    setOperationAction(ISD::FCOS   , MVT::f80, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f80, Expand);

    setOperationAction(ISD::FFLOOR,     MVT::f80, Expand);
    setOperationAction(ISD::FCEIL,      MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC,     MVT::f80, Expand);
    setOperationAction(ISD::FRINT,      MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA,        MVT::f80, Expand);
    setOperationAction(ISD::LROUND,     MVT::f80, Expand);
    setOperationAction(ISD::LLROUND,    MVT::f80, Expand);
    setOperationAction(ISD::LRINT,      MVT::f80, Custom);
    setOperationAction(ISD::LLRINT,     MVT::f80, Custom);

    // Handle constrained floating-point operations of scalar.
    setOperationAction(ISD::STRICT_FADD , MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FSUB , MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FMUL , MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FDIV , MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f80, Legal);
    if (isTypeLegal(MVT::f16)) {
      setOperationAction(ISD::FP_EXTEND, MVT::f80, Custom);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Custom);
    } else {
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);
    }
    // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
    // as Custom.
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
  }

  // f128 uses xmm registers, but most operations require libcalls.
  if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                   : &X86::VR128RegClass);

    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps

    setOperationAction(ISD::FADD,        MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
    setOperationAction(ISD::FSUB,        MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
    setOperationAction(ISD::FDIV,        MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
    setOperationAction(ISD::FMUL,        MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
    setOperationAction(ISD::FMA,         MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FMA,  MVT::f128, LibCall);

    setOperationAction(ISD::FABS,      MVT::f128, Custom);
    setOperationAction(ISD::FNEG,      MVT::f128, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);

    setOperationAction(ISD::FSIN,         MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSIN,  MVT::f128, LibCall);
    setOperationAction(ISD::FCOS,         MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FCOS,  MVT::f128, LibCall);
    setOperationAction(ISD::FSINCOS,      MVT::f128, LibCall);
    // No STRICT_FSINCOS
    setOperationAction(ISD::FSQRT,        MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);

    setOperationAction(ISD::FP_EXTEND,        MVT::f128, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
    // We need to custom handle any FP_ROUND with an f128 input, but
    // LegalizeDAG uses the result type to know when to run a custom handler.
    // So we have to list all legal floating point result types here.
    if (isTypeLegal(MVT::f32)) {
      setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
    }
    if (isTypeLegal(MVT::f64)) {
      setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
    }
    if (isTypeLegal(MVT::f80)) {
      setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
    }

    setOperationAction(ISD::SETCC, MVT::f128, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
    setTruncStoreAction(MVT::f128, MVT::f32, Expand);
    setTruncStoreAction(MVT::f128, MVT::f64, Expand);
    setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f128 , Expand);

  setOperationAction(ISD::FLOG,    MVT::f80, Expand);
  setOperationAction(ISD::FLOG2,   MVT::f80, Expand);
  setOperationAction(ISD::FLOG10,  MVT::f80, Expand);
  setOperationAction(ISD::FEXP,    MVT::f80, Expand);
  setOperationAction(ISD::FEXP2,   MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // Some FP actions are always expanded for vector types.
  for (auto VT : { MVT::v8f16, MVT::v16f16, MVT::v32f16,
                   MVT::v4f32, MVT::v8f32,  MVT::v16f32,
                   MVT::v2f64, MVT::v4f64,  MVT::v8f64 }) {
    setOperationAction(ISD::FSIN,      VT, Expand);
    setOperationAction(ISD::FSINCOS,   VT, Expand);
    setOperationAction(ISD::FCOS,      VT, Expand);
    setOperationAction(ISD::FREM,      VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FPOW,      VT, Expand);
    setOperationAction(ISD::FLOG,      VT, Expand);
    setOperationAction(ISD::FLOG2,     VT, Expand);
    setOperationAction(ISD::FLOG10,    VT, Expand);
    setOperationAction(ISD::FEXP,      VT, Expand);
    setOperationAction(ISD::FEXP2,     VT, Expand);
  }

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
      // split/scalarized right now.
      if (VT.getVectorElementType() == MVT::f16 ||
          VT.getVectorElementType() == MVT::bf16)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    setOperationAction(ISD::FMAXIMUM, MVT::f32, Custom);
    setOperationAction(ISD::FMINIMUM, MVT::f32, Custom);

    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
    setOperationAction(ISD::FABS,               MVT::v4f32, Custom);
    setOperationAction(ISD::FCOPYSIGN,          MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);

    setOperationAction(ISD::LOAD,  MVT::v2f32, Custom);
    setOperationAction(ISD::STORE, MVT::v2f32, Custom);

    setOperationAction(ISD::STRICT_FADD,  MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSUB,  MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMUL,  MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FDIV,  MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8f16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    for (auto VT : { MVT::f64, MVT::v4f32, MVT::v2f64 }) {
      setOperationAction(ISD::FMAXIMUM, VT, Custom);
      setOperationAction(ISD::FMINIMUM, VT, Custom);
    }

    for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
                     MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::SREM, VT, Custom);
      setOperationAction(ISD::UDIV, VT, Custom);
      setOperationAction(ISD::UREM, VT, Custom);
    }

    setOperationAction(ISD::MUL,       MVT::v2i8,  Custom);
    setOperationAction(ISD::MUL,       MVT::v4i8,  Custom);
    setOperationAction(ISD::MUL,       MVT::v8i8,  Custom);

    setOperationAction(ISD::MUL,       MVT::v16i8, Custom);
    setOperationAction(ISD::MUL,       MVT::v4i32, Custom);
    setOperationAction(ISD::MUL,       MVT::v2i64, Custom);
    setOperationAction(ISD::MULHU,     MVT::v4i32, Custom);
    setOperationAction(ISD::MULHS,     MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU,     MVT::v16i8, Custom);
    setOperationAction(ISD::MULHS,     MVT::v16i8, Custom);
    setOperationAction(ISD::MULHU,     MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS,     MVT::v8i16, Legal);
    setOperationAction(ISD::MUL,       MVT::v8i16, Legal);
    setOperationAction(ISD::AVGCEILU,  MVT::v16i8, Legal);
    setOperationAction(ISD::AVGCEILU,  MVT::v8i16, Legal);

    setOperationAction(ISD::SMULO,     MVT::v16i8, Custom);
    setOperationAction(ISD::UMULO,     MVT::v16i8, Custom);
    setOperationAction(ISD::UMULO,     MVT::v2i32, Custom);

    setOperationAction(ISD::FNEG,      MVT::v2f64, Custom);
    setOperationAction(ISD::FABS,      MVT::v2f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
      setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
    }

    setOperationAction(ISD::ABDU,      MVT::v16i8, Custom);
    setOperationAction(ISD::ABDS,      MVT::v16i8, Custom);
    setOperationAction(ISD::ABDU,      MVT::v8i16, Custom);
    setOperationAction(ISD::ABDS,      MVT::v8i16, Custom);
    setOperationAction(ISD::ABDU,      MVT::v4i32, Custom);
    setOperationAction(ISD::ABDS,      MVT::v4i32, Custom);

    setOperationAction(ISD::UADDSAT,   MVT::v16i8, Legal);
    setOperationAction(ISD::SADDSAT,   MVT::v16i8, Legal);
    setOperationAction(ISD::USUBSAT,   MVT::v16i8, Legal);
    setOperationAction(ISD::SSUBSAT,   MVT::v16i8, Legal);
    setOperationAction(ISD::UADDSAT,   MVT::v8i16, Legal);
    setOperationAction(ISD::SADDSAT,   MVT::v8i16, Legal);
    setOperationAction(ISD::USUBSAT,   MVT::v8i16, Legal);
    setOperationAction(ISD::SSUBSAT,   MVT::v8i16, Legal);
    setOperationAction(ISD::USUBSAT,   MVT::v4i32, Custom);
    setOperationAction(ISD::USUBSAT,   MVT::v2i64, Custom);

    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::ABS,   VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }

    setOperationAction(ISD::SETCC,          MVT::v2f64, Custom);
    setOperationAction(ISD::SETCC,          MVT::v4f32, Custom);
    setOperationAction(ISD::STRICT_FSETCC,  MVT::v2f64, Custom);
    setOperationAction(ISD::STRICT_FSETCC,  MVT::v4f32, Custom);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::v2f64, Custom);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::v4f32, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
      setOperationAction(ISD::VSELECT,            VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    for (auto VT : { MVT::v8f16, MVT::v2f64, MVT::v2i64 }) {
      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
      setOperationAction(ISD::VSELECT,            VT, Custom);

      if (VT == MVT::v2i64 && !Subtarget.is64Bit())
        continue;

      setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
    setF16Action(MVT::v8f16, Expand);
    setOperationAction(ISD::FADD, MVT::v8f16, Expand);
    setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
    setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
    setOperationAction(ISD::FDIV, MVT::v8f16, Expand);

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f16, Custom);
    setOperationAction(ISD::SELECT, MVT::v16i8, Custom);

    setOperationAction(ISD::FP_TO_SINT,        MVT::v4i32, Custom);
    setOperationAction(ISD::FP_TO_UINT,        MVT::v4i32, Custom);
    setOperationAction(ISD::FP_TO_SINT,        MVT::v2i32, Custom);
    setOperationAction(ISD::FP_TO_UINT,        MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i32, Custom);

    // Custom legalize these to avoid over promotion or custom promotion.
    for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
      setOperationAction(ISD::FP_TO_SINT,        VT, Custom);
      setOperationAction(ISD::FP_TO_UINT,        VT, Custom);
      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
    }

    setOperationAction(ISD::SINT_TO_FP,        MVT::v4i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Custom);
    setOperationAction(ISD::SINT_TO_FP,        MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP,        MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP,        MVT::v4i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Custom);

    // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
    setOperationAction(ISD::SINT_TO_FP,        MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f32, Custom);
    setOperationAction(ISD::UINT_TO_FP,        MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND,        MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND,         MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_FP_ROUND,  MVT::v2f32, Custom);

    // We want to legalize this to an f64 load rather than an i64 load on
    // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
    // the v4i16 and v8i8 cases below.
    setOperationAction(ISD::LOAD,  MVT::v2i32, Custom);
    setOperationAction(ISD::LOAD,  MVT::v4i16, Custom);
    setOperationAction(ISD::LOAD,  MVT::v8i8,  Custom);
    setOperationAction(ISD::STORE, MVT::v2i32, Custom);
    setOperationAction(ISD::STORE, MVT::v4i16, Custom);
    setOperationAction(ISD::STORE, MVT::v8i8,  Custom);

    // Add 32-bit vector stores to help vectorization opportunities.
    setOperationAction(ISD::STORE, MVT::v2i16, Custom);
    setOperationAction(ISD::STORE, MVT::v4i8,  Custom);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8,  Custom);
    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);

    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);

    setOperationAction(ISD::TRUNCATE, MVT::v2i8,   Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16,  Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i32,  Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i64,  Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8,   Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16,  Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32,  Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i64,  Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i8,   Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16,  Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i32,  Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i64,  Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8,  Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);

    // In the customized shift lowering, the legal v4i32/v2i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
      if (VT == MVT::v2i64) continue;
      setOperationAction(ISD::ROTL, VT, Custom);
      setOperationAction(ISD::ROTR, VT, Custom);
      setOperationAction(ISD::FSHL, VT, Custom);
      setOperationAction(ISD::FSHR, VT, Custom);
    }

    setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FADD,  MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSUB,  MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMUL,  MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FDIV,  MVT::v2f64, Legal);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
    setOperationAction(ISD::ABS,        MVT::v16i8, Legal);
    setOperationAction(ISD::ABS,        MVT::v8i16, Legal);
    setOperationAction(ISD::ABS,        MVT::v4i32, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ,       MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ,       MVT::v8i16, Custom);
    setOperationAction(ISD::CTLZ,       MVT::v4i32, Custom);
    setOperationAction(ISD::CTLZ,       MVT::v2i64, Custom);

    // These might be better off as horizontal vector ops.
    setOperationAction(ISD::ADD, MVT::i16, Custom);
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i16, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
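    // (With SSSE3 the custom scalar ADD/SUB lowering can fold an add or sub
    // of two extracted vector elements into a PHADD/PHSUB horizontal op.)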
1294 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
1295 for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1296 setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
1297 setOperationAction(ISD::STRICT_FFLOOR, RoundedTy, Legal);
1298 setOperationAction(ISD::FCEIL, RoundedTy, Legal);
1299 setOperationAction(ISD::STRICT_FCEIL, RoundedTy, Legal);
1300 setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
1301 setOperationAction(ISD::STRICT_FTRUNC, RoundedTy, Legal);
1302 setOperationAction(ISD::FRINT, RoundedTy, Legal);
1303 setOperationAction(ISD::STRICT_FRINT, RoundedTy, Legal);
1304 setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
1305 setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy, Legal);
1306 setOperationAction(ISD::FROUNDEVEN, RoundedTy, Legal);
1307 setOperationAction(ISD::STRICT_FROUNDEVEN, RoundedTy, Legal);
1309 setOperationAction(ISD::FROUND, RoundedTy, Custom);
1312 setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
1313 setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
1314 setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
1315 setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
1316 setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
1317 setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
1318 setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
1319 setOperationAction(ISD::UMIN, MVT::v4i32, Legal);
1321 for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
1322 setOperationAction(ISD::ABDS, VT, Custom);
1323 setOperationAction(ISD::ABDU, VT, Custom);
1324 }
1326 setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
1327 setOperationAction(ISD::SADDSAT, MVT::v2i64, Custom);
1328 setOperationAction(ISD::SSUBSAT, MVT::v2i64, Custom);
1330 // FIXME: Do we need to handle scalar-to-vector here?
1331 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
1332 setOperationAction(ISD::SMULO, MVT::v2i32, Custom);
1334 // We directly match byte blends in the backend as they match the VSELECT
1335 // condition form.
1336 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
1338 // SSE41 brings specific instructions for doing vector sign extend even in
1339 // cases where we don't have SRA.
1340 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1341 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1342 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1343 }
1345 // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1346 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1347 setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
1348 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
1349 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
1350 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1351 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1352 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
1353 }
1355 if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
1356 // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we can
1357 // do the pre and post work in the vector domain.
1358 setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Custom);
1359 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
1360 // We need to mark SINT_TO_FP as Custom even though we want to expand it
1361 // so that DAG combine doesn't try to turn it into uint_to_fp.
1362 setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Custom);
1363 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
1364 }
1365 }
1367 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) {
1368 setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom);
1369 }
1371 if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1372 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1373 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1374 setOperationAction(ISD::ROTL, VT, Custom);
1375 setOperationAction(ISD::ROTR, VT, Custom);
1376 }
1378 // XOP can efficiently perform BITREVERSE with VPPERM.
1379 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1380 setOperationAction(ISD::BITREVERSE, VT, Custom);
1382 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1383 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1384 setOperationAction(ISD::BITREVERSE, VT, Custom);
1385 }
1387 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1388 bool HasInt256 = Subtarget.hasInt256();
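// HasInt256 == AVX2. With plain AVX1 most 256-bit integer operations below
// are Custom-lowered by splitting into two 128-bit halves; with AVX2 they
// map to single instructions (Legal).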
1390 addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
1391 : &X86::VR256RegClass);
1392 addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1393 : &X86::VR256RegClass);
1394 addRegisterClass(MVT::v16f16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1395 : &X86::VR256RegClass);
1396 addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1397 : &X86::VR256RegClass);
1398 addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1399 : &X86::VR256RegClass);
1400 addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1401 : &X86::VR256RegClass);
1402 addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1403 : &X86::VR256RegClass);
1405 for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1406 setOperationAction(ISD::FFLOOR, VT, Legal);
1407 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
1408 setOperationAction(ISD::FCEIL, VT, Legal);
1409 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
1410 setOperationAction(ISD::FTRUNC, VT, Legal);
1411 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
1412 setOperationAction(ISD::FRINT, VT, Legal);
1413 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
1414 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1415 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1416 setOperationAction(ISD::FROUNDEVEN, VT, Legal);
1417 setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1419 setOperationAction(ISD::FROUND, VT, Custom);
1421 setOperationAction(ISD::FNEG, VT, Custom);
1422 setOperationAction(ISD::FABS, VT, Custom);
1423 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1425 setOperationAction(ISD::FMAXIMUM, VT, Custom);
1426 setOperationAction(ISD::FMINIMUM, VT, Custom);
1427 }
1429 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1430 // even though v8i16 is a legal type.
1431 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1432 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1433 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1434 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1435 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Custom);
1436 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Custom);
1437 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i32, Custom);
1439 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Custom);
1440 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i32, Custom);
1441 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Expand);
1442 setOperationAction(ISD::FP_ROUND, MVT::v8f16, Expand);
1443 setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Custom);
1444 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Custom);
1446 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f32, Legal);
1447 setOperationAction(ISD::STRICT_FADD, MVT::v8f32, Legal);
1448 setOperationAction(ISD::STRICT_FADD, MVT::v4f64, Legal);
1449 setOperationAction(ISD::STRICT_FSUB, MVT::v8f32, Legal);
1450 setOperationAction(ISD::STRICT_FSUB, MVT::v4f64, Legal);
1451 setOperationAction(ISD::STRICT_FMUL, MVT::v8f32, Legal);
1452 setOperationAction(ISD::STRICT_FMUL, MVT::v4f64, Legal);
1453 setOperationAction(ISD::STRICT_FDIV, MVT::v8f32, Legal);
1454 setOperationAction(ISD::STRICT_FDIV, MVT::v4f64, Legal);
1455 setOperationAction(ISD::STRICT_FSQRT, MVT::v8f32, Legal);
1456 setOperationAction(ISD::STRICT_FSQRT, MVT::v4f64, Legal);
1458 if (!Subtarget.hasAVX512())
1459 setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1461 // In the customized shift lowering, the legal v8i32/v4i64 cases
1462 // in AVX2 will be recognized.
1463 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1464 setOperationAction(ISD::SRL, VT, Custom);
1465 setOperationAction(ISD::SHL, VT, Custom);
1466 setOperationAction(ISD::SRA, VT, Custom);
1467 setOperationAction(ISD::ABDS, VT, Custom);
1468 setOperationAction(ISD::ABDU, VT, Custom);
1469 if (VT == MVT::v4i64) continue;
1470 setOperationAction(ISD::ROTL, VT, Custom);
1471 setOperationAction(ISD::ROTR, VT, Custom);
1472 setOperationAction(ISD::FSHL, VT, Custom);
1473 setOperationAction(ISD::FSHR, VT, Custom);
1474 }
1476 // These types need custom splitting if their input is a 128-bit vector.
1477 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1478 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1479 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1480 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1482 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
1483 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
1484 setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
1485 setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
1486 setOperationAction(ISD::SELECT, MVT::v16f16, Custom);
1487 setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
1488 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
1490 for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1491 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1492 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1493 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1494 }
1496 setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
1497 setOperationAction(ISD::TRUNCATE, MVT::v32i16, Custom);
1498 setOperationAction(ISD::TRUNCATE, MVT::v32i32, Custom);
1499 setOperationAction(ISD::TRUNCATE, MVT::v32i64, Custom);
1501 setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
1503 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1504 setOperationAction(ISD::SETCC, VT, Custom);
1505 setOperationAction(ISD::CTPOP, VT, Custom);
1506 setOperationAction(ISD::CTLZ, VT, Custom);
1508 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1509 // setcc all the way to isel and prefer SETGT in some isel patterns.
1510 setCondCodeAction(ISD::SETLT, VT, Custom);
1511 setCondCodeAction(ISD::SETLE, VT, Custom);
1512 }
1514 setOperationAction(ISD::SETCC, MVT::v4f64, Custom);
1515 setOperationAction(ISD::SETCC, MVT::v8f32, Custom);
1516 setOperationAction(ISD::STRICT_FSETCC, MVT::v4f64, Custom);
1517 setOperationAction(ISD::STRICT_FSETCC, MVT::v8f32, Custom);
1518 setOperationAction(ISD::STRICT_FSETCCS, MVT::v4f64, Custom);
1519 setOperationAction(ISD::STRICT_FSETCCS, MVT::v8f32, Custom);
1521 if (Subtarget.hasAnyFMA()) {
1522 for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1523 MVT::v2f64, MVT::v4f64 }) {
1524 setOperationAction(ISD::FMA, VT, Legal);
1525 setOperationAction(ISD::STRICT_FMA, VT, Legal);
1526 }
1527 }
1529 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1530 setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1531 setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1532 }
1534 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1535 setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
1536 setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
1537 setOperationAction(ISD::MUL, MVT::v32i8, Custom);
1539 setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
1540 setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
1541 setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
1542 setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
1543 setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
1544 setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
1545 setOperationAction(ISD::AVGCEILU, MVT::v16i16, HasInt256 ? Legal : Custom);
1546 setOperationAction(ISD::AVGCEILU, MVT::v32i8, HasInt256 ? Legal : Custom);
1548 setOperationAction(ISD::SMULO, MVT::v32i8, Custom);
1549 setOperationAction(ISD::UMULO, MVT::v32i8, Custom);
1551 setOperationAction(ISD::ABS, MVT::v4i64, Custom);
1552 setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
1553 setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
1554 setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
1555 setOperationAction(ISD::UMIN, MVT::v4i64, Custom);
1557 setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1558 setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1559 setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1560 setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1561 setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1562 setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1563 setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1564 setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1565 setOperationAction(ISD::UADDSAT, MVT::v8i32, Custom);
1566 setOperationAction(ISD::USUBSAT, MVT::v8i32, Custom);
1567 setOperationAction(ISD::UADDSAT, MVT::v4i64, Custom);
1568 setOperationAction(ISD::USUBSAT, MVT::v4i64, Custom);
1570 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1571 setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
1572 setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1573 setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1574 setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1575 setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1576 }
1578 for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1579 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1580 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1581 }
1583 if (HasInt256) {
1584 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1585 // when we have a 256bit-wide blend with immediate.
1586 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1587 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1589 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1590 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1591 setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1592 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
1593 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
1594 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
1595 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
1596 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
1597 }
1598 }
1600 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1601 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1602 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1603 setOperationAction(ISD::MSTORE, VT, Legal);
1604 }
1606 // Extract subvector is special because the value type
1607 // (result) is 128-bit but the source is 256-bit wide.
1608 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1609 MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
1610 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1611 }
1613 // Custom lower several nodes for 256-bit types.
1614 for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1615 MVT::v16f16, MVT::v8f32, MVT::v4f64 }) {
1616 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1617 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1618 setOperationAction(ISD::VSELECT, VT, Custom);
1619 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1620 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1621 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1622 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1623 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1624 setOperationAction(ISD::STORE, VT, Custom);
1625 }
1626 setF16Action(MVT::v16f16, Expand);
1627 setOperationAction(ISD::FADD, MVT::v16f16, Expand);
1628 setOperationAction(ISD::FSUB, MVT::v16f16, Expand);
1629 setOperationAction(ISD::FMUL, MVT::v16f16, Expand);
1630 setOperationAction(ISD::FDIV, MVT::v16f16, Expand);
1632 if (HasInt256) {
1633 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1635 // Custom legalize 2x32 to get a little better code.
1636 setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1637 setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1639 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1640 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1641 setOperationAction(ISD::MGATHER, VT, Custom);
1642 }
1643 }
1645 if (!Subtarget.useSoftFloat() && !Subtarget.hasFP16() &&
1646 Subtarget.hasF16C()) {
1647 for (MVT VT : { MVT::f16, MVT::v2f16, MVT::v4f16, MVT::v8f16 }) {
1648 setOperationAction(ISD::FP_ROUND, VT, Custom);
1649 setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
1650 }
1651 for (MVT VT : { MVT::f32, MVT::v2f32, MVT::v4f32 }) {
1652 setOperationAction(ISD::FP_EXTEND, VT, Custom);
1653 setOperationAction(ISD::STRICT_FP_EXTEND, VT, Custom);
1654 }
1655 for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1656 setOperationPromotedToType(Opc, MVT::v8f16, MVT::v8f32);
1657 setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1658 }
1660 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1661 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f32, Legal);
1662 }
1664 // This block controls legalization of the mask vector sizes that are
1665 // available with AVX512. 512-bit vectors are in a separate block controlled
1666 // by useAVX512Regs.
1667 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1668 addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
1669 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1670 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1671 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1672 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1674 setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
1675 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1676 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
1678 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1679 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1680 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1681 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1682 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1683 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1684 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1685 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1686 setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
1687 setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
1688 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i1, Custom);
1689 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i1, Custom);
1691 // There is no byte sized k-register load or store without AVX512DQ.
1692 if (!Subtarget.hasDQI()) {
1693 setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1694 setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1695 setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1696 setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1698 setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1699 setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1700 setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1701 setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1702 }
1704 // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1705 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1706 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1707 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1708 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1709 }
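// There is no k-register select instruction; expanding vXi1 VSELECT turns
// it into mask logic: (Cond & True) | (~Cond & False).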
1711 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 })
1712 setOperationAction(ISD::VSELECT, VT, Expand);
1714 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1715 setOperationAction(ISD::SETCC, VT, Custom);
1716 setOperationAction(ISD::SELECT, VT, Custom);
1717 setOperationAction(ISD::TRUNCATE, VT, Custom);
1719 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1720 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1721 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1722 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1723 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1724 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1725 }
1727 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1728 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1729 }
1731 // This block controls legalization for 512-bit operations with 8/16/32/64 bit
1732 // elements. 512-bits can be disabled based on prefer-vector-width and
1733 // required-vector-width function attributes.
1734 if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1735 bool HasBWI = Subtarget.hasBWI();
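// Even without BWI the 512-bit v64i8/v32i16 types are legal register types;
// most operations on them below are Custom-lowered by splitting into 256-bit
// halves unless BWI provides the full-width instructions.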
1737 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1738 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1739 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1740 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1741 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1742 addRegisterClass(MVT::v32f16, &X86::VR512RegClass);
1743 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1745 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1746 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
1747 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1748 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
1749 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
1750 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
1751 if (HasBWI)
1752 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1753 }
1755 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1756 setOperationAction(ISD::FMAXIMUM, VT, Custom);
1757 setOperationAction(ISD::FMINIMUM, VT, Custom);
1758 setOperationAction(ISD::FNEG, VT, Custom);
1759 setOperationAction(ISD::FABS, VT, Custom);
1760 setOperationAction(ISD::FMA, VT, Legal);
1761 setOperationAction(ISD::STRICT_FMA, VT, Legal);
1762 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1763 }
1765 for (MVT VT : { MVT::v16i1, MVT::v16i8 }) {
1766 setOperationPromotedToType(ISD::FP_TO_SINT , VT, MVT::v16i32);
1767 setOperationPromotedToType(ISD::FP_TO_UINT , VT, MVT::v16i32);
1768 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1769 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1770 }
1772 for (MVT VT : { MVT::v16i16, MVT::v16i32 }) {
1773 setOperationAction(ISD::FP_TO_SINT, VT, Custom);
1774 setOperationAction(ISD::FP_TO_UINT, VT, Custom);
1775 setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
1776 setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
1777 }
1779 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Custom);
1780 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Custom);
1781 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Custom);
1782 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Custom);
1783 setOperationAction(ISD::FP_EXTEND, MVT::v8f64, Custom);
1784 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Custom);
1786 setOperationAction(ISD::STRICT_FADD, MVT::v16f32, Legal);
1787 setOperationAction(ISD::STRICT_FADD, MVT::v8f64, Legal);
1788 setOperationAction(ISD::STRICT_FSUB, MVT::v16f32, Legal);
1789 setOperationAction(ISD::STRICT_FSUB, MVT::v8f64, Legal);
1790 setOperationAction(ISD::STRICT_FMUL, MVT::v16f32, Legal);
1791 setOperationAction(ISD::STRICT_FMUL, MVT::v8f64, Legal);
1792 setOperationAction(ISD::STRICT_FDIV, MVT::v16f32, Legal);
1793 setOperationAction(ISD::STRICT_FDIV, MVT::v8f64, Legal);
1794 setOperationAction(ISD::STRICT_FSQRT, MVT::v16f32, Legal);
1795 setOperationAction(ISD::STRICT_FSQRT, MVT::v8f64, Legal);
1796 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f32, Legal);
1798 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1799 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1800 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1801 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1802 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
1803 if (HasBWI)
1804 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
1806 // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1807 // to 512-bit rather than use the AVX2 instructions so that we can use
1808 // k-masks.
1809 if (!Subtarget.hasVLX()) {
1810 for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1811 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1812 setOperationAction(ISD::MLOAD, VT, Custom);
1813 setOperationAction(ISD::MSTORE, VT, Custom);
1814 }
1815 }
1817 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Legal);
1818 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Legal);
1819 setOperationAction(ISD::TRUNCATE, MVT::v32i8, HasBWI ? Legal : Custom);
1820 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1821 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1822 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1823 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1824 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1825 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1826 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1827 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1828 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1831 // Extends from v64i1 masks to 512-bit vectors.
1832 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1833 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1834 setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
1837 for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1838 setOperationAction(ISD::FFLOOR, VT, Legal);
1839 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
1840 setOperationAction(ISD::FCEIL, VT, Legal);
1841 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
1842 setOperationAction(ISD::FTRUNC, VT, Legal);
1843 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
1844 setOperationAction(ISD::FRINT, VT, Legal);
1845 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
1846 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1847 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1848 setOperationAction(ISD::FROUNDEVEN, VT, Legal);
1849 setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1851 setOperationAction(ISD::FROUND, VT, Custom);
1852 }
1854 for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1855 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1856 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1857 }
1859 setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom);
1860 setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom);
1861 setOperationAction(ISD::ADD, MVT::v64i8, HasBWI ? Legal : Custom);
1862 setOperationAction(ISD::SUB, MVT::v64i8, HasBWI ? Legal : Custom);
1864 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1865 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1866 setOperationAction(ISD::MUL, MVT::v32i16, HasBWI ? Legal : Custom);
1867 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1869 setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1870 setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1871 setOperationAction(ISD::MULHS, MVT::v32i16, HasBWI ? Legal : Custom);
1872 setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom);
1873 setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
1874 setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
1875 setOperationAction(ISD::AVGCEILU, MVT::v32i16, HasBWI ? Legal : Custom);
1876 setOperationAction(ISD::AVGCEILU, MVT::v64i8, HasBWI ? Legal : Custom);
1878 setOperationAction(ISD::SMULO, MVT::v64i8, Custom);
1879 setOperationAction(ISD::UMULO, MVT::v64i8, Custom);
1881 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1883 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1884 setOperationAction(ISD::SRL, VT, Custom);
1885 setOperationAction(ISD::SHL, VT, Custom);
1886 setOperationAction(ISD::SRA, VT, Custom);
1887 setOperationAction(ISD::ROTL, VT, Custom);
1888 setOperationAction(ISD::ROTR, VT, Custom);
1889 setOperationAction(ISD::SETCC, VT, Custom);
1890 setOperationAction(ISD::ABDS, VT, Custom);
1891 setOperationAction(ISD::ABDU, VT, Custom);
1893 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1894 // setcc all the way to isel and prefer SETGT in some isel patterns.
1895 setCondCodeAction(ISD::SETLT, VT, Custom);
1896 setCondCodeAction(ISD::SETLE, VT, Custom);
1897 }
1899 setOperationAction(ISD::SETCC, MVT::v8f64, Custom);
1900 setOperationAction(ISD::SETCC, MVT::v16f32, Custom);
1901 setOperationAction(ISD::STRICT_FSETCC, MVT::v8f64, Custom);
1902 setOperationAction(ISD::STRICT_FSETCC, MVT::v16f32, Custom);
1903 setOperationAction(ISD::STRICT_FSETCCS, MVT::v8f64, Custom);
1904 setOperationAction(ISD::STRICT_FSETCCS, MVT::v16f32, Custom);
1906 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1907 setOperationAction(ISD::SMAX, VT, Legal);
1908 setOperationAction(ISD::UMAX, VT, Legal);
1909 setOperationAction(ISD::SMIN, VT, Legal);
1910 setOperationAction(ISD::UMIN, VT, Legal);
1911 setOperationAction(ISD::ABS, VT, Legal);
1912 setOperationAction(ISD::CTPOP, VT, Custom);
1913 }
1915 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1916 setOperationAction(ISD::ABS, VT, HasBWI ? Legal : Custom);
1917 setOperationAction(ISD::CTPOP, VT, Subtarget.hasBITALG() ? Legal : Custom);
1918 setOperationAction(ISD::CTLZ, VT, Custom);
1919 setOperationAction(ISD::SMAX, VT, HasBWI ? Legal : Custom);
1920 setOperationAction(ISD::UMAX, VT, HasBWI ? Legal : Custom);
1921 setOperationAction(ISD::SMIN, VT, HasBWI ? Legal : Custom);
1922 setOperationAction(ISD::UMIN, VT, HasBWI ? Legal : Custom);
1923 setOperationAction(ISD::UADDSAT, VT, HasBWI ? Legal : Custom);
1924 setOperationAction(ISD::SADDSAT, VT, HasBWI ? Legal : Custom);
1925 setOperationAction(ISD::USUBSAT, VT, HasBWI ? Legal : Custom);
1926 setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom);
1927 }
1929 setOperationAction(ISD::FSHL, MVT::v64i8, Custom);
1930 setOperationAction(ISD::FSHR, MVT::v64i8, Custom);
1931 setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
1932 setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
1933 setOperationAction(ISD::FSHL, MVT::v16i32, Custom);
1934 setOperationAction(ISD::FSHR, MVT::v16i32, Custom);
1936 if (Subtarget.hasDQI()) {
1937 for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
1938 ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1939 ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
1940 setOperationAction(Opc, MVT::v8i64, Custom);
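// AVX512DQ has a native 64-bit element multiply (VPMULLQ).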
1941 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1942 }
1944 if (Subtarget.hasCDI()) {
1945 // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
1946 for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1947 setOperationAction(ISD::CTLZ, VT, Legal);
1948 }
1949 } // Subtarget.hasCDI()
1951 if (Subtarget.hasVPOPCNTDQ()) {
1952 for (auto VT : { MVT::v16i32, MVT::v8i64 })
1953 setOperationAction(ISD::CTPOP, VT, Legal);
1954 }
1956 // Extract subvector is special because the value type
1957 // (result) is 256-bit but the source is 512-bit wide.
1958 // 128-bit was made Legal under AVX1.
1959 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1960 MVT::v16f16, MVT::v8f32, MVT::v4f64 })
1961 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1963 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
1964 MVT::v32f16, MVT::v16f32, MVT::v8f64 }) {
1965 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1966 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1967 setOperationAction(ISD::SELECT, VT, Custom);
1968 setOperationAction(ISD::VSELECT, VT, Custom);
1969 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1970 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1971 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1972 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1973 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1974 }
1975 setF16Action(MVT::v32f16, Expand);
1976 setOperationAction(ISD::FP_ROUND, MVT::v16f16, Custom);
1977 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Custom);
1978 setOperationAction(ISD::FP_EXTEND, MVT::v16f32, Legal);
1979 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v16f32, Legal);
1980 for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1981 setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1982 setOperationPromotedToType(Opc, MVT::v32f16, MVT::v32f32);
1983 }
1985 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1986 setOperationAction(ISD::MLOAD, VT, Legal);
1987 setOperationAction(ISD::MSTORE, VT, Legal);
1988 setOperationAction(ISD::MGATHER, VT, Custom);
1989 setOperationAction(ISD::MSCATTER, VT, Custom);
1990 }
1992 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1993 setOperationAction(ISD::MLOAD, VT, Legal);
1994 setOperationAction(ISD::MSTORE, VT, Legal);
1995 }
1996 if (!HasBWI) {
1997 setOperationAction(ISD::STORE, MVT::v32i16, Custom);
1998 setOperationAction(ISD::STORE, MVT::v64i8, Custom);
1999 }
2001 if (Subtarget.hasVBMI2()) {
2002 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
2003 MVT::v16i16, MVT::v8i32, MVT::v4i64,
2004 MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
2005 setOperationAction(ISD::FSHL, VT, Custom);
2006 setOperationAction(ISD::FSHR, VT, Custom);
2007 }
2009 setOperationAction(ISD::ROTL, MVT::v32i16, Custom);
2010 setOperationAction(ISD::ROTR, MVT::v8i16, Custom);
2011 setOperationAction(ISD::ROTR, MVT::v16i16, Custom);
2012 setOperationAction(ISD::ROTR, MVT::v32i16, Custom);
2013 }
2014 }
2016 // This block controls legalization for operations that don't have
2017 // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
2018 // 128/256-bit types.
2019 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
2020 // These operations are handled on non-VLX by artificially widening in
2021 // isel patterns.
2023 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i32, Custom);
2024 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Custom);
2025 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i32, Custom);
2027 if (Subtarget.hasDQI()) {
2028 // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
2029 // v2f32 UINT_TO_FP is already custom under SSE2.
2030 assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
2031 isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
2032 "Unexpected operation action!");
2033 // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
2034 setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
2035 setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
2036 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
2037 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
2038 }
2040 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
2041 setOperationAction(ISD::SMAX, VT, Legal);
2042 setOperationAction(ISD::UMAX, VT, Legal);
2043 setOperationAction(ISD::SMIN, VT, Legal);
2044 setOperationAction(ISD::UMIN, VT, Legal);
2045 setOperationAction(ISD::ABS, VT, Legal);
2046 }
2048 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
2049 setOperationAction(ISD::ROTL, VT, Custom);
2050 setOperationAction(ISD::ROTR, VT, Custom);
2051 }
2053 // Custom legalize 2x32 to get a little better code.
2054 setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
2055 setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
2057 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
2058 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
2059 setOperationAction(ISD::MSCATTER, VT, Custom);
2061 if (Subtarget.hasDQI()) {
2062 for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
2063 ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
2064 ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}) {
2065 setOperationAction(Opc, MVT::v2i64, Custom);
2066 setOperationAction(Opc, MVT::v4i64, Custom);
2067 }
2068 setOperationAction(ISD::MUL, MVT::v2i64, Legal);
2069 setOperationAction(ISD::MUL, MVT::v4i64, Legal);
2070 }
2072 if (Subtarget.hasCDI()) {
2073 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
2074 setOperationAction(ISD::CTLZ, VT, Legal);
2075 }
2076 } // Subtarget.hasCDI()
2078 if (Subtarget.hasVPOPCNTDQ()) {
2079 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
2080 setOperationAction(ISD::CTPOP, VT, Legal);
2081 }
2082 }
2084 // This block controls legalization of v32i1/v64i1, which are available with
2085 // AVX512BW.
2086 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
2087 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
2088 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
2090 for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
2091 setOperationAction(ISD::VSELECT, VT, Expand);
2092 setOperationAction(ISD::TRUNCATE, VT, Custom);
2093 setOperationAction(ISD::SETCC, VT, Custom);
2094 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2095 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
2096 setOperationAction(ISD::SELECT, VT, Custom);
2097 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2098 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
2099 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
2100 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
2101 }
2103 for (auto VT : { MVT::v16i1, MVT::v32i1 })
2104 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
2106 // Extends from v32i1 masks to 256-bit vectors.
2107 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
2108 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
2109 setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
2111 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
2112 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
2113 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
2114 }
2116 // These operations are handled on non-VLX by artificially widening in
2117 // isel patterns.
2118 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
2120 if (Subtarget.hasBITALG()) {
2121 for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
2122 setOperationAction(ISD::CTPOP, VT, Legal);
2123 }
2124 }
2126 if (!Subtarget.useSoftFloat() && Subtarget.hasFP16()) {
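// Helper that marks the baseline set of FP16 vector operations for one
// vector type; applied below to v32f16 (512-bit) and, with VLX, to
// v8f16/v16f16.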
2127 auto setGroup = [&] (MVT VT) {
2128 setOperationAction(ISD::FADD, VT, Legal);
2129 setOperationAction(ISD::STRICT_FADD, VT, Legal);
2130 setOperationAction(ISD::FSUB, VT, Legal);
2131 setOperationAction(ISD::STRICT_FSUB, VT, Legal);
2132 setOperationAction(ISD::FMUL, VT, Legal);
2133 setOperationAction(ISD::STRICT_FMUL, VT, Legal);
2134 setOperationAction(ISD::FDIV, VT, Legal);
2135 setOperationAction(ISD::STRICT_FDIV, VT, Legal);
2136 setOperationAction(ISD::FSQRT, VT, Legal);
2137 setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
2139 setOperationAction(ISD::FFLOOR, VT, Legal);
2140 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
2141 setOperationAction(ISD::FCEIL, VT, Legal);
2142 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
2143 setOperationAction(ISD::FTRUNC, VT, Legal);
2144 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
2145 setOperationAction(ISD::FRINT, VT, Legal);
2146 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
2147 setOperationAction(ISD::FNEARBYINT, VT, Legal);
2148 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
2150 setOperationAction(ISD::FROUND, VT, Custom);
2152 setOperationAction(ISD::LOAD, VT, Legal);
2153 setOperationAction(ISD::STORE, VT, Legal);
2155 setOperationAction(ISD::FMA, VT, Legal);
2156 setOperationAction(ISD::STRICT_FMA, VT, Legal);
2157 setOperationAction(ISD::VSELECT, VT, Legal);
2158 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2159 setOperationAction(ISD::SELECT, VT, Custom);
2161 setOperationAction(ISD::FNEG, VT, Custom);
2162 setOperationAction(ISD::FABS, VT, Custom);
2163 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
2164 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2165 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
2167 setOperationAction(ISD::SETCC, VT, Custom);
2168 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
2169 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
2170 };
2172 // AVX512_FP16 scalar operations
2174 setOperationAction(ISD::FREM, MVT::f16, Promote);
2175 setOperationAction(ISD::STRICT_FREM, MVT::f16, Promote);
2176 setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
2177 setOperationAction(ISD::BR_CC, MVT::f16, Expand);
2178 setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote);
2179 setOperationAction(ISD::FROUNDEVEN, MVT::f16, Legal);
2180 setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Legal);
2181 setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
2182 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
2183 setOperationAction(ISD::FMAXIMUM, MVT::f16, Custom);
2184 setOperationAction(ISD::FMINIMUM, MVT::f16, Custom);
2185 setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
2186 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
2188 setCondCodeAction(ISD::SETOEQ, MVT::f16, Expand);
2189 setCondCodeAction(ISD::SETUNE, MVT::f16, Expand);
2191 if (Subtarget.useAVX512Regs()) {
2192 setGroup(MVT::v32f16);
2193 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32f16, Custom);
2194 setOperationAction(ISD::SINT_TO_FP, MVT::v32i16, Legal);
2195 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v32i16, Legal);
2196 setOperationAction(ISD::UINT_TO_FP, MVT::v32i16, Legal);
2197 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v32i16, Legal);
2198 setOperationAction(ISD::FP_ROUND, MVT::v16f16, Legal);
2199 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Legal);
2200 setOperationAction(ISD::FP_EXTEND, MVT::v16f32, Legal);
2201 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v16f32, Legal);
2202 setOperationAction(ISD::FP_EXTEND, MVT::v8f64, Legal);
2203 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Legal);
2204 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32f16, Custom);
2206 setOperationAction(ISD::FP_TO_SINT, MVT::v32i16, Custom);
2207 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v32i16, Custom);
2208 setOperationAction(ISD::FP_TO_UINT, MVT::v32i16, Custom);
2209 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v32i16, Custom);
2210 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v32i8, MVT::v32i16);
2211 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i8,
2212 MVT::v32i16);
2213 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v32i8, MVT::v32i16);
2214 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i8,
2215 MVT::v32i16);
2216 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v32i1, MVT::v32i16);
2217 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i1,
2218 MVT::v32i16);
2219 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v32i1, MVT::v32i16);
2220 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i1,
2221 MVT::v32i16);
2223 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f16, Legal);
2224 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32f16, Legal);
2225 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32f16, Custom);
2227 setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Legal);
2228 setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Legal);
2229 }
2231 if (Subtarget.hasVLX()) {
2232 setGroup(MVT::v8f16);
2233 setGroup(MVT::v16f16);
2235 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8f16, Legal);
2236 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16f16, Custom);
2237 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Legal);
2238 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i16, Legal);
2239 setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Legal);
2240 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i16, Legal);
2241 setOperationAction(ISD::UINT_TO_FP, MVT::v16i16, Legal);
2242 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i16, Legal);
2243 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Legal);
2244 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i16, Legal);
2246 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
2247 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i16, Custom);
2248 setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
2249 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i16, Custom);
2250 setOperationAction(ISD::FP_ROUND, MVT::v8f16, Legal);
2251 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f16, Legal);
2252 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
2253 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f32, Legal);
2254 setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);
2255 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal);
2257 // INSERT_VECTOR_ELT v8f16 extended to VECTOR_SHUFFLE
2258 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8f16, Custom);
2259 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16f16, Custom);
2261 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f16, Legal);
2262 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16f16, Legal);
2263 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f16, Custom);
2265 setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Legal);
2266 setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Legal);
2267 setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Legal);
2268 setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Legal);
2270 // Need to custom widen these to prevent scalarization.
2271 setOperationAction(ISD::LOAD, MVT::v4f16, Custom);
2272 setOperationAction(ISD::STORE, MVT::v4f16, Custom);
2273 }
2274 }
2276 if (!Subtarget.useSoftFloat() &&
2277 (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16())) {
2278 addRegisterClass(MVT::v8bf16, &X86::VR128XRegClass);
2279 addRegisterClass(MVT::v16bf16, &X86::VR256XRegClass);
2280 // We set the type action of bf16 to TypeSoftPromoteHalf, but soft
2281 // promotion has no path for BUILD_VECTOR and INSERT_VECTOR_ELT, so mark
2282 // them Custom and handle them during custom lowering instead.
2283 setOperationAction(ISD::BUILD_VECTOR, MVT::bf16, Custom);
2284 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::bf16, Custom);
2285 for (auto VT : {MVT::v8bf16, MVT::v16bf16}) {
2286 setF16Action(VT, Expand);
2287 setOperationAction(ISD::FADD, VT, Expand);
2288 setOperationAction(ISD::FSUB, VT, Expand);
2289 setOperationAction(ISD::FMUL, VT, Expand);
2290 setOperationAction(ISD::FDIV, VT, Expand);
2291 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2292 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
2293 }
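// +0.0 in bf16 can be materialized with a plain register xor, so it is a
// cheap legal immediate.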
2294 addLegalFPImmediate(APFloat::getZero(APFloat::BFloat()));
2295 }
2297 if (!Subtarget.useSoftFloat() && Subtarget.hasBF16()) {
2298 addRegisterClass(MVT::v32bf16, &X86::VR512RegClass);
2299 setF16Action(MVT::v32bf16, Expand);
2300 setOperationAction(ISD::FADD, MVT::v32bf16, Expand);
2301 setOperationAction(ISD::FSUB, MVT::v32bf16, Expand);
2302 setOperationAction(ISD::FMUL, MVT::v32bf16, Expand);
2303 setOperationAction(ISD::FDIV, MVT::v32bf16, Expand);
2304 setOperationAction(ISD::BUILD_VECTOR, MVT::v32bf16, Custom);
2305 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32bf16, Custom);
2306 }
2308 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
2309 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
2310 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
2311 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
2312 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
2313 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
2315 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
2316 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
2317 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
2318 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
2319 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
2321 if (Subtarget.hasBWI()) {
2322 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
2323 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
2324 }
2326 if (Subtarget.hasFP16()) {
2327 // vcvttph2[u]dq v4f16 -> v4i32/64, v2f16 -> v2i32/64
2328 setOperationAction(ISD::FP_TO_SINT, MVT::v2f16, Custom);
2329 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f16, Custom);
2330 setOperationAction(ISD::FP_TO_UINT, MVT::v2f16, Custom);
2331 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f16, Custom);
2332 setOperationAction(ISD::FP_TO_SINT, MVT::v4f16, Custom);
2333 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f16, Custom);
2334 setOperationAction(ISD::FP_TO_UINT, MVT::v4f16, Custom);
2335 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f16, Custom);
2336 // vcvt[u]dq2ph v4i32/64 -> v4f16, v2i32/64 -> v2f16
2337 setOperationAction(ISD::SINT_TO_FP, MVT::v2f16, Custom);
2338 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f16, Custom);
2339 setOperationAction(ISD::UINT_TO_FP, MVT::v2f16, Custom);
2340 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f16, Custom);
2341 setOperationAction(ISD::SINT_TO_FP, MVT::v4f16, Custom);
2342 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f16, Custom);
2343 setOperationAction(ISD::UINT_TO_FP, MVT::v4f16, Custom);
2344 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f16, Custom);
2345 // vcvtps2phx v4f32 -> v4f16, v2f32 -> v2f16
2346 setOperationAction(ISD::FP_ROUND, MVT::v2f16, Custom);
2347 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f16, Custom);
2348 setOperationAction(ISD::FP_ROUND, MVT::v4f16, Custom);
2349 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f16, Custom);
2350 // vcvtph2psx v4f16 -> v4f32, v2f16 -> v2f32
2351 setOperationAction(ISD::FP_EXTEND, MVT::v2f16, Custom);
2352 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f16, Custom);
2353 setOperationAction(ISD::FP_EXTEND, MVT::v4f16, Custom);
2354 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f16, Custom);
2355 }
2356 }
2358 if (Subtarget.hasAMXTILE()) {
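// MVT::x86amx models the AMX tile registers (tmm0-tmm7); tile shapes are
// configured separately through LDTILECFG.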
2359 addRegisterClass(MVT::x86amx, &X86::TILERegClass);
2360 }
2362 // We want to custom lower some of our intrinsics.
2363 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
2364 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
2365 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
2366 if (!Subtarget.is64Bit()) {
2367 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
2368 }
2370 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
2371 // handle type legalization for these operations here.
2373 // FIXME: We really should do custom legalization for addition and
2374 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
2375 // than generic legalization for 64-bit multiplication-with-overflow, though.
2376 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
2377 if (VT == MVT::i64 && !Subtarget.is64Bit())
2378 continue;
2379 // Add/Sub/Mul with overflow operations are custom lowered.
2380 setOperationAction(ISD::SADDO, VT, Custom);
2381 setOperationAction(ISD::UADDO, VT, Custom);
2382 setOperationAction(ISD::SSUBO, VT, Custom);
2383 setOperationAction(ISD::USUBO, VT, Custom);
2384 setOperationAction(ISD::SMULO, VT, Custom);
2385 setOperationAction(ISD::UMULO, VT, Custom);
2387 // Support carry in as value rather than glue.
2388 setOperationAction(ISD::UADDO_CARRY, VT, Custom);
2389 setOperationAction(ISD::USUBO_CARRY, VT, Custom);
2390 setOperationAction(ISD::SETCCCARRY, VT, Custom);
2391 setOperationAction(ISD::SADDO_CARRY, VT, Custom);
2392 setOperationAction(ISD::SSUBO_CARRY, VT, Custom);
2393 }
2395 if (!Subtarget.is64Bit()) {
2396 // These libcalls are not available in 32-bit.
2397 setLibcallName(RTLIB::SHL_I128, nullptr);
2398 setLibcallName(RTLIB::SRL_I128, nullptr);
2399 setLibcallName(RTLIB::SRA_I128, nullptr);
2400 setLibcallName(RTLIB::MUL_I128, nullptr);
2401 // The MULO libcall is not part of libgcc, only compiler-rt.
2402 setLibcallName(RTLIB::MULO_I64, nullptr);
2403 }
2404 // The MULO libcall is not part of libgcc, only compiler-rt.
2405 setLibcallName(RTLIB::MULO_I128, nullptr);
2407 // Combine sin / cos into _sincos_stret if it is available.
2408 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
2409 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
2410 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
2411 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
2412 }
2414 if (Subtarget.isTargetWin64()) {
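// i128 division/remainder and i128 <-> FP conversions have no native
// instructions; on Win64 they are custom-lowered to runtime-library calls,
// with the i128 operands passed by reference as the Win64 ABI requires.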
2415 setOperationAction(ISD::SDIV, MVT::i128, Custom);
2416 setOperationAction(ISD::UDIV, MVT::i128, Custom);
2417 setOperationAction(ISD::SREM, MVT::i128, Custom);
2418 setOperationAction(ISD::UREM, MVT::i128, Custom);
2419 setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
2420 setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
2421 setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
2422 setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
2423 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
2424 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
2425 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
2426 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
2427 }
2429 // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
2430 // is. We should promote the value to 64-bits to solve this.
2431 // This is what the CRT headers do - `fmodf` is an inline header
2432 // function casting to f64 and calling `fmod`.
2433 if (Subtarget.is32Bit() &&
2434 (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
2435 for (ISD::NodeType Op :
2436 {ISD::FCEIL, ISD::STRICT_FCEIL,
2437 ISD::FCOS, ISD::STRICT_FCOS,
2438 ISD::FEXP, ISD::STRICT_FEXP,
2439 ISD::FFLOOR, ISD::STRICT_FFLOOR,
2440 ISD::FREM, ISD::STRICT_FREM,
2441 ISD::FLOG, ISD::STRICT_FLOG,
2442 ISD::FLOG10, ISD::STRICT_FLOG10,
2443 ISD::FPOW, ISD::STRICT_FPOW,
2444 ISD::FSIN, ISD::STRICT_FSIN})
2445 if (isOperationExpand(Op, MVT::f32))
2446 setOperationAction(Op, MVT::f32, Promote);
2448 // We have target-specific dag combine patterns for the following nodes:
2449 setTargetDAGCombine({ISD::VECTOR_SHUFFLE,
2450 ISD::SCALAR_TO_VECTOR,
2451 ISD::INSERT_VECTOR_ELT,
2452 ISD::EXTRACT_VECTOR_ELT,
2453 ISD::CONCAT_VECTORS,
2454 ISD::INSERT_SUBVECTOR,
2455 ISD::EXTRACT_SUBVECTOR,
2481 ISD::SIGN_EXTEND_INREG,
2482 ISD::ANY_EXTEND_VECTOR_INREG,
2483 ISD::SIGN_EXTEND_VECTOR_INREG,
2484 ISD::ZERO_EXTEND_VECTOR_INREG,
2487 ISD::STRICT_SINT_TO_FP,
2488 ISD::STRICT_UINT_TO_FP,
2496 ISD::STRICT_FP_EXTEND,
2498 ISD::STRICT_FP_ROUND});
2500 computeRegisterProperties(Subtarget.getRegisterInfo());
2502 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2503 MaxStoresPerMemsetOptSize = 8;
2504 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2505 MaxStoresPerMemcpyOptSize = 4;
2506 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2507 MaxStoresPerMemmoveOptSize = 4;
2509 // TODO: These control memcmp expansion in CGP and could be raised higher, but
2510 // that needs to be benchmarked and balanced with the potential use of vector
2511 // load/store types (PR33329, PR33914).
2512 MaxLoadsPerMemcmp = 2;
2513 MaxLoadsPerMemcmpOptSize = 2;
2515 // Default loop alignment, which can be overridden by -align-loops.
2516 setPrefLoopAlignment(Align(16));
2518 // An out-of-order CPU can speculatively execute past a predictable branch,
2519 // but a conditional move could be stalled by an expensive earlier operation.
2520 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2521 EnableExtLdPromotion = true;
2522 setPrefFunctionAlignment(Align(16));
2524 verifyIntrinsicTables();
2526 // Default to having -disable-strictnode-mutation on
2527 IsStrictFPEnabled = true;
2528 }
2530 // This has so far only been implemented for 64-bit MachO.
2531 bool X86TargetLowering::useLoadStackGuardNode() const {
2532 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2533 }
2535 bool X86TargetLowering::useStackGuardXorFP() const {
2536 // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2537 return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2538 }
2540 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2541 const SDLoc &DL) const {
2542 EVT PtrTy = getPointerTy(DAG.getDataLayout());
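// XOR32_FP/XOR64_FP are pseudo-instructions that XOR the frame pointer into
// the value; they are expanded after register allocation.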
2543 unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2544 MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2545 return SDValue(Node, 0);
2546 }
2548 TargetLoweringBase::LegalizeTypeAction
2549 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
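// Without BWI, v32i1/v64i1 are not legal mask types; split them until the
// pieces fit the legal v16i1 mask registers.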
2550 if ((VT == MVT::v32i1 || VT == MVT::v64i1) && Subtarget.hasAVX512() &&
2551 !Subtarget.hasBWI())
2552 return TypeSplitVector;
2554 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2555 !Subtarget.hasF16C() && VT.getVectorElementType() == MVT::f16)
2556 return TypeSplitVector;
2558 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2559 VT.getVectorElementType() != MVT::i1)
2560 return TypeWidenVector;
2562 return TargetLoweringBase::getPreferredVectorAction(VT);
2563 }
2565 static std::pair<MVT, unsigned>
2566 handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC,
2567 const X86Subtarget &Subtarget) {
2568 // v2i1/v4i1/v8i1/v16i1 all pass in xmm registers unless the calling
2569 // convention is one that uses k registers.
2570 if (NumElts == 2)
2571 return {MVT::v2i64, 1};
2572 if (NumElts == 4)
2573 return {MVT::v4i32, 1};
2574 if (NumElts == 8 && CC != CallingConv::X86_RegCall &&
2575 CC != CallingConv::Intel_OCL_BI)
2576 return {MVT::v8i16, 1};
2577 if (NumElts == 16 && CC != CallingConv::X86_RegCall &&
2578 CC != CallingConv::Intel_OCL_BI)
2579 return {MVT::v16i8, 1};
2580 // v32i1 passes in ymm unless we have BWI and the calling convention is
2581 // regcall.
2582 if (NumElts == 32 && (!Subtarget.hasBWI() || CC != CallingConv::X86_RegCall))
2583 return {MVT::v32i8, 1};
2584 // Split v64i1 vectors if we don't have v64i8 available.
2585 if (NumElts == 64 && Subtarget.hasBWI() && CC != CallingConv::X86_RegCall) {
2586 if (Subtarget.useAVX512Regs())
2587 return {MVT::v64i8, 1};
2588 return {MVT::v32i8, 2};
2589 }
2591 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2592 if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
2593 NumElts > 64)
2594 return {MVT::i8, NumElts};
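// Not a special mask case; signal callers to fall back to the default
// register-type computation.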
2596 return {MVT::INVALID_SIMPLE_VALUE_TYPE, 0};
2597 }
2599 MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
2600 CallingConv::ID CC,
2601 EVT VT) const {
2602 if (VT.isVector()) {
2603 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) {
2604 unsigned NumElts = VT.getVectorNumElements();
2606 MVT RegisterVT;
2607 unsigned NumRegisters;
2608 std::tie(RegisterVT, NumRegisters) =
2609 handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
2610 if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
2611 return RegisterVT;
2612 }
2614 if (VT.getVectorElementType() == MVT::f16 && VT.getVectorNumElements() < 8)
2618 // We will use more GPRs for f64 and f80 on 32 bits when x87 is disabled.
2619 if ((VT == MVT::f64 || VT == MVT::f80) && !Subtarget.is64Bit() &&
2620 !Subtarget.hasX87())
2623 if (VT.isVector() && VT.getVectorElementType() == MVT::bf16)
2624 return getRegisterTypeForCallingConv(Context, CC,
2625 VT.changeVectorElementType(MVT::f16));
2627 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                          CallingConv::ID CC,
                                                          EVT VT) const {
  if (VT.isVector()) {
    if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) {
      unsigned NumElts = VT.getVectorNumElements();

      MVT RegisterVT;
      unsigned NumRegisters;
      std::tie(RegisterVT, NumRegisters) =
          handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
      if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
        return NumRegisters;
    }

    if (VT.getVectorElementType() == MVT::f16 && VT.getVectorNumElements() < 8)
      return 1;
  }

  // We have to split f64 to 2 registers and f80 to 3 registers on 32 bits if
  // x87 is disabled.
  if (!Subtarget.is64Bit() && !Subtarget.hasX87()) {
    if (VT == MVT::f64)
      return 2;
    if (VT == MVT::f80)
      return 3;
  }

  if (VT.isVector() && VT.getVectorElementType() == MVT::bf16)
    return getNumRegistersForCallingConv(Context, CC,
                                         VT.changeVectorElementType(MVT::f16));

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
      Subtarget.hasAVX512() &&
      (!isPowerOf2_32(VT.getVectorNumElements()) ||
       (VT.getVectorNumElements() == 64 && !Subtarget.hasBWI()) ||
       VT.getVectorNumElements() > 64)) {
    RegisterVT = MVT::i8;
    IntermediateVT = MVT::i1;
    NumIntermediates = VT.getVectorNumElements();
    return NumIntermediates;
  }

  // Split v64i1 vectors if we don't have v64i8 available.
  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
      CC != CallingConv::X86_RegCall) {
    RegisterVT = MVT::v32i8;
    IntermediateVT = MVT::v32i1;
    NumIntermediates = 2;
    return 2;
  }

  // Split vNbf16 vectors according to vNf16.
  if (VT.isVector() && VT.getVectorElementType() == MVT::bf16)
    VT = VT.changeVectorElementType(MVT::f16);

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
                                          LLVMContext& Context,
                                          EVT VT) const {
  if (!VT.isVector())
    return MVT::i8;

  if (Subtarget.hasAVX512()) {
    // Figure out what this type will be legalized to.
    EVT LegalVT = VT;
    while (getTypeAction(Context, LegalVT) != TypeLegal)
      LegalVT = getTypeToTransformTo(Context, LegalVT);

    // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
    if (LegalVT.getSimpleVT().is512BitVector())
      return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());

    if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
      // If we legalized to less than a 512-bit vector, then we will use a vXi1
      // compare for vXi32/vXi64 for sure. If we have BWI we will also support
      // vXi16/vXi8.
      MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
      if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
        return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
    }
  }

  return VT.changeVectorElementTypeToInteger();
}

/// Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, Align &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getPrimitiveSizeInBits().getFixedValue() == 128)
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Align EltAlign;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      Align EltAlign;
      getMaxByValAlign(EltTy, EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
}

/// Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
uint64_t X86TargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  if (Subtarget.is64Bit()) {
    // Max of 8 and alignment of type.
    Align TyAlign = DL.getABITypeAlign(Ty);
    if (TyAlign > 8)
      return TyAlign.value();
    return 8;
  }

  Align Alignment(4);
  if (Subtarget.hasSSE1())
    getMaxByValAlign(Ty, Alignment);
  return Alignment.value();
}

/// Returns the target-specific optimal memory type for the given operation.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
/// For vector ops we check that the overall size isn't larger than our
/// preferred vector width.
EVT X86TargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  if (!FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
    if (Op.size() >= 16 &&
        (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
      // FIXME: Check if unaligned 64-byte accesses are slow.
      if (Op.size() >= 64 && Subtarget.hasAVX512() &&
          (Subtarget.getPreferVectorWidth() >= 512)) {
        return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
      }
      // FIXME: Check if unaligned 32-byte accesses are slow.
      if (Op.size() >= 32 && Subtarget.hasAVX() &&
          Subtarget.useLight256BitInstructions()) {
        // Although this isn't a well-supported type for AVX1, we'll let
        // legalization and shuffle lowering produce the optimal codegen. If we
        // choose an optimal type with a vector element larger than a byte,
        // getMemsetStores() may create an intermediate splat (using an integer
        // multiply) before we splat as a vector.
        return MVT::v32i8;
      }
      if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
        return MVT::v16i8;
      // TODO: Can SSE1 handle a byte vector?
      // If we have SSE1 registers we should be able to use them.
      if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
          (Subtarget.getPreferVectorWidth() >= 128))
        return MVT::v4f32;
    } else if (((Op.isMemcpy() && !Op.isMemcpyStrSrc()) || Op.isZeroMemset()) &&
               Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
      // Do not use f64 to lower memcpy if source is string constant. It's
      // better to use i32 to avoid the loads.
      // Also, do not use f64 to lower memset unless this is a memset of zeros.
      // The gymnastics of splatting a byte value into an XMM register and then
      // only using 8-byte stores (because this is a CPU with slow unaligned
      // 16-byte accesses) makes that a loser.
      return MVT::f64;
    }
  }
  // This is a compromise. If we reach here, unaligned accesses may be slow on
  // this target. However, creating smaller, aligned accesses could be even
  // slower and would certainly be a lot more code.
  if (Subtarget.is64Bit() && Op.size() >= 8)
    return MVT::i64;
  return MVT::i32;
}

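// Illustrative example: a 32-byte zero-memset on an AVX2 target that prefers
// full 256-bit ops takes the Op.size() >= 32 path above and is emitted as
// MVT::v32i8 stores, while the same memset on an SSE2-only target uses
// MVT::v16i8.
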
bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
  if (VT == MVT::f32)
    return Subtarget.hasSSE1();
  if (VT == MVT::f64)
    return Subtarget.hasSSE2();
  return true;
}

static bool isBitAligned(Align Alignment, uint64_t SizeInBits) {
  return (8 * Alignment.value()) % SizeInBits == 0;
}

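// For example, isBitAligned(Align(16), 128) is true: a 16-byte-aligned
// 128-bit access can never be penalized as misaligned, which is why
// isMemoryAccessFast below checks this before any subtarget flags.
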
bool X86TargetLowering::isMemoryAccessFast(EVT VT, Align Alignment) const {
  if (isBitAligned(Alignment, VT.getSizeInBits()))
    return true;
  switch (VT.getSizeInBits()) {
  default:
    // 8-byte and under are always assumed to be fast.
    return true;
  case 128:
    return !Subtarget.isUnalignedMem16Slow();
  case 256:
    return !Subtarget.isUnalignedMem32Slow();
    // TODO: What about AVX-512 (512-bit) accesses?
  }
}

bool X86TargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, Align Alignment, MachineMemOperand::Flags Flags,
    unsigned *Fast) const {
  if (Fast)
    *Fast = isMemoryAccessFast(VT, Alignment);
  // NonTemporal vector memory ops must be aligned.
  if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
    // NT loads can only be vector aligned, so if it's less aligned than the
    // minimum vector size (which we can split the vector down to), we might as
    // well use a regular unaligned vector load.
    // We don't have any NT loads pre-SSE41.
    if (!!(Flags & MachineMemOperand::MOLoad))
      return (Alignment < 16 || !Subtarget.hasSSE41());
    return false;
  }
  // Misaligned accesses of any size are always allowed.
  return true;
}

bool X86TargetLowering::allowsMemoryAccess(LLVMContext &Context,
                                           const DataLayout &DL, EVT VT,
                                           unsigned AddrSpace, Align Alignment,
                                           MachineMemOperand::Flags Flags,
                                           unsigned *Fast) const {
  if (Fast)
    *Fast = isMemoryAccessFast(VT, Alignment);
  if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
    if (allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags,
                                       /*Fast=*/nullptr))
      return true;
    // NonTemporal vector memory ops are special, and must be aligned.
    if (!isBitAligned(Alignment, VT.getSizeInBits()))
      return false;
    switch (VT.getSizeInBits()) {
    case 128:
      if (!!(Flags & MachineMemOperand::MOLoad) && Subtarget.hasSSE41())
        return true;
      if (!!(Flags & MachineMemOperand::MOStore) && Subtarget.hasSSE2())
        return true;
      break;
    case 256:
      if (!!(Flags & MachineMemOperand::MOLoad) && Subtarget.hasAVX2())
        return true;
      if (!!(Flags & MachineMemOperand::MOStore) && Subtarget.hasAVX())
        return true;
      break;
    case 512:
      if (Subtarget.hasAVX512())
        return true;
      break;
    }
    return false; // Don't have NonTemporal vector memory ops of this size.
  }
  return true;
}

/// Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
  // symbol.
  if (isPositionIndependent() && Subtarget.isPICStyleGOT())
    return MachineJumpTableInfo::EK_Custom32;

  // Otherwise, use the normal jump table encoding heuristics.
  return TargetLowering::getJumpTableEncoding();
}

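// The EK_Custom32 entries chosen above are materialized by
// LowerCustomJumpTableEntry below as @GOTOFF expressions.
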
bool X86TargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && ValueVT == MVT::bf16 && PartVT == MVT::f32) {
    unsigned ValueBits = ValueVT.getSizeInBits();
    unsigned PartBits = PartVT.getSizeInBits();
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(ValueBits), Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::getIntegerVT(PartBits), Val);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    Parts[0] = Val;
    return true;
  }
  return false;
}

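// joinRegisterPartsIntoValue below performs the inverse transform, so together
// these two hooks carry bf16 values in the low 16 bits of an f32 register part
// across ABI boundaries.
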
SDValue X86TargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  if (IsABIRegCopy && ValueVT == MVT::bf16 && PartVT == MVT::f32) {
    unsigned ValueBits = ValueVT.getSizeInBits();
    unsigned PartBits = PartVT.getSizeInBits();
    SDValue Val = Parts[0];

    Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(PartBits), Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::getIntegerVT(ValueBits), Val);
    Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    return Val;
  }
  return SDValue();
}

bool X86TargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                              ArgListTy &Args) const {
  // Only relabel X86-32 for C / Stdcall CCs.
  if (Subtarget.is64Bit())
    return;
  if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
    return;
  unsigned ParamRegs = 0;
  if (auto *M = MF->getFunction().getParent())
    ParamRegs = M->getNumberRegisterParameters();

  // Mark the first N int arguments as being passed in registers.
  for (auto &Arg : Args) {
    Type *T = Arg.Ty;
    if (T->isIntOrPtrTy())
      if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
        unsigned numRegs = 1;
        if (MF->getDataLayout().getTypeAllocSize(T) > 4)
          numRegs = 2;
        if (ParamRegs < numRegs)
          return;
        ParamRegs -= numRegs;
        Arg.IsInReg = true;
      }
  }
}

const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                             const MachineBasicBlock *MBB,
                                             unsigned uid, MCContext &Ctx) const {
  assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
  // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
  // entries.
  return MCSymbolRefExpr::create(MBB->getSymbol(),
                                 MCSymbolRefExpr::VK_GOTOFF, Ctx);
}

/// Returns relocation base for the given PIC jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget.is64Bit())
    // This doesn't have SDLoc associated with it, but is not really the
    // same as a Register.
    return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  return Table;
}

/// This returns the relocation base for the given PIC jumptable,
/// the same as getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
                             MCContext &Ctx) const {
  // X86-64 uses RIP relative addressing based on the jump table label.
  if (Subtarget.isPICStyleRIPRel())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  // Otherwise, the reference is relative to the PIC base.
  return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
}

std::pair<const TargetRegisterClass *, uint8_t>
X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(TRI, VT);
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
    break;
  case MVT::x86mmx:
    RRC = &X86::VR64RegClass;
    break;
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
  case MVT::v8f32: case MVT::v4f64:
  case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
  case MVT::v16f32: case MVT::v8f64:
    RRC = &X86::VR128XRegClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}

unsigned X86TargetLowering::getAddressSpace() const {
  if (Subtarget.is64Bit())
    return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
  return 256;
}

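// Note: 256 and 257 are the x86 segment-relative address spaces (X86AS::GS
// and X86AS::FS in X86.h): the kernel code model keeps the guard at %gs,
// user-mode x86-64 at %fs, and 32-bit targets at %gs.
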
static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
  return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
         (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
}

static Constant* SegmentOffset(IRBuilderBase &IRB,
                               int Offset, unsigned AddressSpace) {
  return ConstantExpr::getIntToPtr(
      ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
      Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
}

Value *X86TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
  // glibc, bionic, and Fuchsia have a special slot for the stack guard in
  // tcbhead_t; use it instead of the usual global variable (see
  // sysdeps/{i386,x86_64}/nptl/tls.h)
  if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
    unsigned AddressSpace = getAddressSpace();

    // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
    if (Subtarget.isTargetFuchsia())
      return SegmentOffset(IRB, 0x10, AddressSpace);

    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
    // Specially, some users may customize the base reg and offset.
    int Offset = M->getStackProtectorGuardOffset();
    // If we don't set -stack-protector-guard-offset value:
    // %fs:0x28, unless we're using a Kernel code model, in which case
    // it's %gs:0x28. gs:0x14 on i386.
    if (Offset == INT_MAX)
      Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;

    StringRef GuardReg = M->getStackProtectorGuardReg();
    if (GuardReg == "fs")
      AddressSpace = X86AS::FS;
    else if (GuardReg == "gs")
      AddressSpace = X86AS::GS;

    // Use symbol guard if user specifies one.
    StringRef GuardSymb = M->getStackProtectorGuardSymbol();
    if (!GuardSymb.empty()) {
      GlobalVariable *GV = M->getGlobalVariable(GuardSymb);
      if (!GV) {
        Type *Ty = Subtarget.is64Bit() ? Type::getInt64Ty(M->getContext())
                                       : Type::getInt32Ty(M->getContext());
        GV = new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage,
                                nullptr, GuardSymb, nullptr,
                                GlobalValue::NotThreadLocal, AddressSpace);
        if (!Subtarget.isTargetDarwin())
          GV->setDSOLocal(M->getDirectAccessExternalData());
      }
      return GV;
    }

    return SegmentOffset(IRB, Offset, AddressSpace);
  }
  return TargetLowering::getIRStackGuard(IRB);
}

void X86TargetLowering::insertSSPDeclarations(Module &M) const {
  // MSVC CRT provides functionalities for stack protection.
  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
    // MSVC CRT has a global variable holding security cookie.
    M.getOrInsertGlobal("__security_cookie",
                        Type::getInt8PtrTy(M.getContext()));

    // MSVC CRT has a function to validate security cookie.
    FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
        "__security_check_cookie", Type::getVoidTy(M.getContext()),
        Type::getInt8PtrTy(M.getContext()));
    if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
      F->setCallingConv(CallingConv::X86_FastCall);
      F->addParamAttr(0, Attribute::AttrKind::InReg);
    }
    return;
  }

  StringRef GuardMode = M.getStackProtectorGuard();

  // glibc, bionic, and Fuchsia have a special slot for the stack guard.
  if ((GuardMode == "tls" || GuardMode.empty()) &&
      hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
    return;
  TargetLowering::insertSSPDeclarations(M);
}

Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
  // MSVC CRT has a global variable holding security cookie.
  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
    return M.getGlobalVariable("__security_cookie");
  }
  return TargetLowering::getSDagStackGuard(M);
}

Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
  // MSVC CRT has a function to validate security cookie.
  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
    return M.getFunction("__security_check_cookie");
  }
  return TargetLowering::getSSPStackGuardCheck(M);
}

Value *
X86TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
  if (Subtarget.getTargetTriple().isOSContiki())
    return getDefaultSafeStackPointerLocation(IRB, false);

  // Android provides a fixed TLS slot for the SafeStack pointer. See the
  // definition of TLS_SLOT_SAFESTACK in
  // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
  if (Subtarget.isTargetAndroid()) {
    // %fs:0x48, unless we're using a Kernel code model, in which case it's %gs:
    // %gs:0x24 on i386.
    int Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
    return SegmentOffset(IRB, Offset, getAddressSpace());
  }

  // Fuchsia is similar.
  if (Subtarget.isTargetFuchsia()) {
    // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
    return SegmentOffset(IRB, 0x18, getAddressSpace());
  }

  return TargetLowering::getSafeStackPointerLocation(IRB);
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool X86TargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
}

const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
  return ScratchRegs;
}

ArrayRef<MCPhysReg> X86TargetLowering::getRoundingControlRegisters() const {
  // FIXME: We should def X86::FPCW for x87 as well. But it affects a lot of lit
  // tests at the moment, which is not what we expected.
  static const MCPhysReg RCRegs[] = {X86::MXCSR};
  return RCRegs;
}

/// Lowers mask values (v*i1) to the local register values.
/// \returns DAG node after lowering to register type.
static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
                               const SDLoc &DL, SelectionDAG &DAG) {
  EVT ValVT = ValArg.getValueType();

  if (ValVT == MVT::v1i1)
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ValLoc, ValArg,
                       DAG.getIntPtrConstant(0, DL));

  if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
      (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
    // Two stage lowering might be required:
    // bitcast:   v8i1 -> i8  / v16i1 -> i16
    // anyextend: i8   -> i32 / i16   -> i32
    EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
    SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
    if (ValLoc == MVT::i32)
      ValToCopy = DAG.getNode(ISD::ANY_EXTEND, DL, ValLoc, ValToCopy);
    return ValToCopy;
  }

  if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
      (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
    // One stage lowering is required:
    // bitcast: v32i1 -> i32 / v64i1 -> i64
    return DAG.getBitcast(ValLoc, ValArg);
  }

  return DAG.getNode(ISD::ANY_EXTEND, DL, ValLoc, ValArg);
}

/// Breaks a v64i1 value into two registers and adds the new nodes to the DAG.
static void Passv64i1ArgInRegs(
    const SDLoc &DL, SelectionDAG &DAG, SDValue &Arg,
    SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass, CCValAssign &VA,
    CCValAssign &NextVA, const X86Subtarget &Subtarget) {
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
  assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
         "The value should reside in two registers");

  // Before splitting the value we cast it to i64.
  Arg = DAG.getBitcast(MVT::i64, Arg);

  // Split the value into two i32 halves.
  SDValue Lo, Hi;
  std::tie(Lo, Hi) = DAG.SplitScalar(Arg, DL, MVT::i32, MVT::i32);

  // Attach the two i32 halves to their corresponding registers.
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
}

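// getv64i1Argument further below performs the inverse operation: it
// reassembles a v64i1 value from the two 32-bit registers produced here.
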
SDValue
X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  // In some cases we need to disable registers from the default CSR list.
  // For example, when they are used as return registers (preserve_* and X86's
  // regcall) or for argument passing (X86's regcall).
  bool ShouldDisableCalleeSavedRegister =
      shouldDisableRetRegFromCSR(CallConv) ||
      MF.getFunction().hasFnAttribute("no_caller_saved_registers");

  if (CallConv == CallingConv::X86_INTR && !Outs.empty())
    report_fatal_error("X86 interrupts may not return any value");

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_X86);

  SmallVector<std::pair<Register, SDValue>, 4> RetVals;
  for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
       ++I, ++OutsIndex) {
    CCValAssign &VA = RVLocs[I];
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Add the register to the CalleeSaveDisableRegs list.
    if (ShouldDisableCalleeSavedRegister)
      MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());

    SDValue ValToCopy = OutVals[OutsIndex];
    EVT ValVT = ValToCopy.getValueType();

    // Promote values to the appropriate types.
    if (VA.getLocInfo() == CCValAssign::SExt)
      ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::ZExt)
      ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::AExt) {
      if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
        ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
      else
        ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
    } else if (VA.getLocInfo() == CCValAssign::BCvt)
      ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);

    assert(VA.getLocInfo() != CCValAssign::FPExt &&
           "Unexpected FP-extend for return value.");

    // Report an error if we have attempted to return a value via an XMM
    // register and SSE was disabled.
    if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
      errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
      VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
               ValVT == MVT::f64) {
      // When returning a double via an XMM register, report an error if SSE2
      // is disabled.
      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
      VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
    }

    // Returns in ST0/ST1 are handled specially: these are pushed as operands
    // to the RET instruction and handled by the FP Stackifier.
    if (VA.getLocReg() == X86::FP0 ||
        VA.getLocReg() == X86::FP1) {
      // If this is a copy from an xmm register to ST(0), use an FPExtend to
      // change the value to the FP stack register class.
      if (isScalarFPTypeInSSEReg(VA.getValVT()))
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
      RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
      // Don't emit a copytoreg.
      continue;
    }

    // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
    // which is returned in RAX / RDX.
    if (Subtarget.is64Bit()) {
      if (ValVT == MVT::x86mmx) {
        if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
          ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
          ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                                  ValToCopy);
          // If we don't have SSE2 available, convert to v4f32 so the generated
          // register is legal.
          if (!Subtarget.hasSSE2())
            ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
        }
      }
    }

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");

      Passv64i1ArgInRegs(dl, DAG, ValToCopy, RetVals, VA, RVLocs[++I],
                         Subtarget);

      // Add the second register to the CalleeSaveDisableRegs list.
      if (ShouldDisableCalleeSavedRegister)
        MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
    } else {
      RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
    }
  }

  SDValue Glue;
  SmallVector<SDValue, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Operand #1 = Bytes To Pop
  RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
                                         MVT::i32));

  // Copy the result values into the output registers.
  for (auto &RetVal : RetVals) {
    if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
      RetOps.push_back(RetVal.second);
      continue; // Don't emit a copytoreg.
    }

    Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(
        DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
  }

  // Swift calling convention does not require that we copy the sret argument
  // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.

  // All x86 ABIs require that for returning structs by value we copy
  // the sret argument into %rax/%eax (depending on ABI) for the return.
  // We saved the argument into a virtual register in the entry block,
  // so now we copy the value out and into %rax/%eax.
  //
  // Checking Function.hasStructRetAttr() here is insufficient because the IR
  // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
  // false, then an sret argument may be implicitly inserted in the SelDAG. In
  // either case FuncInfo->setSRetReturnReg() will have been called.
  if (Register SRetReg = FuncInfo->getSRetReturnReg()) {
    // When we have both sret and another return value, we should use the
    // original Chain stored in RetOps[0], instead of the current Chain updated
    // in the above loop. If we only have sret, RetOps[0] equals to Chain.
    //
    // For the case of sret and another return value, we have
    //   Chain_0 at the function entry
    //   Chain_1 = getCopyToReg(Chain_0) in the above loop
    // If we use Chain_1 in getCopyFromReg, we will have
    //   Val = getCopyFromReg(Chain_1)
    //   Chain_2 = getCopyToReg(Chain_1, Val) from below
    //
    // getCopyToReg(Chain_0) will be glued together with
    // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
    // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
    //   Data dependency from Unit B to Unit A due to usage of Val in
    //     getCopyToReg(Chain_1, Val)
    //   Chain dependency from Unit A to Unit B
    //
    // So here, we use RetOps[0] (i.e. Chain_0) for getCopyFromReg.
    SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
                                     getPointerTy(MF.getDataLayout()));

    Register RetValReg
        = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
          X86::RAX : X86::EAX;
    Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Glue);
    Glue = Chain.getValue(1);

    // RAX/EAX now acts like a return value.
    RetOps.push_back(
        DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));

    // Add the returned register to the CalleeSaveDisableRegs list. Don't do
    // this however for preserve_most/preserve_all to minimize the number of
    // callee-saved registers for these CCs.
    if (ShouldDisableCalleeSavedRegister &&
        CallConv != CallingConv::PreserveAll &&
        CallConv != CallingConv::PreserveMost)
      MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
  }

  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (X86::GR64RegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i64));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  X86ISD::NodeType opcode = X86ISD::RET_GLUE;
  if (CallConv == CallingConv::X86_INTR)
    opcode = X86ISD::IRET;
  return DAG.getNode(opcode, dl, MVT::Other, RetOps);
}

bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() != ISD::FP_EXTEND)
    return false;

  bool HasRet = false;
  for (const SDNode *U : Copy->uses()) {
    if (U->getOpcode() != X86ISD::RET_GLUE)
      return false;
    // If we are returning more than one value, we can definitely
    // not make a tail call, see PR19530.
    if (U->getNumOperands() > 4)
      return false;
    if (U->getNumOperands() == 4 &&
        U->getOperand(U->getNumOperands() - 1).getValueType() != MVT::Glue)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}

EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                           ISD::NodeType ExtendKind) const {
  MVT ReturnMVT = MVT::i32;

  bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
  if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
    // The ABI does not require i1, i8 or i16 to be extended.
    //
    // On Darwin, there is code in the wild relying on Clang's old behaviour of
    // always extending i8/i16 return values, so keep doing that for now.
    ReturnMVT = MVT::i8;
  }

  EVT MinVT = getRegisterType(Context, ReturnMVT);
  return VT.bitsLT(MinVT) ? MinVT : VT;
}

/// Reads two 32 bit registers and creates a 64 bit mask value.
/// \param VA The current 32 bit value that needs to be assigned.
/// \param NextVA The next 32 bit value that needs to be assigned.
/// \param Root The parent DAG node.
/// \param [in,out] InGlue Represents SDValue in the parent DAG node for
///                        glue purposes. In case the DAG is already using a
///                        physical register instead of a virtual one, we
///                        should glue our new SDValue to InGlue.
/// \return a new 64 bit wide SDValue.
static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
                                SDValue &Root, SelectionDAG &DAG,
                                const SDLoc &DL, const X86Subtarget &Subtarget,
                                SDValue *InGlue = nullptr) {
  assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
  assert(VA.getValVT() == MVT::v64i1 &&
         "Expecting first location of 64 bit width type");
  assert(NextVA.getValVT() == VA.getValVT() &&
         "The locations should have the same type");
  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
         "The values should reside in two registers");

  SDValue Lo, Hi;
  SDValue ArgValueLo, ArgValueHi;

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterClass *RC = &X86::GR32RegClass;

  // Read a 32 bit value from the registers.
  if (nullptr == InGlue) {
    // When no physical register is present,
    // create an intermediate virtual register.
    Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
    ArgValueLo = DAG.getCopyFromReg(Root, DL, Reg, MVT::i32);
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValueHi = DAG.getCopyFromReg(Root, DL, Reg, MVT::i32);
  } else {
    // When a physical register is available, read the value from it and glue
    // the reads together.
    ArgValueLo =
        DAG.getCopyFromReg(Root, DL, VA.getLocReg(), MVT::i32, *InGlue);
    *InGlue = ArgValueLo.getValue(2);
    ArgValueHi =
        DAG.getCopyFromReg(Root, DL, NextVA.getLocReg(), MVT::i32, *InGlue);
    *InGlue = ArgValueHi.getValue(2);
  }

  // Convert the low i32 half into v32i1.
  Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);

  // Likewise for the high half.
  Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);

  // Concatenate the two values together.
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v64i1, Lo, Hi);
}

/// The function will lower a register of various sizes (8/16/32/64)
/// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1).
/// \returns a DAG node containing the operand after lowering to mask type.
static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
                               const EVT &ValLoc, const SDLoc &DL,
                               SelectionDAG &DAG) {
  SDValue ValReturned = ValArg;

  if (ValVT == MVT::v1i1)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, ValReturned);

  if (ValVT == MVT::v64i1) {
    // On a 32 bit machine, this case is handled by getv64i1Argument.
    assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
    // On a 64 bit machine, there is no need to truncate the value, only
    // bitcast it.
  } else {
    MVT MaskLenVT;
    switch (ValVT.getSimpleVT().SimpleTy) {
    case MVT::v8i1:
      MaskLenVT = MVT::i8;
      break;
    case MVT::v16i1:
      MaskLenVT = MVT::i16;
      break;
    case MVT::v32i1:
      MaskLenVT = MVT::i32;
      break;
    default:
      llvm_unreachable("Expecting a vector of i1 types");
    }

    ValReturned = DAG.getNode(ISD::TRUNCATE, DL, MaskLenVT, ValReturned);
  }
  return DAG.getBitcast(ValVT, ValReturned);
}

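// For example, a v16i1 mask that arrives in an i32 location is truncated to
// i16 and then bitcast to v16i1, while a v64i1 value in an i64 location needs
// only the final bitcast.
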
/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue X86TargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    uint32_t *RegMask) const {
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
       ++I, ++InsIndex) {
    CCValAssign &VA = RVLocs[I];
    EVT CopyVT = VA.getLocVT();

    // In some calling conventions we need to remove the used registers
    // from the register mask.
    if (RegMask) {
      for (MCPhysReg SubReg : TRI->subregs_inclusive(VA.getLocReg()))
        RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
    }

    // Report an error if there was an attempt to return FP values via XMM
    // registers with SSE disabled.
    if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
      errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
      if (VA.getLocReg() == X86::XMM1)
        VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
      else
        VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
               CopyVT == MVT::f64) {
      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
      if (VA.getLocReg() == X86::XMM1)
        VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
      else
        VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
    }

    // If we prefer to use the value in xmm registers, copy it out as f80 and
    // use a truncate to move it from fp stack reg to xmm reg.
    bool RoundAfterCopy = false;
    if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
        isScalarFPTypeInSSEReg(VA.getValVT())) {
      if (!Subtarget.hasX87())
        report_fatal_error("X87 register return with X87 disabled");
      CopyVT = MVT::f80;
      RoundAfterCopy = (CopyVT != VA.getLocVT());
    }

    SDValue Val;
    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");
      Val =
          getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InGlue);
    } else {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InGlue)
                  .getValue(1);
      Val = Chain.getValue(0);
      InGlue = Chain.getValue(2);
    }

    if (RoundAfterCopy)
      Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
                        // This truncation won't change the value.
                        DAG.getIntPtrConstant(1, dl, /*isTarget=*/true));

    if (VA.isExtInLoc()) {
      if (VA.getValVT().isVector() &&
          VA.getValVT().getScalarType() == MVT::i1 &&
          ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
           (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
        // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8.
        Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
      } else
        Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
    }

    if (VA.getLocInfo() == CCValAssign::BCvt)
      Val = DAG.getBitcast(VA.getValVT(), Val);

    InVals.push_back(Val);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//                C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  StdCall calling convention seems to be standard for many Windows' API
//  routines and around. It differs from C calling convention just a little:
//  callee should clean up the stack, not caller. Symbols should be also
//  decorated in some fancy way :) It doesn't support any vector arguments.
//  For info on fast calling convention see Fast Calling Convention (tail call)
//  implementation LowerX86_32FastCCCallTo.

/// Determines whether Args, either a set of outgoing arguments to a call, or a
/// set of incoming args of a call, contains an sret pointer that the callee
/// pops.
template <typename T>
static bool hasCalleePopSRet(const SmallVectorImpl<T> &Args,
                             const X86Subtarget &Subtarget) {
  // Not C++20 (yet), so no concepts available.
  static_assert(std::is_same_v<T, ISD::OutputArg> ||
                    std::is_same_v<T, ISD::InputArg>,
                "requires ISD::OutputArg or ISD::InputArg");

  // Only 32-bit pops the sret. It's a 64-bit world these days, so early-out
  // for most compilations.
  if (!Subtarget.is32Bit())
    return false;

  if (Args.empty())
    return false;

  // Most calls do not have an sret argument, check the arg next.
  const ISD::ArgFlagsTy &Flags = Args[0].Flags;
  if (!Flags.isSRet() || Flags.isInReg())
    return false;

  // The MSVC ABI does not pop the sret.
  if (Subtarget.getTargetTriple().isOSMSVCRT())
    return false;

  // MCUs don't pop the sret.
  if (Subtarget.isTargetMCU())
    return false;

  // Callee pops argument.
  return true;
}

/// Make a copy of an aggregate at address specified by "Src" to address
/// "Dst" with size and alignment information specified by the specific
/// parameter attribute. The copy will be passed as a byval function parameter.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getIntPtrConstant(Flags.getByValSize(), dl);

  return DAG.getMemcpy(
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
      /*isVolatile*/ false, /*AlwaysInline=*/true,
      /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
          CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
          CC == CallingConv::Tail || CC == CallingConv::SwiftTail);
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  // C calling conventions:
  case CallingConv::C:
  case CallingConv::Win64:
  case CallingConv::X86_64_SysV:
  // Callee pop conventions:
  case CallingConv::X86_ThisCall:
  case CallingConv::X86_StdCall:
  case CallingConv::X86_VectorCall:
  case CallingConv::X86_FastCall:
  // Swift:
  case CallingConv::Swift:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

/// Return true if the function is being made into a tailcall target by
/// changing its ABI.
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
  return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) ||
         CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
}

bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  if (!CI->isTailCall())
    return false;

  CallingConv::ID CalleeCC = CI->getCallingConv();
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  return true;
}

SDValue
X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    const SDLoc &dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    MachineFrameInfo &MFI, unsigned i) const {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags = Ins[i].Flags;
  bool AlwaysUseMutable = shouldGuaranteeTCO(
      CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();

  MVT PtrVT = getPointerTy(DAG.getDataLayout());

  // If the value is passed by pointer, we have the address passed instead of
  // the value itself. No need to extend if the mask value and location share
  // the same type.
  bool ExtendedInMem =
      VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
      VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();

  EVT ValVT;
  if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
    ValVT = VA.getLocVT();
  else
    ValVT = VA.getValVT();

  // FIXME: For now, all byval parameter objects are marked mutable. This can be
  // changed with more analysis.
  // In case of tail call optimization mark all arguments mutable. Since they
  // could be overwritten by lowering of arguments in case of a tail call.
  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.

    // FIXME: For now, all byval parameter objects are marked as aliasing. This
    // can be improved with deeper analysis.
    int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
                                   /*isAliased=*/true);
    return DAG.getFrameIndex(FI, PtrVT);
  }

  EVT ArgVT = Ins[i].ArgVT;

  // If this is a vector that has been split into multiple parts, don't elide
  // the copy. The layout on the stack may not match the packed in-memory
  // layout.
  bool ScalarizedVector = ArgVT.isVector() && !VA.getLocVT().isVector();

  // This is an argument in memory. We might be able to perform copy elision.
  // If the argument is passed directly in memory without any extension, then
  // we can perform copy elision. Large vector types, for example, may be
  // passed indirectly by pointer.
  if (Flags.isCopyElisionCandidate() &&
      VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem &&
      !ScalarizedVector) {
    SDValue PartAddr;
    if (Ins[i].PartOffset == 0) {
      // If this is a one-part value or the first part of a multi-part value,
      // create a stack object for the entire argument value type and return a
      // load from our portion of it. This assumes that if the first part of an
      // argument is in memory, the rest will also be in memory.
      int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
                                     /*IsImmutable=*/false);
      PartAddr = DAG.getFrameIndex(FI, PtrVT);
      return DAG.getLoad(
          ValVT, dl, Chain, PartAddr,
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
    } else {
      // This is not the first piece of an argument in memory. See if there is
      // already a fixed stack object including this offset. If so, assume it
      // was created by the PartOffset == 0 branch above and create a load from
      // the appropriate offset into it.
      int64_t PartBegin = VA.getLocMemOffset();
      int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
      int FI = MFI.getObjectIndexBegin();
      for (; MFI.isFixedObjectIndex(FI); ++FI) {
        int64_t ObjBegin = MFI.getObjectOffset(FI);
        int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
        if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
          break;
      }
      if (MFI.isFixedObjectIndex(FI)) {
        SDValue Addr =
            DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
                        DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
        return DAG.getLoad(ValVT, dl, Chain, Addr,
                           MachinePointerInfo::getFixedStack(
                               DAG.getMachineFunction(), FI,
                               Ins[i].PartOffset));
      }
    }
  }

  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
                                 VA.getLocMemOffset(), isImmutable);

  // Set SExt or ZExt flag.
  if (VA.getLocInfo() == CCValAssign::ZExt) {
    MFI.setObjectZExt(FI, true);
  } else if (VA.getLocInfo() == CCValAssign::SExt) {
    MFI.setObjectSExt(FI, true);
  }

  MaybeAlign Alignment;
  if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
      ValVT != MVT::f80)
    Alignment = MaybeAlign(4);
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  SDValue Val = DAG.getLoad(
      ValVT, dl, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
      Alignment);
  return ExtendedInMem
             ? (VA.getValVT().isVector()
                    ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
                    : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
             : Val;
}

// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
                                                const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());

  if (Subtarget.isCallingConvWin64(CallConv)) {
    static const MCPhysReg GPR64ArgRegsWin64[] = {
      X86::RCX, X86::RDX, X86::R8, X86::R9
    };
    return ArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
  }

  static const MCPhysReg GPR64ArgRegs64Bit[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  return ArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
}

// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
                                                CallingConv::ID CallConv,
                                                const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());
  if (Subtarget.isCallingConvWin64(CallConv)) {
    // The XMM registers which might contain var arg parameters are shadowed
    // in their paired GPR. So we only need to save the GPR to their home
    // registers.
    // TODO: __vectorcall will change this.
    return std::nullopt;
  }

  bool isSoftFloat = Subtarget.useSoftFloat();
  if (isSoftFloat || !Subtarget.hasSSE1())
    // Kernel mode asks for SSE to be disabled, so there are no XMM argument
    // registers.
    return std::nullopt;

  static const MCPhysReg XMMArgRegs64Bit[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };
  return ArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
}

#ifndef NDEBUG
static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
  return llvm::is_sorted(
      ArgLocs, [](const CCValAssign &A, const CCValAssign &B) -> bool {
        return A.getValNo() < B.getValNo();
      });
}
#endif

namespace {
/// This is a helper class for lowering variable arguments parameters.
class VarArgsLoweringHelper {
public:
  VarArgsLoweringHelper(X86MachineFunctionInfo *FuncInfo, const SDLoc &Loc,
                        SelectionDAG &DAG, const X86Subtarget &Subtarget,
                        CallingConv::ID CallConv, CCState &CCInfo)
      : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
        TheMachineFunction(DAG.getMachineFunction()),
        TheFunction(TheMachineFunction.getFunction()),
        FrameInfo(TheMachineFunction.getFrameInfo()),
        FrameLowering(*Subtarget.getFrameLowering()),
        TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
        CCInfo(CCInfo) {}

  // Lower variable arguments parameters.
  void lowerVarArgsParameters(SDValue &Chain, unsigned StackSize);

private:
  void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);

  void forwardMustTailParameters(SDValue &Chain);

  bool is64Bit() const { return Subtarget.is64Bit(); }
  bool isWin64() const { return Subtarget.isCallingConvWin64(CallConv); }

  X86MachineFunctionInfo *FuncInfo;
  const SDLoc &DL;
  SelectionDAG &DAG;
  const X86Subtarget &Subtarget;
  MachineFunction &TheMachineFunction;
  const Function &TheFunction;
  MachineFrameInfo &FrameInfo;
  const TargetFrameLowering &FrameLowering;
  const TargetLowering &TargLowering;
  CallingConv::ID CallConv;
  CCState &CCInfo;
};
} // end anonymous namespace

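// Intended use (as in LowerFormalArguments later in this file): construct the
// helper with the function's CCState once argument analysis is complete, then
// call lowerVarArgsParameters(Chain, StackSize).
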
void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
    SDValue &Chain, unsigned StackSize) {
  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start. We
  // can skip this if there are no va_start calls.
  if (is64Bit() || (CallConv != CallingConv::X86_FastCall &&
                    CallConv != CallingConv::X86_ThisCall)) {
    FuncInfo->setVarArgsFrameIndex(
        FrameInfo.CreateFixedObject(1, StackSize, true));
  }

  // 64-bit calling conventions support varargs and register parameters, so we
  // have to do extra work to spill them in the prologue.
  if (is64Bit()) {
    // Find the first unallocated argument registers.
    ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
    ArrayRef<MCPhysReg> ArgXMMs =
        get64BitArgumentXMMs(TheMachineFunction, CallConv, Subtarget);
    unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);

    assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
           "SSE register cannot be used when SSE is disabled!");

    if (isWin64()) {
      // Get to the caller-allocated home save location. Add 8 to account
      // for the return address.
      int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
      FuncInfo->setRegSaveFrameIndex(
          FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
      // Fixup to set vararg frame on shadow area (4 x i64).
      if (NumIntRegs < 4)
        FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
    } else {
      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so
      // they may be loaded by dereferencing the result of va_next.
      FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
      FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
      FuncInfo->setRegSaveFrameIndex(FrameInfo.CreateStackObject(
          ArgGPRs.size() * 8 + ArgXMMs.size() * 16, Align(16), false));
    }

    SmallVector<SDValue, 6>
        LiveGPRs; // list of SDValue for GPR registers keeping live input value
    SmallVector<SDValue, 8> LiveXMMRegs; // list of SDValue for XMM registers
                                         // keeping live input value
    SDValue ALVal; // if applicable keeps SDValue for %al register

    // Gather all the live in physical registers.
    for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
      Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
      LiveGPRs.push_back(DAG.getCopyFromReg(Chain, DL, GPR, MVT::i64));
    }
    const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
    if (!AvailableXmms.empty()) {
      Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
      ALVal = DAG.getCopyFromReg(Chain, DL, AL, MVT::i8);
      for (MCPhysReg Reg : AvailableXmms) {
        // FastRegisterAllocator spills virtual registers at basic
        // block boundary. That leads to usages of xmm registers
        // outside of check for %al. Pass physical registers to
        // VASTART_SAVE_XMM_REGS to avoid unnecessary spilling.
        TheMachineFunction.getRegInfo().addLiveIn(Reg);
        LiveXMMRegs.push_back(DAG.getRegister(Reg, MVT::v4f32));
      }
    }

    // Store the integer parameter registers.
    SmallVector<SDValue, 8> MemOps;
    SDValue RSFIN =
        DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
                          TargLowering.getPointerTy(DAG.getDataLayout()));
    unsigned Offset = FuncInfo->getVarArgsGPOffset();
    for (SDValue Val : LiveGPRs) {
      SDValue FIN = DAG.getNode(ISD::ADD, DL,
                                TargLowering.getPointerTy(DAG.getDataLayout()),
                                RSFIN, DAG.getIntPtrConstant(Offset, DL));
      SDValue Store =
          DAG.getStore(Val.getValue(1), DL, Val, FIN,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(),
                           FuncInfo->getRegSaveFrameIndex(), Offset));
      MemOps.push_back(Store);
      Offset += 8;
    }

    // Now store the XMM (fp + vector) parameter registers.
    if (!LiveXMMRegs.empty()) {
      SmallVector<SDValue, 12> SaveXMMOps;
      SaveXMMOps.push_back(Chain);
      SaveXMMOps.push_back(ALVal);
      SaveXMMOps.push_back(RSFIN);
      SaveXMMOps.push_back(
          DAG.getTargetConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32));
      llvm::append_range(SaveXMMOps, LiveXMMRegs);
      MachineMemOperand *StoreMMO =
          DAG.getMachineFunction().getMachineMemOperand(
              MachinePointerInfo::getFixedStack(
                  DAG.getMachineFunction(), FuncInfo->getRegSaveFrameIndex(),
                  Offset),
              MachineMemOperand::MOStore, 128, Align(16));
      MemOps.push_back(DAG.getMemIntrinsicNode(X86ISD::VASTART_SAVE_XMM_REGS,
                                               DL, DAG.getVTList(MVT::Other),
                                               SaveXMMOps, MVT::i8, StoreMMO));
    }

    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
  }
}

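// The gp_offset/fp_offset values and register save area recorded above map
// onto the fields of the SysV x86-64 va_list, which va_arg consults to locate
// register arguments spilled here.
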
4140 void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
4141 // Find the largest legal vector type.
4142 MVT VecVT = MVT::Other;
4143 // FIXME: Only some x86_32 calling conventions support AVX512.
4144 if (Subtarget.useAVX512Regs() &&
4145 (is64Bit() || (CallConv == CallingConv::X86_VectorCall ||
4146 CallConv == CallingConv::Intel_OCL_BI)))
4147 VecVT = MVT::v16f32;
4148 else if (Subtarget.hasAVX())
4150 else if (Subtarget.hasSSE2())
4153 // We forward some GPRs and some vector types.
4154 SmallVector<MVT, 2> RegParmTypes;
4155 MVT IntVT = is64Bit() ? MVT::i64 : MVT::i32;
4156 RegParmTypes.push_back(IntVT);
4157 if (VecVT != MVT::Other)
4158 RegParmTypes.push_back(VecVT);
4160 // Compute the set of forwarded registers. The rest are scratch.
4161 SmallVectorImpl<ForwardedRegister> &Forwards =
4162 FuncInfo->getForwardedMustTailRegParms();
4163 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
4165 // Forward AL for SysV x86_64 targets, since it is used for varargs.
4166 if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
4167 Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
4168 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
4171 // Copy all forwards from physical to virtual registers.
4172 for (ForwardedRegister &FR : Forwards) {
4173 // FIXME: Can we use a less constrained schedule?
4174 SDValue RegVal = DAG.getCopyFromReg(Chain, DL, FR.VReg, FR.VT);
4175 FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
4176 TargLowering.getRegClassFor(FR.VT));
4177 Chain = DAG.getCopyToReg(Chain, DL, FR.VReg, RegVal);
}
}
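// Illustrative IR that exercises forwardMustTailParameters above (assumed
// example, not from this file): a varargs thunk that musttail-forwards all of
// its arguments, which is why the incoming register contents must be
// preserved in virtual registers across the body:
//   define void @thunk(ptr %this, ...) {
//     musttail call void (ptr, ...) @impl(ptr %this, ...)
//     ret void
//   }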
4181 void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
4182 unsigned StackSize) {
4183 // Set the frame indices to the 0xAAAAAAA sentinel value to mark them unset.
4184 // If necessary, they will be set to the correct values later.
4185 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
4186 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
4188 if (FrameInfo.hasVAStart())
4189 createVarArgAreaAndStoreRegisters(Chain, StackSize);
4191 if (FrameInfo.hasMustTailInVarArgFunc())
4192 forwardMustTailParameters(Chain);
4195 SDValue X86TargetLowering::LowerFormalArguments(
4196 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
4197 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4198 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4199 MachineFunction &MF = DAG.getMachineFunction();
4200 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4202 const Function &F = MF.getFunction();
4203 if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
4204 F.getName() == "main")
4205 FuncInfo->setForceFramePointer(true);
4207 MachineFrameInfo &MFI = MF.getFrameInfo();
4208 bool Is64Bit = Subtarget.is64Bit();
4209 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
assert(
4212 !(IsVarArg && canGuaranteeTCO(CallConv)) &&
4213 "Var args not supported with calling convention regcall, fastcc, ghc or hipe");
4215 // Assign locations to all of the incoming arguments.
4216 SmallVector<CCValAssign, 16> ArgLocs;
4217 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
4219 // Allocate the shadow area for Win64.
if (IsWin64)
4221 CCInfo.AllocateStack(32, Align(8));
4223 CCInfo.AnalyzeArguments(Ins, CC_X86);
4225 // In the vectorcall calling convention a second pass is required for the HVA
// registers.
4227 if (CallingConv::X86_VectorCall == CallConv) {
4228 CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
}
4231 // The next loop assumes that the locations are in the same order as the
// Ins array.
4233 assert(isSortedByValueNo(ArgLocs) &&
4234 "Argument Location list must be sorted before lowering");
4237 for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
++I, ++InsIndex) {
4239 assert(InsIndex < Ins.size() && "Invalid Ins index");
4240 CCValAssign &VA = ArgLocs[I];
SDValue ArgValue;
4242 if (VA.isRegLoc()) {
4243 EVT RegVT = VA.getLocVT();
4244 if (VA.needsCustom()) {
assert(
4246 VA.getValVT() == MVT::v64i1 &&
4247 "Currently the only custom case is when we split v64i1 to 2 regs");

4249 // v64i1 values, in the regcall calling convention, that are
4250 // compiled for a 32-bit arch are split up into two registers.
ArgValue =
4252 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
} else {
4254 const TargetRegisterClass *RC;
4255 if (RegVT == MVT::i8)
4256 RC = &X86::GR8RegClass;
4257 else if (RegVT == MVT::i16)
4258 RC = &X86::GR16RegClass;
4259 else if (RegVT == MVT::i32)
4260 RC = &X86::GR32RegClass;
4261 else if (Is64Bit && RegVT == MVT::i64)
4262 RC = &X86::GR64RegClass;
4263 else if (RegVT == MVT::f16)
4264 RC = Subtarget.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
4265 else if (RegVT == MVT::f32)
4266 RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
4267 else if (RegVT == MVT::f64)
4268 RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
4269 else if (RegVT == MVT::f80)
4270 RC = &X86::RFP80RegClass;
4271 else if (RegVT == MVT::f128)
4272 RC = &X86::VR128RegClass;
4273 else if (RegVT.is512BitVector())
4274 RC = &X86::VR512RegClass;
4275 else if (RegVT.is256BitVector())
4276 RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
4277 else if (RegVT.is128BitVector())
4278 RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
4279 else if (RegVT == MVT::x86mmx)
4280 RC = &X86::VR64RegClass;
4281 else if (RegVT == MVT::v1i1)
4282 RC = &X86::VK1RegClass;
4283 else if (RegVT == MVT::v8i1)
4284 RC = &X86::VK8RegClass;
4285 else if (RegVT == MVT::v16i1)
4286 RC = &X86::VK16RegClass;
4287 else if (RegVT == MVT::v32i1)
4288 RC = &X86::VK32RegClass;
4289 else if (RegVT == MVT::v64i1)
4290 RC = &X86::VK64RegClass;
else
4292 llvm_unreachable("Unknown argument type!");
4294 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
4295 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
}
4298 // If this is an 8 or 16-bit value, it is really passed promoted to 32
4299 // bits. Insert an assert[sz]ext to capture this, then truncate to the
// right size.
4301 if (VA.getLocInfo() == CCValAssign::SExt)
4302 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
4303 DAG.getValueType(VA.getValVT()));
4304 else if (VA.getLocInfo() == CCValAssign::ZExt)
4305 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
4306 DAG.getValueType(VA.getValVT()));
4307 else if (VA.getLocInfo() == CCValAssign::BCvt)
4308 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
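// For example (illustrative): an i8 argument arriving promoted in a 32-bit
// register becomes roughly
//   t0 = CopyFromReg <reg>
//   t1 = AssertZext t0, ValueType:i8
//   t2 = truncate t1 to i8
// so later combines know the upper 24 bits are zero.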
4310 if (VA.isExtInLoc()) {
4311 // Handle MMX values passed in XMM regs.
4312 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
4313 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
4314 else if (VA.getValVT().isVector() &&
4315 VA.getValVT().getScalarType() == MVT::i1 &&
4316 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
4317 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
4318 // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
4319 ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
} else
4321 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
}
} else {
4324 assert(VA.isMemLoc());
ArgValue =
4326 LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
}
4329 // If value is passed via pointer - do a load.
4330 if (VA.getLocInfo() == CCValAssign::Indirect &&
4331 !(Ins[I].Flags.isByVal() && VA.isRegLoc())) {
ArgValue =
4333 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
}

4336 InVals.push_back(ArgValue);
}
4339 for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
4340 if (Ins[I].Flags.isSwiftAsync()) {
4341 auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
4342 if (Subtarget.is64Bit())
4343 X86FI->setHasSwiftAsyncContext(true);
else {
4345 int FI = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
4346 X86FI->setSwiftAsyncContextFrameIdx(FI);
4347 SDValue St = DAG.getStore(DAG.getEntryNode(), dl, InVals[I],
4348 DAG.getFrameIndex(FI, MVT::i32),
4349 MachinePointerInfo::getFixedStack(MF, FI));
4350 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, St, Chain);
}
}
4354 // The Swift calling conventions do not require that we copy the sret
4355 // argument into %rax/%eax for the return, so we don't set SRetReturnReg.
4356 if (CallConv == CallingConv::Swift || CallConv == CallingConv::SwiftTail)
continue;
4359 // All x86 ABIs require that for returning structs by value we copy the
4360 // sret argument into %rax/%eax (depending on ABI) for the return. Save
4361 // the argument into a virtual register so that we can access it from the
// return points.
4363 if (Ins[I].Flags.isSRet()) {
4364 assert(!FuncInfo->getSRetReturnReg() &&
4365 "SRet return has already been set");
4366 MVT PtrTy = getPointerTy(DAG.getDataLayout());
Register Reg =
4368 MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
4369 FuncInfo->setSRetReturnReg(Reg);
4370 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
4371 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
break;
}
}
4376 unsigned StackSize = CCInfo.getStackSize();
4377 // Align stack specially for tail calls.
4378 if (shouldGuaranteeTCO(CallConv,
4379 MF.getTarget().Options.GuaranteedTailCallOpt))
4380 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
if (IsVarArg)
4383 VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
4384 .lowerVarArgsParameters(Chain, StackSize);
4386 // Some CCs need callee pop.
4387 if (X86::isCalleePop(CallConv, Is64Bit, IsVarArg,
4388 MF.getTarget().Options.GuaranteedTailCallOpt)) {
4389 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
4390 } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
4391 // X86 interrupts must pop the error code (and the alignment padding) if
// present.
4393 FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
} else {
4395 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
4396 // If this is an sret function, the return should pop the hidden pointer.
4397 if (!canGuaranteeTCO(CallConv) && hasCalleePopSRet(Ins, Subtarget))
4398 FuncInfo->setBytesToPopOnReturn(4);
}

if (!Is64Bit) {
4402 // RegSaveFrameIndex is X86-64 only.
4403 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
}
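// Worked example (illustrative): a 32-bit stdcall function taking two i32
// arguments sets BytesToPopOnReturn = 8 and returns with `ret $8`; a cdecl
// sret function pops only the hidden struct-return pointer, via `ret $4`.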
4406 FuncInfo->setArgumentStackSize(StackSize);
4408 if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
4409 EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
4410 if (Personality == EHPersonality::CoreCLR) {
4412 // TODO: Add a mechanism to frame lowering that will allow us to indicate
4413 // that we'd prefer this slot be allocated towards the bottom of the frame
4414 // (i.e. near the stack pointer after allocating the frame). Every
4415 // funclet needs a copy of this slot in its (mostly empty) frame, and the
4416 // offset from the bottom of this and each funclet's frame must be the
4417 // same, so the size of funclets' (mostly empty) frames is dictated by
4418 // how far this slot is from the bottom (since they allocate just enough
4419 // space to accommodate holding this slot at the correct offset).
4420 int PSPSymFI = MFI.CreateStackObject(8, Align(8), /*isSpillSlot=*/false);
4421 EHInfo->PSPSymFrameIdx = PSPSymFI;
4425 if (shouldDisableArgRegFromCSR(CallConv) ||
4426 F.hasFnAttribute("no_caller_saved_registers")) {
4427 MachineRegisterInfo &MRI = MF.getRegInfo();
4428 for (std::pair<Register, Register> Pair : MRI.liveins())
4429 MRI.disableCalleeSavedRegister(Pair.first);
}

return Chain;
}
4435 SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
4436 SDValue Arg, const SDLoc &dl,
4438 const CCValAssign &VA,
4439 ISD::ArgFlagsTy Flags,
4440 bool isByVal) const {
4441 unsigned LocMemOffset = VA.getLocMemOffset();
4442 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4443 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
StackPtr, PtrOff);
if (isByVal)
4446 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
4448 MaybeAlign Alignment;
4449 if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
4450 Arg.getSimpleValueType() != MVT::f80)
4451 Alignment = MaybeAlign(4);
4452 return DAG.getStore(
4453 Chain, dl, Arg, PtrOff,
4454 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset),
Alignment);
}
4458 /// Emit a load of return address if tail call
4459 /// optimization is performed and it is required.
4460 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
4461 SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
4462 bool Is64Bit, int FPDiff, const SDLoc &dl) const {
4463 // Adjust the Return address stack slot.
4464 EVT VT = getPointerTy(DAG.getDataLayout());
4465 OutRetAddr = getReturnAddressFrameIndex(DAG);
4467 // Load the "old" Return address.
4468 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
4469 return SDValue(OutRetAddr.getNode(), 1);
4472 /// Emit a store of the return address if tail call
4473 /// optimization is performed and it is required (FPDiff!=0).
4474 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
4475 SDValue Chain, SDValue RetAddrFrIdx,
4476 EVT PtrVT, unsigned SlotSize,
4477 int FPDiff, const SDLoc &dl) {
4478 // Store the return address to the appropriate stack slot.
4479 if (!FPDiff) return Chain;
4480 // Calculate the new stack slot for the return address.
4481 int NewReturnAddrFI =
4482 MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
false);
4484 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
4485 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
4486 MachinePointerInfo::getFixedStack(
4487 DAG.getMachineFunction(), NewReturnAddrFI));
return Chain;
}
4491 /// Returns a vector_shuffle mask for a movs{s|d}, movd
4492 /// operation of specified width.
4493 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
SDValue V2) {
4495 unsigned NumElems = VT.getVectorNumElements();
4496 SmallVector<int, 8> Mask;
4497 Mask.push_back(NumElems);
4498 for (unsigned i = 1; i != NumElems; ++i)
Mask.push_back(i);
4500 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
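// For example, getMOVL with VT = v4f32 builds the mask <4, 1, 2, 3>: the
// result takes element 0 from V2 and elements 1-3 from V1, which is exactly
// the semantics of MOVSS.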
SDValue
4504 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
4505 SmallVectorImpl<SDValue> &InVals) const {
4506 SelectionDAG &DAG = CLI.DAG;
SDLoc &dl = CLI.DL;
4508 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
4509 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
4510 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
4511 SDValue Chain = CLI.Chain;
4512 SDValue Callee = CLI.Callee;
4513 CallingConv::ID CallConv = CLI.CallConv;
4514 bool &isTailCall = CLI.IsTailCall;
4515 bool isVarArg = CLI.IsVarArg;
4516 const auto *CB = CLI.CB;
4518 MachineFunction &MF = DAG.getMachineFunction();
4519 bool Is64Bit = Subtarget.is64Bit();
4520 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
4521 bool IsSibcall = false;
4522 bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
4523 CallConv == CallingConv::Tail || CallConv == CallingConv::SwiftTail;
4524 bool IsCalleePopSRet = !IsGuaranteeTCO && hasCalleePopSRet(Outs, Subtarget);
4525 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
4526 bool HasNCSR = (CB && isa<CallInst>(CB) &&
4527 CB->hasFnAttr("no_caller_saved_registers"));
4528 bool HasNoCfCheck = (CB && CB->doesNoCfCheck());
4529 bool IsIndirectCall = (CB && isa<CallInst>(CB) && CB->isIndirectCall());
4530 bool IsCFICall = IsIndirectCall && CLI.CFIType;
4531 const Module *M = MF.getMMI().getModule();
4532 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
4534 MachineFunction::CallSiteInfo CSInfo;
4535 if (CallConv == CallingConv::X86_INTR)
4536 report_fatal_error("X86 interrupts may not be called directly");
4538 bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
4539 if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO && !IsMustTail) {
4540 // If we are using a GOT, disable tail calls to external symbols with
4541 // default visibility. Tail calling such a symbol requires using a GOT
4542 // relocation, which forces early binding of the symbol. This breaks code
4543 // that requires lazy function symbol resolution. Using musttail or
4544 // GuaranteedTailCallOpt will override this.
4545 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4546 if (!G || (!G->getGlobal()->hasLocalLinkage() &&
4547 G->getGlobal()->hasDefaultVisibility()))
isTailCall = false;
}
4551 if (isTailCall && !IsMustTail) {
4552 // Check if it's really possible to do a tail call.
4553 isTailCall = IsEligibleForTailCallOptimization(
4554 Callee, CallConv, IsCalleePopSRet, isVarArg, CLI.RetTy, Outs, OutVals,
Ins, DAG);
4557 // Sibcalls are automatically detected tailcalls which do not require
// ABI changes.
4559 if (!IsGuaranteeTCO && isTailCall)
IsSibcall = true;

if (isTailCall)
++NumTailCalls;
}
4566 if (IsMustTail && !isTailCall)
4567 report_fatal_error("failed to perform tail call elimination on a call "
4568 "site marked musttail");
4570 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
4571 "Var args not supported with calling convention fastcc, ghc or hipe");
4573 // Analyze operands of the call, assigning locations to each operand.
4574 SmallVector<CCValAssign, 16> ArgLocs;
4575 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
4577 // Allocate the shadow area for Win64.
if (IsWin64)
4579 CCInfo.AllocateStack(32, Align(8));
4581 CCInfo.AnalyzeArguments(Outs, CC_X86);
4583 // In the vectorcall calling convention a second pass is required for the HVA
// registers.
4585 if (CallingConv::X86_VectorCall == CallConv) {
4586 CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
}
4589 // Get a count of how many bytes are to be pushed on the stack.
4590 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
if (IsSibcall)
4592 // This is a sibcall. The memory operands are already available in the
4593 // caller's own incoming argument stack area, so nothing needs to be pushed.
NumBytes = 0;
4595 else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
4596 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
int FPDiff = 0;
if (isTailCall &&
4600 shouldGuaranteeTCO(CallConv,
4601 MF.getTarget().Options.GuaranteedTailCallOpt)) {
4602 // Lower arguments at fp - stackoffset + fpdiff.
4603 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
4605 FPDiff = NumBytesCallerPushed - NumBytes;
4607 // Set the delta of movement of the returnaddr stackslot.
4608 // But only set if delta is greater than previous delta.
4609 if (FPDiff < X86Info->getTCReturnAddrDelta())
4610 X86Info->setTCReturnAddrDelta(FPDiff);
}
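// Worked example (illustrative): if the caller pops 16 bytes of its own
// incoming arguments but this callee needs NumBytes = 32, then
// FPDiff = 16 - 32 = -16, so the return address slot (and the argument area)
// must be shifted 16 bytes further down the stack before the tail jump.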
4613 unsigned NumBytesToPush = NumBytes;
4614 unsigned NumBytesToPop = NumBytes;
4616 // If we have an inalloca argument, all stack space has already been allocated
4617 // for us and be right at the top of the stack. We don't support multiple
4618 // arguments passed in memory when using inalloca.
4619 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
NumBytesToPush = 0;
4621 if (!ArgLocs.back().isMemLoc())
4622 report_fatal_error("cannot use inalloca attribute on a register "
"parameter");
4624 if (ArgLocs.back().getLocMemOffset() != 0)
4625 report_fatal_error("any parameter with the inalloca attribute must be "
4626 "the only memory argument");
4627 } else if (CLI.IsPreallocated) {
4628 assert(ArgLocs.back().isMemLoc() &&
4629 "cannot use preallocated attribute on a register "
4631 SmallVector<size_t, 4> PreallocatedOffsets;
4632 for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
4633 if (CLI.CB->paramHasAttr(i, Attribute::Preallocated)) {
4634 PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
}
}
4637 auto *MFI = DAG.getMachineFunction().getInfo<X86MachineFunctionInfo>();
4638 size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
4639 MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
4640 MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
NumBytesToPush = 0;
}
4644 if (!IsSibcall && !IsMustTail)
4645 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
4646 NumBytes - NumBytesToPush, dl);
4648 SDValue RetAddrFrIdx;
4649 // Load return address for tail calls.
4650 if (isTailCall && FPDiff)
4651 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
4652 Is64Bit, FPDiff, dl);
4654 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
4655 SmallVector<SDValue, 8> MemOpChains;
4658 // The next loop assumes that the locations are in the same order as the
// Outs array.
4660 assert(isSortedByValueNo(ArgLocs) &&
4661 "Argument Location list must be sorted before lowering");
4663 // Walk the register/memloc assignments, inserting copies/loads. In the case
4664 // of tail call optimization, arguments are handled later.
4665 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4666 for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
++I, ++OutIndex) {
4668 assert(OutIndex < Outs.size() && "Invalid Out index");
4669 // Skip inalloca/preallocated arguments, they have already been written.
4670 ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
4671 if (Flags.isInAlloca() || Flags.isPreallocated())
continue;
4674 CCValAssign &VA = ArgLocs[I];
4675 EVT RegVT = VA.getLocVT();
4676 SDValue Arg = OutVals[OutIndex];
4677 bool isByVal = Flags.isByVal();
4679 // Promote the value if needed.
4680 switch (VA.getLocInfo()) {
4681 default: llvm_unreachable("Unknown loc info!");
4682 case CCValAssign::Full: break;
4683 case CCValAssign::SExt:
4684 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
break;
4686 case CCValAssign::ZExt:
4687 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
break;
4689 case CCValAssign::AExt:
4690 if (Arg.getValueType().isVector() &&
4691 Arg.getValueType().getVectorElementType() == MVT::i1)
4692 Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
4693 else if (RegVT.is128BitVector()) {
4694 // Special case: passing MMX values in XMM registers.
4695 Arg = DAG.getBitcast(MVT::i64, Arg);
4696 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
4697 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
} else
4699 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
break;
4701 case CCValAssign::BCvt:
4702 Arg = DAG.getBitcast(RegVT, Arg);
break;
4704 case CCValAssign::Indirect: {
if (isByVal) {
4706 // Memcpy the argument to a temporary stack slot to prevent
4707 // the caller from seeing any modifications the callee may make
4708 // as guaranteed by the `byval` attribute.
4709 int FrameIdx = MF.getFrameInfo().CreateStackObject(
4710 Flags.getByValSize(),
4711 std::max(Align(16), Flags.getNonZeroByValAlign()), false);
SDValue StackSlot =
4713 DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
Chain =
4715 CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
4716 // From now on, treat this as a regular pointer.
Arg = StackSlot;
isByVal = false;
} else {
4720 // Store the argument.
4721 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
4722 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
4723 Chain = DAG.getStore(
4724 Chain, dl, Arg, SpillSlot,
4725 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
Arg = SpillSlot;
}
break;
}
}
4732 if (VA.needsCustom()) {
4733 assert(VA.getValVT() == MVT::v64i1 &&
4734 "Currently the only custom case is when we split v64i1 to 2 regs");
4735 // Split v64i1 value into two registers
4736 Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
4737 } else if (VA.isRegLoc()) {
4738 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4739 const TargetOptions &Options = DAG.getTarget().Options;
4740 if (Options.EmitCallSiteInfo)
4741 CSInfo.emplace_back(VA.getLocReg(), I);
4742 if (isVarArg && IsWin64) {
4743 // Win64 ABI requires argument XMM reg to be copied to the corresponding
4744 // shadow reg if callee is a varargs function.
Register ShadowReg;
4746 switch (VA.getLocReg()) {
4747 case X86::XMM0: ShadowReg = X86::RCX; break;
4748 case X86::XMM1: ShadowReg = X86::RDX; break;
4749 case X86::XMM2: ShadowReg = X86::R8; break;
4750 case X86::XMM3: ShadowReg = X86::R9; break;
}
if (ShadowReg)
4753 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
}
4755 } else if (!IsSibcall && (!isTailCall || isByVal)) {
4756 assert(VA.isMemLoc());
4757 if (!StackPtr.getNode())
4758 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4759 getPointerTy(DAG.getDataLayout()));
4760 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
4761 dl, DAG, VA, Flags, isByVal));
4765 if (!MemOpChains.empty())
4766 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4768 if (Subtarget.isPICStyleGOT()) {
4769 // ELF / PIC requires GOT to be in the EBX register before function calls
4770 // via the PLT GOT pointer (except for regcall).
4772 // An indirect call with the RegCall calling convention may use up all of
4773 // the general-purpose registers, so it is not suitable to pin the EBX
4774 // register for the GOT address; just let the register allocator handle it.
4775 if (CallConv != CallingConv::X86_RegCall)
4776 RegsToPass.push_back(std::make_pair(
4777 Register(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
4778 getPointerTy(DAG.getDataLayout()))));
4780 // If we are tail calling and generating PIC/GOT style code load the
4781 // address of the callee into ECX. The value in ecx is used as target of
4782 // the tail jump. This is done to circumvent the ebx/callee-saved problem
4783 // for tail calls on PIC/GOT architectures. Normally we would just put the
4784 // address of GOT into ebx and then call target@PLT. But for tail calls
4785 // ebx would be restored (since ebx is callee saved) before jumping to the
// target@PLT.
4788 // Note: The actual moving to ECX is done further down.
4789 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4790 if (G && !G->getGlobal()->hasLocalLinkage() &&
4791 G->getGlobal()->hasDefaultVisibility())
4792 Callee = LowerGlobalAddress(Callee, DAG);
4793 else if (isa<ExternalSymbolSDNode>(Callee))
4794 Callee = LowerExternalSymbol(Callee, DAG);
4798 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail &&
4799 (Subtarget.hasSSE1() || !M->getModuleFlag("SkipRaxSetup"))) {
4800 // From AMD64 ABI document:
4801 // For calls that may call functions that use varargs or stdargs
4802 // (prototype-less calls or calls to functions containing ellipsis (...) in
4803 // the declaration) %al is used as hidden argument to specify the number
4804 // of SSE registers used. The contents of %al do not need to match exactly
4805 // the number of registers, but must be an upper bound on the number of SSE
4806 // registers used and is in the range 0 - 8 inclusive.
4808 // Count the number of XMM registers allocated.
4809 static const MCPhysReg XMMArgRegs[] = {
4810 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
4811 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
4813 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
4814 assert((Subtarget.hasSSE1() || !NumXMMRegs)
4815 && "SSE registers cannot be used when SSE is disabled");
4816 RegsToPass.push_back(std::make_pair(Register(X86::AL),
4817 DAG.getConstant(NumXMMRegs, dl,
MVT::i8)));
}
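// For example (illustrative): for a prototype-less call such as
// printf("%f\n", x) with one double passed in %xmm0, NumXMMRegs is 1 and the
// selected code ends up containing "movb $1, %al" just before the call.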
4821 if (isVarArg && IsMustTail) {
4822 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
4823 for (const auto &F : Forwards) {
4824 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
4825 RegsToPass.push_back(std::make_pair(F.PReg, Val));
}
}
4829 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
4830 // don't need this because the eligibility check rejects calls that require
4831 // shuffling arguments passed in memory.
4832 if (!IsSibcall && isTailCall) {
4833 // Force all the incoming stack arguments to be loaded from the stack
4834 // before any new outgoing arguments are stored to the stack, because the
4835 // outgoing stack slots may alias the incoming argument stack slots, and
4836 // the alias isn't otherwise explicit. This is slightly more conservative
4837 // than necessary, because it means that each store effectively depends
4838 // on every argument instead of just those arguments it would clobber.
4839 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
4841 SmallVector<SDValue, 8> MemOpChains2;
SDValue FIN;
int FI = INT32_MAX;
4844 for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
++I, ++OutsIndex) {
4846 CCValAssign &VA = ArgLocs[I];
4848 if (VA.isRegLoc()) {
4849 if (VA.needsCustom()) {
4850 assert((CallConv == CallingConv::X86_RegCall) &&
4851 "Expecting custom case only in regcall calling convention");
4852 // This means that we are in a special case where one argument was
4853 // passed through two register locations, so skip the next location.
++I;
}
continue;
}
4860 assert(VA.isMemLoc());
4861 SDValue Arg = OutVals[OutsIndex];
4862 ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
4863 // Skip inalloca/preallocated arguments. They don't require any work.
4864 if (Flags.isInAlloca() || Flags.isPreallocated())
4866 // Create frame index.
4867 int32_t Offset = VA.getLocMemOffset() + FPDiff;
4868 uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8;
4869 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4870 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4872 if (Flags.isByVal()) {
4873 // Copy relative to framepointer.
4874 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
4875 if (!StackPtr.getNode())
4876 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4877 getPointerTy(DAG.getDataLayout()));
4878 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4881 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
ArgChain,
Flags, DAG, dl));
} else {
4885 // Store relative to framepointer.
4886 MemOpChains2.push_back(DAG.getStore(
4887 ArgChain, dl, Arg, FIN,
4888 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4892 if (!MemOpChains2.empty())
4893 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4895 // Store the return address to the appropriate stack slot.
4896 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
4897 getPointerTy(DAG.getDataLayout()),
4898 RegInfo->getSlotSize(), FPDiff, dl);
4901 // Build a sequence of copy-to-reg nodes chained together with token chain
4902 // and glue operands which copy the outgoing args into registers.
SDValue InGlue;
4904 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4905 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4906 RegsToPass[i].second, InGlue);
4907 InGlue = Chain.getValue(1);
4910 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
4911 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
4912 // In the 64-bit large code model, we have to make all calls
4913 // through a register, since the call instruction's 32-bit
4914 // pc-relative offset may not be large enough to hold the whole
// address.
4916 } else if (Callee->getOpcode() == ISD::GlobalAddress ||
4917 Callee->getOpcode() == ISD::ExternalSymbol) {
4918 // Lower direct calls to global addresses and external symbols. Setting
4919 // ForCall to true here has the effect of removing WrapperRIP when possible
4920 // to allow direct calls to be selected without first materializing the
4921 // address into a register.
4922 Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
4923 } else if (Subtarget.isTarget64BitILP32() &&
4924 Callee.getValueType() == MVT::i32) {
4925 // Zero-extend the 32-bit Callee address to 64 bits, as required by the x32 ABI.
4926 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
4929 // Returns a chain & a glue for retval copy to use.
4930 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
4931 SmallVector<SDValue, 8> Ops;
4933 if (!IsSibcall && isTailCall && !IsMustTail) {
4934 Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, 0, InGlue, dl);
4935 InGlue = Chain.getValue(1);
4938 Ops.push_back(Chain);
4939 Ops.push_back(Callee);
if (isTailCall)
4942 Ops.push_back(DAG.getTargetConstant(FPDiff, dl, MVT::i32));
4944 // Add argument registers to the end of the list so that they are known live
// into the call.
4946 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4947 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4948 RegsToPass[i].second.getValueType()));
4950 // Add a register mask operand representing the call-preserved registers.
4951 const uint32_t *Mask = [&]() {
4952 auto AdaptedCC = CallConv;
4953 // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists),
4954 // use X86_INTR calling convention because it has the same CSR mask
4955 // (same preserved registers).
if (HasNCSR)
4957 AdaptedCC = (CallingConv::ID)CallingConv::X86_INTR;
4958 // If NoCalleeSavedRegisters is requested, then use GHC since it happens
4959 // to use the CSR_NoRegs_RegMask.
4960 if (CB && CB->hasFnAttr("no_callee_saved_registers"))
4961 AdaptedCC = (CallingConv::ID)CallingConv::GHC;
4962 return RegInfo->getCallPreservedMask(MF, AdaptedCC);
}();
4964 assert(Mask && "Missing call preserved mask for calling convention");
4966 // If this is an invoke in a 32-bit function using a funclet-based
4967 // personality, assume the function clobbers all registers. If an exception
4968 // is thrown, the runtime will not restore CSRs.
4969 // FIXME: Model this more precisely so that we can register allocate across
4970 // the normal edge and spill and fill across the exceptional edge.
4971 if (!Is64Bit && CLI.CB && isa<InvokeInst>(CLI.CB)) {
4972 const Function &CallerFn = MF.getFunction();
4973 EHPersonality Pers =
4974 CallerFn.hasPersonalityFn()
4975 ? classifyEHPersonality(CallerFn.getPersonalityFn())
4976 : EHPersonality::Unknown;
4977 if (isFuncletEHPersonality(Pers))
4978 Mask = RegInfo->getNoPreservedMask();
4981 // Define a new register mask from the existing mask.
4982 uint32_t *RegMask = nullptr;
4984 // In some calling conventions we need to remove the used physical registers
4985 // from the reg mask. Create a new RegMask for such calling conventions.
4986 // RegMask for calling conventions that disable only return registers (e.g.
4987 // preserve_most) will be modified later in LowerCallResult.
4988 bool ShouldDisableArgRegs = shouldDisableArgRegFromCSR(CallConv) || HasNCSR;
4989 if (ShouldDisableArgRegs || shouldDisableRetRegFromCSR(CallConv)) {
4990 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4992 // Allocate a new Reg Mask and copy Mask.
4993 RegMask = MF.allocateRegMask();
4994 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4995 memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4997 // Make sure all sub registers of the argument registers are reset
// in the RegMask.
4999 if (ShouldDisableArgRegs) {
5000 for (auto const &RegPair : RegsToPass)
5001 for (MCPhysReg SubReg : TRI->subregs_inclusive(RegPair.first))
5002 RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
}
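// Each RegMask bit i corresponds to physical register number i; dividing by
// 32 picks the 32-bit mask word and (1u << (i % 32)) the bit within it, so
// the loop above clears (marks as clobbered) every argument register together
// with all of its sub-registers.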
5005 // Create the RegMask Operand according to our updated mask.
5006 Ops.push_back(DAG.getRegisterMask(RegMask));
5008 // Create the RegMask Operand according to the static mask.
5009 Ops.push_back(DAG.getRegisterMask(Mask));
5012 if (InGlue.getNode())
5013 Ops.push_back(InGlue);
if (isTailCall) {
5017 //// If this is the first return lowered for this function, add the regs
5018 //// to the liveout set for the function.
5019 // This isn't right, although it's probably harmless on x86; liveouts
5020 // should be computed from returns not tail calls. Consider a void
5021 // function making a tail call to a function returning int.
5022 MF.getFrameInfo().setHasTailCall();
5023 SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
if (IsCFICall)
5026 Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());
5028 DAG.addNoMergeSiteInfo(Ret.getNode(), CLI.NoMerge);
5029 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
return Ret;
}
5033 if (HasNoCfCheck && IsCFProtectionSupported && IsIndirectCall) {
5034 Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
5035 } else if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
5036 // Calls with a "clang.arc.attachedcall" bundle are special. They should be
5037 // expanded to the call, directly followed by a special marker sequence and
5038 // a call to a ObjC library function. Use the CALL_RVMARKER to do that.
5039 assert(!isTailCall &&
5040 "tail calls cannot be marked with clang.arc.attachedcall");
5041 assert(Is64Bit && "clang.arc.attachedcall is only supported in 64bit mode");
5043 // Add a target global address for the retainRV/claimRV runtime function
5044 // just before the call target.
5045 Function *ARCFn = *objcarc::getAttachedARCFunction(CLI.CB);
5046 auto PtrVT = getPointerTy(DAG.getDataLayout());
5047 auto GA = DAG.getTargetGlobalAddress(ARCFn, dl, PtrVT);
5048 Ops.insert(Ops.begin() + 1, GA);
5049 Chain = DAG.getNode(X86ISD::CALL_RVMARKER, dl, NodeTys, Ops);
} else {
5051 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
}
if (IsCFICall)
5055 Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());
5057 InGlue = Chain.getValue(1);
5058 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
5059 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
5061 // Save heapallocsite metadata.
if (CLI.CB)
5063 if (MDNode *HeapAlloc = CLI.CB->getMetadata("heapallocsite"))
5064 DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
5066 // Create the CALLSEQ_END node.
5067 unsigned NumBytesForCalleeToPop = 0; // Callee pops nothing.
5068 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
5069 DAG.getTarget().Options.GuaranteedTailCallOpt))
5070 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
5071 else if (!canGuaranteeTCO(CallConv) && IsCalleePopSRet)
5072 // If this call passes a struct-return pointer, the callee
5073 // pops that struct pointer.
5074 NumBytesForCalleeToPop = 4;
5076 // Returns a glue for retval copy to use.
if (!IsSibcall) {
5078 Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
InGlue, dl);
5080 InGlue = Chain.getValue(1);
}
5083 // Handle result values, copying them out of physregs into vregs that we
// return.
5085 return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
InVals, RegMask);
}
5089 //===----------------------------------------------------------------------===//
5090 // Fast Calling Convention (tail call) implementation
5091 //===----------------------------------------------------------------------===//
5093 //  Like stdcall, the callee cleans up the arguments, except that ECX is
5094 //  reserved for storing the address of the tail-called function. Only 2
5095 //  registers are free for argument passing (inreg). Tail call optimization
//  is performed provided:
5097 //  * tailcallopt is enabled
5098 // * caller/callee are fastcc
5099 // On X86_64 architecture with GOT-style position independent code only local
5100 // (within module) calls are supported at the moment.
5101 //  To keep the stack aligned according to the platform ABI, the function
5102 //  GetAlignedArgumentStackSize ensures that the argument delta is always a
5103 //  multiple of the stack alignment. (Dynamic linkers need this - Darwin's
//  dyld, for example.)
5104 // If a tail called function callee has more arguments than the caller the
5105 // caller needs to make sure that there is room to move the RETADDR to. This is
5106 // achieved by reserving an area the size of the argument delta right after the
5107 // original RETADDR, but before the saved framepointer or the spilled registers
5108 //  e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
5120 /// Align the stack size so that, together with the slot-sized return-address
/// slot already pushed, it is a multiple of the stack alignment; e.g. 16n + 12
/// for a 16-byte alignment requirement with 4-byte slots.
unsigned
5123 X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
5124 SelectionDAG &DAG) const {
5125 const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign();
5126 const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
5127 assert(StackSize % SlotSize == 0 &&
5128 "StackSize must be a multiple of SlotSize");
5129 return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
}
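// Worked example for GetAlignedArgumentStackSize above (illustrative): with a
// 16-byte stack alignment and an 8-byte slot, StackSize = 40 gives
// alignTo(40 + 8, 16) - 8 = 48 - 8 = 40, and StackSize = 48 gives
// alignTo(48 + 8, 16) - 8 = 64 - 8 = 56, so the result plus the return-address
// slot is always a multiple of 16.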
5132 /// Return true if the given stack call argument is already available in the
5133 /// same position (relatively) of the caller's incoming argument stack.
static
5135 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
5136 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
5137 const X86InstrInfo *TII, const CCValAssign &VA) {
5138 unsigned Bytes = Arg.getValueSizeInBits() / 8;
int FI = INT_MAX;
5141 // Look through nodes that don't alter the bits of the incoming value.
5142 unsigned Op = Arg.getOpcode();
5143 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
5144 Arg = Arg.getOperand(0);
Op = Arg.getOpcode();
}
5147 if (Op == ISD::TRUNCATE) {
5148 const SDValue &TruncInput = Arg.getOperand(0);
5149 if (TruncInput.getOpcode() == ISD::AssertZext &&
5150 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
5151 Arg.getValueType()) {
5152 Arg = TruncInput.getOperand(0);
Op = Arg.getOpcode();
}
}
5160 if (Arg.getOpcode() == ISD::CopyFromReg) {
5161 Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
5162 if (!VR.isVirtual())
return false;
5164 MachineInstr *Def = MRI->getVRegDef(VR);
if (!Def)
return false;
5167 if (!Flags.isByVal()) {
5168 if (!TII->isLoadFromStackSlot(*Def, FI))
return false;
} else {
5171 unsigned Opcode = Def->getOpcode();
5172 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
5173 Opcode == X86::LEA64_32r) &&
5174 Def->getOperand(1).isFI()) {
5175 FI = Def->getOperand(1).getIndex();
5176 Bytes = Flags.getByValSize();
} else
return false;
}
5180 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
5181 if (Flags.isByVal())
5182 // ByVal argument is passed in as a pointer but it's now being
5183 // dereferenced. e.g.
5184 // define @foo(%struct.X* %A) {
5185 //   tail call @bar(%struct.X* byval %A)
// }
return false;
5188 SDValue Ptr = Ld->getBasePtr();
5189 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
if (!FINode)
return false;
5192 FI = FINode->getIndex();
5193 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
5194 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
5195 FI = FINode->getIndex();
5196 Bytes = Flags.getByValSize();
} else
return false;
5200 assert(FI != INT_MAX);
5201 if (!MFI.isFixedObjectIndex(FI))
return false;
5204 if (Offset != MFI.getObjectOffset(FI))
return false;
5207 // If this is not byval, check that the argument stack object is immutable.
5208 // inalloca and argument copy elision can create mutable argument stack
5209 // objects. Byval objects can be mutated, but a byval call intends to pass the
// mutated memory.
5211 if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
return false;
5214 if (VA.getLocVT().getFixedSizeInBits() >
5215 Arg.getValueSizeInBits().getFixedValue()) {
5216 // If the argument location is wider than the argument type, check that any
5217 // extension flags match.
5218 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
5219 Flags.isSExt() != MFI.isObjectSExt(FI)) {
return false;
}
}

5224 return Bytes == MFI.getObjectSize(FI);
}
5227 /// Check whether the call is eligible for tail call optimization. Targets
5228 /// that want to do tail call optimization should implement this function.
5229 bool X86TargetLowering::IsEligibleForTailCallOptimization(
5230 SDValue Callee, CallingConv::ID CalleeCC, bool IsCalleePopSRet,
5231 bool isVarArg, Type *RetTy, const SmallVectorImpl<ISD::OutputArg> &Outs,
5232 const SmallVectorImpl<SDValue> &OutVals,
5233 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
5234 if (!mayTailCallThisCC(CalleeCC))
return false;
5237 // If -tailcallopt is specified, make fastcc functions tail-callable.
5238 MachineFunction &MF = DAG.getMachineFunction();
5239 const Function &CallerF = MF.getFunction();
5241 // If the function return type is x86_fp80 and the callee return type is not,
5242 // then the FP_EXTEND of the call result is not a nop. It's not safe to
5243 // perform a tailcall optimization here.
5244 if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
return false;
5247 CallingConv::ID CallerCC = CallerF.getCallingConv();
5248 bool CCMatch = CallerCC == CalleeCC;
5249 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
5250 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
5251 bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
5252 CalleeCC == CallingConv::Tail || CalleeCC == CallingConv::SwiftTail;
5254 // Win64 functions have extra shadow space for argument homing. Don't do the
5255 // sibcall if the caller and callee have mismatched expectations for this
5257 if (IsCalleeWin64 != IsCallerWin64)
return false;
5260 if (IsGuaranteeTCO) {
5261 if (canGuaranteeTCO(CalleeCC) && CCMatch)
return true;
return false;
}
5266 // Look for obvious safe cases to perform tail call optimization that do not
5267 // require ABI changes. This is what gcc calls sibcall.
5269 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
5270 // emit a special epilogue.
5271 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
5272 if (RegInfo->hasStackRealignment(MF))
return false;
5275 // Also avoid sibcall optimization if we're an sret return fn and the callee
5276 // is incompatible. See comment in LowerReturn about why hasStructRetAttr is
5278 if (MF.getInfo<X86MachineFunctionInfo>()->getSRetReturnReg()) {
5279 // For a compatible tail call the callee must return our sret pointer. So it
5280 // needs to be (a) an sret function itself and (b) we pass our sret as its
5281 // sret. Condition #b is harder to determine.
return false;
5283 } else if (IsCalleePopSRet)
5284 // The callee pops an sret, so we cannot tail-call, as our caller doesn't
// pop it.
return false;
5288 // Do not sibcall optimize vararg calls unless all arguments are passed via
5290 LLVMContext &C = *DAG.getContext();
5291 if (isVarArg && !Outs.empty()) {
5292 // Optimizing for varargs on Win64 is unlikely to be safe without
5293 // additional testing.
5294 if (IsCalleeWin64 || IsCallerWin64)
return false;
5297 SmallVector<CCValAssign, 16> ArgLocs;
5298 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
5299 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
5300 for (const auto &VA : ArgLocs)
if (!VA.isRegLoc())
return false;
}
5305 // If the call result is in ST0 / ST1, it needs to be popped off the x87
5306 // stack. Therefore, if it's not used by the call it is not safe to optimize
5307 // this into a sibcall.
5308 bool Unused = false;
5309 for (const auto &In : Ins) {
if (!In.Used) {
Unused = true;
break;
}
}
if (Unused) {
5316 SmallVector<CCValAssign, 16> RVLocs;
5317 CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
5318 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
5319 for (const auto &VA : RVLocs) {
5320 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
return false;
}
}
5325 // Check that the call results are passed in the same way.
5326 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
5327 RetCC_X86, RetCC_X86))
return false;
5329 // The callee has to preserve all registers the caller needs to preserve.
5330 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
5331 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
if (!CCMatch) {
5333 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
5334 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
return false;
}
5338 unsigned StackArgsSize = 0;
5340 // If the callee takes no arguments then go on to check the results of the
5342 if (!Outs.empty()) {
5343 // Check if stack adjustment is needed. For now, do not do this if any
5344 // argument is passed on the stack.
5345 SmallVector<CCValAssign, 16> ArgLocs;
5346 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
5348 // Allocate shadow area for Win64
if (IsCalleeWin64)
5350 CCInfo.AllocateStack(32, Align(8));
5352 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
5353 StackArgsSize = CCInfo.getStackSize();
5355 if (CCInfo.getStackSize()) {
5356 // Check if the arguments are already laid out in the right way as
5357 // the caller's fixed stack objects.
5358 MachineFrameInfo &MFI = MF.getFrameInfo();
5359 const MachineRegisterInfo *MRI = &MF.getRegInfo();
5360 const X86InstrInfo *TII = Subtarget.getInstrInfo();
5361 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
5362 const CCValAssign &VA = ArgLocs[I];
5363 SDValue Arg = OutVals[I];
5364 ISD::ArgFlagsTy Flags = Outs[I].Flags;
5365 if (VA.getLocInfo() == CCValAssign::Indirect)
return false;
5367 if (!VA.isRegLoc()) {
5368 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
TII, VA))
return false;
}
}
}
5375 bool PositionIndependent = isPositionIndependent();
5376 // If the tailcall address may be in a register, then make sure it's
5377 // possible to register allocate for it. In 32-bit, the call address can
5378 // only target EAX, EDX, or ECX since the tail call must be scheduled after
5379 // callee-saved registers are restored. These happen to be the same
5380 // registers used to pass 'inreg' arguments so watch out for those.
5381 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
5382 !isa<ExternalSymbolSDNode>(Callee)) ||
5383 PositionIndependent)) {
5384 unsigned NumInRegs = 0;
5385 // In PIC we need an extra register to formulate the address computation
// for the callee.
5387 unsigned MaxInRegs = PositionIndependent ? 2 : 3;
5389 for (const auto &VA : ArgLocs) {
if (!VA.isRegLoc())
continue;
5392 Register Reg = VA.getLocReg();
switch (Reg) {
default: break;
5395 case X86::EAX: case X86::EDX: case X86::ECX:
5396 if (++NumInRegs == MaxInRegs)
return false;
break;
}
}
}
5403 const MachineRegisterInfo &MRI = MF.getRegInfo();
5404 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
return false;
}
5408 bool CalleeWillPop =
5409 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
5410 MF.getTarget().Options.GuaranteedTailCallOpt);
5412 if (unsigned BytesToPop =
5413 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
5414 // If we have bytes to pop, the callee must pop them.
5415 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
5416 if (!CalleePopMatches)
return false;
5418 } else if (CalleeWillPop && StackArgsSize > 0) {
5419 // If we don't have bytes to pop, make sure the callee doesn't pop any.
return false;
}

return true;
}
FastISel *
5427 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
5428 const TargetLibraryInfo *libInfo) const {
5429 return X86::createFastISel(funcInfo, libInfo);
}
5432 //===----------------------------------------------------------------------===//
5433 // Other Lowering Hooks
5434 //===----------------------------------------------------------------------===//
5436 bool X86::mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
5437 bool AssumeSingleUse) {
5438 if (!AssumeSingleUse && !Op.hasOneUse())
return false;
5440 if (!ISD::isNormalLoad(Op.getNode()))
return false;
5443 // If this is an unaligned vector, make sure the target supports folding it.
5444 auto *Ld = cast<LoadSDNode>(Op.getNode());
5445 if (!Subtarget.hasAVX() && !Subtarget.hasSSEUnalignedMem() &&
5446 Ld->getValueSizeInBits(0) == 128 && Ld->getAlign() < Align(16))
return false;
5449 // TODO: If this is a non-temporal load and the target has an instruction
5450 // for it, it should not be folded. See "useNonTemporalLoad()".
return true;
}
5455 bool X86::mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
5456 const X86Subtarget &Subtarget,
5457 bool AssumeSingleUse) {
5458 assert(Subtarget.hasAVX() && "Expected AVX for broadcast from memory");
5459 if (!X86::mayFoldLoad(Op, Subtarget, AssumeSingleUse))
return false;
5462 // We cannot replace a wide volatile load with a broadcast-from-memory,
5463 // because that would narrow the load, which isn't legal for volatiles.
5464 auto *Ld = cast<LoadSDNode>(Op.getNode());
5465 return !Ld->isVolatile() ||
5466 Ld->getValueSizeInBits(0) == EltVT.getScalarSizeInBits();
}
5469 bool X86::mayFoldIntoStore(SDValue Op) {
5470 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
}
5473 bool X86::mayFoldIntoZeroExtend(SDValue Op) {
5474 if (Op.hasOneUse()) {
5475 unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
5476 return (ISD::ZERO_EXTEND == Opcode);
}
return false;
}
5481 static bool isTargetShuffle(unsigned Opcode) {
switch (Opcode) {
5483 default: return false;
5484 case X86ISD::BLENDI:
5485 case X86ISD::PSHUFB:
5486 case X86ISD::PSHUFD:
5487 case X86ISD::PSHUFHW:
5488 case X86ISD::PSHUFLW:
5490 case X86ISD::INSERTPS:
5491 case X86ISD::EXTRQI:
5492 case X86ISD::INSERTQI:
5493 case X86ISD::VALIGN:
5494 case X86ISD::PALIGNR:
5495 case X86ISD::VSHLDQ:
5496 case X86ISD::VSRLDQ:
5497 case X86ISD::MOVLHPS:
5498 case X86ISD::MOVHLPS:
5499 case X86ISD::MOVSHDUP:
5500 case X86ISD::MOVSLDUP:
5501 case X86ISD::MOVDDUP:
5505 case X86ISD::UNPCKL:
5506 case X86ISD::UNPCKH:
5507 case X86ISD::VBROADCAST:
5508 case X86ISD::VPERMILPI:
5509 case X86ISD::VPERMILPV:
5510 case X86ISD::VPERM2X128:
5511 case X86ISD::SHUF128:
5512 case X86ISD::VPERMIL2:
5513 case X86ISD::VPERMI:
5514 case X86ISD::VPPERM:
5515 case X86ISD::VPERMV:
5516 case X86ISD::VPERMV3:
5517 case X86ISD::VZEXT_MOVL:
5522 static bool isTargetShuffleVariableMask(unsigned Opcode) {
switch (Opcode) {
5524 default: return false;
5526 case X86ISD::PSHUFB:
5527 case X86ISD::VPERMILPV:
5528 case X86ISD::VPERMIL2:
5529 case X86ISD::VPPERM:
5530 case X86ISD::VPERMV:
5531 case X86ISD::VPERMV3:
return true;
5533 // 'Faux' Target Shuffles.
5541 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
5542 MachineFunction &MF = DAG.getMachineFunction();
5543 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
5544 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
5545 int ReturnAddrIndex = FuncInfo->getRAIndex();
5547 if (ReturnAddrIndex == 0) {
5548 // Set up a frame object for the return address.
5549 unsigned SlotSize = RegInfo->getSlotSize();
5550 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
-(int64_t)SlotSize,
false);
5553 FuncInfo->setRAIndex(ReturnAddrIndex);
}
5556 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
5559 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
5560 bool hasSymbolicDisplacement) {
5561 // Offset should fit into 32 bit immediate field.
5562 if (!isInt<32>(Offset))
return false;
5565 // If we don't have a symbolic displacement - we don't have any extra
// restrictions.
5567 if (!hasSymbolicDisplacement)
return true;
5570 // FIXME: Some tweaks might be needed for medium code model.
5571 if (M != CodeModel::Small && M != CodeModel::Kernel)
return false;
5574 // For the small code model, we assume that the latest object is 16MB before
5575 // the end of the 31-bit boundary. We may also accept pretty large negative
5576 // constants, knowing that all objects are in the positive half of the address space.
5577 if (M == CodeModel::Small && Offset < 16*1024*1024)
return true;
5580 // For the kernel code model, we know that all objects reside in the negative
5581 // half of the 32-bit address space. We may not accept negative offsets, since
5582 // they may be just off, but we may accept pretty large positive ones.
5583 if (M == CodeModel::Kernel && Offset >= 0)
return true;

return false;
}
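// For example (illustrative): with a symbolic displacement, the small code
// model accepts Offset = 16*1024*1024 - 1 but rejects Offset = 16*1024*1024,
// while the kernel code model (objects in the negative half of the address
// space) accepts Offset = 4096 but rejects Offset = -1.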
5589 /// Determines whether the callee is required to pop its own arguments.
5590 /// Callee pop is necessary to support tail calls.
5591 bool X86::isCalleePop(CallingConv::ID CallingConv,
5592 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
5593 // If GuaranteeTCO is true, we force some calls to be callee pop so that we
5594 // can guarantee TCO.
5595 if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
return true;
5598 switch (CallingConv) {
default:
return false;
5601 case CallingConv::X86_StdCall:
5602 case CallingConv::X86_FastCall:
5603 case CallingConv::X86_ThisCall:
5604 case CallingConv::X86_VectorCall:
return !is64Bit;
}
}
5609 /// Return true if the condition is a signed comparison operation.
5610 static bool isX86CCSigned(unsigned X86CC) {
switch (X86CC) {
default:
5613 llvm_unreachable("Invalid integer condition!");
case X86::COND_E:
case X86::COND_NE:
case X86::COND_B:
case X86::COND_A:
case X86::COND_BE:
case X86::COND_AE:
return false;
case X86::COND_G:
case X86::COND_GE:
case X86::COND_L:
case X86::COND_LE:
return true;
}
}
5629 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
5630 switch (SetCCOpcode) {
5631 default: llvm_unreachable("Invalid integer condition!");
5632 case ISD::SETEQ: return X86::COND_E;
5633 case ISD::SETGT: return X86::COND_G;
5634 case ISD::SETGE: return X86::COND_GE;
5635 case ISD::SETLT: return X86::COND_L;
5636 case ISD::SETLE: return X86::COND_LE;
5637 case ISD::SETNE: return X86::COND_NE;
5638 case ISD::SETULT: return X86::COND_B;
5639 case ISD::SETUGT: return X86::COND_A;
5640 case ISD::SETULE: return X86::COND_BE;
5641 case ISD::SETUGE: return X86::COND_AE;
}
}
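// For example (illustrative): an unsigned i32 compare `icmp ult %a, %b`
// reaches TranslateIntegerX86CC above as ISD::SETULT and maps to
// X86::COND_B, which is later emitted as a JB/SETB/CMOVB depending on the
// user of the flags.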
5645 /// Do a one-to-one translation of a ISD::CondCode to the X86-specific
5646 /// condition code, returning the condition code and the LHS/RHS of the
5647 /// comparison to make.
5648 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
5649 bool isFP, SDValue &LHS, SDValue &RHS,
5650 SelectionDAG &DAG) {
if (!isFP) {
5652 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5653 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnes()) {
5654 // X > -1 -> X == 0, jump !sign.
5655 RHS = DAG.getConstant(0, DL, RHS.getValueType());
5656 return X86::COND_NS;
5658 if (SetCCOpcode == ISD::SETLT && RHSC->isZero()) {
5659 // X < 0 -> X == 0, jump on sign.
return X86::COND_S;
}
5662 if (SetCCOpcode == ISD::SETGE && RHSC->isZero()) {
5663 // X >= 0 -> X == 0, jump on !sign.
5664 return X86::COND_NS;
}
5666 if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
// X < 1 -> X <= 0
5668 RHS = DAG.getConstant(0, DL, RHS.getValueType());
5669 return X86::COND_LE;
}
}
5673 return TranslateIntegerX86CC(SetCCOpcode);
}
5676 // First determine if it is required or is profitable to flip the operands.
5678 // If LHS is a foldable load, but RHS is not, flip the condition.
5679 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
5680 !ISD::isNON_EXTLoad(RHS.getNode())) {
5681 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
5682 std::swap(LHS, RHS);
5685 switch (SetCCOpcode) {
default: break;
case ISD::SETOLT:
case ISD::SETOLE:
case ISD::SETUGT:
case ISD::SETUGE:
5691 std::swap(LHS, RHS);
break;
}
5695 // On a floating point condition, the flags are set as follows:
// ZF  PF  CF   op
5697 // 0 | 0 | 0 | X > Y
5698 // 0 | 0 | 1 | X < Y
5699 // 1 | 0 | 0 | X == Y
5700 // 1 | 1 | 1 | unordered
5701 switch (SetCCOpcode) {
5702 default: llvm_unreachable("Condcode should be pre-legalized away");
case ISD::SETUEQ:
5704 case ISD::SETEQ:  return X86::COND_E;
5705 case ISD::SETOLT: // flipped
case ISD::SETOGT:
5707 case ISD::SETGT:  return X86::COND_A;
5708 case ISD::SETOLE: // flipped
case ISD::SETOGE:
5710 case ISD::SETGE:  return X86::COND_AE;
5711 case ISD::SETUGT: // flipped
case ISD::SETULT:
5713 case ISD::SETLT:  return X86::COND_B;
5714 case ISD::SETUGE: // flipped
case ISD::SETULE:
5716 case ISD::SETLE:  return X86::COND_BE;
case ISD::SETONE:
5718 case ISD::SETNE:  return X86::COND_NE;
5719 case ISD::SETUO:  return X86::COND_P;
5720 case ISD::SETO:   return X86::COND_NP;
case ISD::SETOEQ:
5722 case ISD::SETUNE: return X86::COND_INVALID;
}
}
5726 /// Is there a floating point cmov for the specific X86 condition code?
5727 /// Current x86 isa includes the following FP cmov instructions:
5728 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
5729 static bool hasFPCMov(unsigned X86CC) {
switch (X86CC) {
default:
return false;
case X86::COND_B:
case X86::COND_BE:
case X86::COND_E:
case X86::COND_P:
case X86::COND_A:
case X86::COND_AE:
case X86::COND_NE:
case X86::COND_NP:
return true;
}
}
5745 static bool useVPTERNLOG(const X86Subtarget &Subtarget, MVT VT) {
5746 return Subtarget.hasVLX() || Subtarget.canExtendTo512DQ() ||
5747 VT.is512BitVector();
}
5750 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
const CallInst &I,
5752 MachineFunction &MF,
5753 unsigned Intrinsic) const {
5754 Info.flags = MachineMemOperand::MONone;
Info.offset = 0;
5757 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
if (!IntrData) {
5759 switch (Intrinsic) {
5760 case Intrinsic::x86_aesenc128kl:
5761 case Intrinsic::x86_aesdec128kl:
5762 Info.opc = ISD::INTRINSIC_W_CHAIN;
5763 Info.ptrVal = I.getArgOperand(1);
5764 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
5765 Info.align = Align(1);
5766 Info.flags |= MachineMemOperand::MOLoad;
return true;
5768 case Intrinsic::x86_aesenc256kl:
5769 case Intrinsic::x86_aesdec256kl:
5770 Info.opc = ISD::INTRINSIC_W_CHAIN;
5771 Info.ptrVal = I.getArgOperand(1);
5772 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
5773 Info.align = Align(1);
5774 Info.flags |= MachineMemOperand::MOLoad;
    return true;
5776 case Intrinsic::x86_aesencwide128kl:
5777 case Intrinsic::x86_aesdecwide128kl:
5778 Info.opc = ISD::INTRINSIC_W_CHAIN;
5779 Info.ptrVal = I.getArgOperand(0);
5780 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
5781 Info.align = Align(1);
5782 Info.flags |= MachineMemOperand::MOLoad;
    return true;
5784 case Intrinsic::x86_aesencwide256kl:
5785 case Intrinsic::x86_aesdecwide256kl:
5786 Info.opc = ISD::INTRINSIC_W_CHAIN;
5787 Info.ptrVal = I.getArgOperand(0);
5788 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
5789 Info.align = Align(1);
5790 Info.flags |= MachineMemOperand::MOLoad;
    return true;
5792 case Intrinsic::x86_cmpccxadd32:
5793 case Intrinsic::x86_cmpccxadd64:
5794 case Intrinsic::x86_atomic_bts:
5795 case Intrinsic::x86_atomic_btc:
5796 case Intrinsic::x86_atomic_btr: {
5797 Info.opc = ISD::INTRINSIC_W_CHAIN;
5798 Info.ptrVal = I.getArgOperand(0);
5799 unsigned Size = I.getType()->getScalarSizeInBits();
5800 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
5801 Info.align = Align(Size);
5802 Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
5803 MachineMemOperand::MOVolatile;
    return true;
  }
5806 case Intrinsic::x86_atomic_bts_rm:
5807 case Intrinsic::x86_atomic_btc_rm:
5808 case Intrinsic::x86_atomic_btr_rm: {
5809 Info.opc = ISD::INTRINSIC_W_CHAIN;
5810 Info.ptrVal = I.getArgOperand(0);
5811 unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
5812 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
5813 Info.align = Align(Size);
5814 Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
5815 MachineMemOperand::MOVolatile;
    return true;
  }
5818 case Intrinsic::x86_aadd32:
5819 case Intrinsic::x86_aadd64:
5820 case Intrinsic::x86_aand32:
5821 case Intrinsic::x86_aand64:
5822 case Intrinsic::x86_aor32:
5823 case Intrinsic::x86_aor64:
5824 case Intrinsic::x86_axor32:
5825 case Intrinsic::x86_axor64:
5826 case Intrinsic::x86_atomic_add_cc:
5827 case Intrinsic::x86_atomic_sub_cc:
5828 case Intrinsic::x86_atomic_or_cc:
5829 case Intrinsic::x86_atomic_and_cc:
5830 case Intrinsic::x86_atomic_xor_cc: {
5831 Info.opc = ISD::INTRINSIC_W_CHAIN;
5832 Info.ptrVal = I.getArgOperand(0);
5833 unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
5834 Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
5835 Info.align = Align(Size);
5836 Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
5837 MachineMemOperand::MOVolatile;
    return true;
  }
  }
  return false;
  }
5844 switch (IntrData->Type) {
5845 case TRUNCATE_TO_MEM_VI8:
5846 case TRUNCATE_TO_MEM_VI16:
5847 case TRUNCATE_TO_MEM_VI32: {
5848 Info.opc = ISD::INTRINSIC_VOID;
5849 Info.ptrVal = I.getArgOperand(0);
5850 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
5851 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
5852 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
      ScalarVT = MVT::i8;
5854 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
5855 ScalarVT = MVT::i16;
5856 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
5857 ScalarVT = MVT::i32;
5859 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
5860 Info.align = Align(1);
5861 Info.flags |= MachineMemOperand::MOStore;
    break;
  }
  case GATHER:
  case GATHER_AVX2: {
5866 Info.opc = ISD::INTRINSIC_W_CHAIN;
5867 Info.ptrVal = nullptr;
5868 MVT DataVT = MVT::getVT(I.getType());
5869 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5870 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5871 IndexVT.getVectorNumElements());
5872 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5873 Info.align = Align(1);
5874 Info.flags |= MachineMemOperand::MOLoad;
    break;
  }
  case SCATTER: {
5878 Info.opc = ISD::INTRINSIC_VOID;
5879 Info.ptrVal = nullptr;
5880 MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
5881 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5882 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5883 IndexVT.getVectorNumElements());
5884 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5885 Info.align = Align(1);
5886 Info.flags |= MachineMemOperand::MOStore;
    break;
  }
  default:
    return false;
  }

  return true;
5896 /// Returns true if the target can instruction select the
5897 /// specified FP immediate natively. If false, the legalizer will
5898 /// materialize the FP immediate as a load from a constant pool.
5899 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
5900 bool ForCodeSize) const {
5901 for (const APFloat &FPImm : LegalFPImmediates)
5902 if (Imm.bitwiseIsEqual(FPImm))
      return true;

  return false;
5907 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
5908 ISD::LoadExtType ExtTy,
                                              EVT NewVT) const {
5910 assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
5912 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
5913 // relocation target a movq or addq instruction: don't let the load shrink.
5914 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
5915 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
5916 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
5917 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
5919 // If this is an (1) AVX vector load with (2) multiple uses and (3) all of
5920 // those uses are extracted directly into a store, then the extract + store
5921 // can be store-folded. Therefore, it's probably not worth splitting the load.
5922 EVT VT = Load->getValueType(0);
5923 if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
5924 for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
5925 // Skip uses of the chain value. Result 0 of the node is the load value.
5926 if (UI.getUse().getResNo() != 0)
        continue;
5929 // If this use is not an extract + store, it's probably worth splitting.
5930 if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
5931 UI->use_begin()->getOpcode() != ISD::STORE)
        return true;
5934 // All non-chain uses are extract + store.
    return false;
  }

  return true;
5941 /// Returns true if it is beneficial to convert a load of a constant
5942 /// to just the constant itself.
5943 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
5945 assert(Ty->isIntegerTy());
5947 unsigned BitSize = Ty->getPrimitiveSizeInBits();
5948 if (BitSize == 0 || BitSize > 64)
    return false;
  return true;
5953 bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
5954 // If we are using XMM registers in the ABI and the condition of the select is
5955 // a floating-point compare and we have blendv or conditional move, then it is
5956 // cheaper to select instead of doing a cross-register move and creating a
5957 // load that depends on the compare result.
5958 bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
5959 return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
5962 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
5963 // TODO: It might be a win to ease or lift this restriction, but the generic
5964 // folds in DAGCombiner conflict with vector folds for an AVX512 target.
5965 if (VT.isVector() && Subtarget.hasAVX512())
    return false;

  return true;
5971 bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                               SDValue C) const {
5973 // TODO: We handle scalars using custom code, but generic combining could make
5974 // that unnecessary.
  APInt MulC;
5976 if (!ISD::isConstantSplatVector(C.getNode(), MulC))
    return false;
  // Find the type this will be legalized to. Otherwise we might prematurely
  // convert this to shl+add/sub and then still have to type legalize those ops.
  // Another choice would be to defer the decision for illegal types until
  // after type legalization. But constant splat vectors of i64 can't make it
  // through type legalization on 32-bit targets so we would need to special
  // case vXi64.
5985 while (getTypeAction(Context, VT) != TypeLegal)
5986 VT = getTypeToTransformTo(Context, VT);
5988 // If vector multiply is legal, assume that's faster than shl + add/sub.
5989 // Multiply is a complex op with higher latency and lower throughput in
5990 // most implementations, sub-vXi32 vector multiplies are always fast,
5991 // vXi32 mustn't have a SlowMULLD implementation, and anything larger (vXi64)
5992 // is always going to be slow.
5993 unsigned EltSizeInBits = VT.getScalarSizeInBits();
5994 if (isOperationLegal(ISD::MUL, VT) && EltSizeInBits <= 32 &&
5995 (EltSizeInBits != 32 || !Subtarget.isPMULLDSlow()))
    return false;
5998 // shl+add, shl+sub, shl+add+neg
5999 return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
6000 (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
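// For example, mul X, 9 can decompose to (X << 3) + X since MulC - 1 == 8 is
// a power of 2, and mul X, -7 can decompose to X - (X << 3) since 1 - MulC == 8.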
6003 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
6004 unsigned Index) const {
6005 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
    return false;
  // Mask vectors support all subregister combinations and operations that
  // extract half of a vector.
6010 if (ResVT.getVectorElementType() == MVT::i1)
6011 return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
6012 (Index == ResVT.getVectorNumElements()));
6014 return (Index % ResVT.getVectorNumElements()) == 0;
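// For example, extracting v2i64 from v4i64 at index 2 is cheap (it can map to
// a 128-bit subregister or a single VEXTRACTI128), while index 1 straddles
// the halves and is not.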
6017 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
6018 unsigned Opc = VecOp.getOpcode();
6020 // Assume target opcodes can't be scalarized.
6021 // TODO - do we have any exceptions?
6022 if (Opc >= ISD::BUILTIN_OP_END)
    return false;
6025 // If the vector op is not supported, try to convert to scalar.
6026 EVT VecVT = VecOp.getValueType();
6027 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
    return true;
6030 // If the vector op is supported, but the scalar op is not, the transform may
6031 // not be worthwhile.
6032 EVT ScalarVT = VecVT.getScalarType();
6033 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
6036 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
                                             bool MathUsed) const {
6038 // TODO: Allow vectors?
  if (VT.isVector())
    return false;
6041 return VT.isSimple() || !isOperationExpand(Opcode, VT);
6044 bool X86TargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
6045 // Speculate cttz only if we can directly use TZCNT or can promote to i32.
6046 return Subtarget.hasBMI() ||
6047 (!Ty->isVectorTy() && Ty->getScalarSizeInBits() < 32);
6050 bool X86TargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
6051 // Speculate ctlz only if we can directly use LZCNT.
6052 return Subtarget.hasLZCNT();
6055 bool X86TargetLowering::ShouldShrinkFPConstant(EVT VT) const {
6056 // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
6057 // expensive than a straight movsd. On the other hand, it's important to
6058 // shrink long double fp constant since fldt is very slow.
6059 return !Subtarget.hasSSE2() || VT == MVT::f80;
6062 bool X86TargetLowering::isScalarFPTypeInSSEReg(EVT VT) const {
6063 return (VT == MVT::f64 && Subtarget.hasSSE2()) ||
6064 (VT == MVT::f32 && Subtarget.hasSSE1()) || VT == MVT::f16;
6067 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
6068 const SelectionDAG &DAG,
6069 const MachineMemOperand &MMO) const {
6070 if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
6071 BitcastVT.getVectorElementType() == MVT::i1)
6074 if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
6077 // If both types are legal vectors, it's always ok to convert them.
6078 if (LoadVT.isVector() && BitcastVT.isVector() &&
6079 isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
6082 return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
6085 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
6086 const MachineFunction &MF) const {
  // Do not merge to float value size (128 bits) if no implicit
  // float attribute is set.
6089 bool NoFloat = MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat);
  if (NoFloat) {
6092 unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
6093 return (MemVT.getSizeInBits() <= MaxIntSize);
  }
  // Make sure we don't merge to greater than our preferred vector width.
6097 if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
    return false;
  return true;
6103 bool X86TargetLowering::isCtlzFast() const {
6104 return Subtarget.hasFastLZCNT();
6107 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
6108 const Instruction &AndI) const {
  return true;
6112 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
6113 EVT VT = Y.getValueType();
  if (VT.isVector())
    return false;
6118 if (!Subtarget.hasBMI())
    return false;
6121 // There are only 32-bit and 64-bit forms for 'andn'.
6122 if (VT != MVT::i32 && VT != MVT::i64)
    return false;
6125 return !isa<ConstantSDNode>(Y);
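// For example, with BMI, testing ((X & ~Y) == 0) can select ANDN, which sets
// ZF itself so no separate TEST is needed; a constant Y is rejected above
// because it folds into an immediate AND/TEST instead.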
6128 bool X86TargetLowering::hasAndNot(SDValue Y) const {
6129 EVT VT = Y.getValueType();
  if (!VT.isVector())
6132 return hasAndNotCompare(Y);
6136 if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
    return false;
6139 if (VT == MVT::v4i32)
    return true;
6142 return Subtarget.hasSSE2();
6145 bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
6146 return X.getValueType().isScalarInteger(); // 'bt'
6149 bool X86TargetLowering::
6150 shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
6151 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
6152 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
6153 SelectionDAG &DAG) const {
  // Does the baseline recommend not performing the fold by default?
6155 if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
6156 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
    return false;
6158 // For scalars this transform is always beneficial.
6159 if (X.getValueType().isScalarInteger())
    return true;
6161 // If all the shift amounts are identical, then transform is beneficial even
6162 // with rudimentary SSE2 shifts.
6163 if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
    return true;
  // If we have AVX2 with its powerful shift operations, then it's also good.
6166 if (Subtarget.hasAVX2())
    return true;
6168 // Pre-AVX2 vector codegen for this pattern is best for variant with 'shl'.
6169 return NewShiftOpcode == ISD::SHL;
6172 bool X86TargetLowering::preferScalarizeSplat(SDNode *N) const {
6173 return N->getOpcode() != ISD::FP_EXTEND;
6176 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
6177 const SDNode *N, CombineLevel Level) const {
6178 assert(((N->getOpcode() == ISD::SHL &&
6179 N->getOperand(0).getOpcode() == ISD::SRL) ||
6180 (N->getOpcode() == ISD::SRL &&
6181 N->getOperand(0).getOpcode() == ISD::SHL)) &&
6182 "Expected shift-shift mask");
6183 // TODO: Should we always create i64 masks? Or only folded immediates?
6184 EVT VT = N->getValueType(0);
6185 if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
6186 (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
6187 // Only fold if the shift values are equal - so it folds to AND.
6188 // TODO - we should fold if either is a non-uniform vector but we don't do
6189 // the fold for non-splats yet.
6190 return N->getOperand(1) == N->getOperand(0).getOperand(1);
6192 return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
6195 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
6196 EVT VT = Y.getValueType();
6198 // For vectors, we don't have a preference, but we probably want a mask.
  if (VT.isVector())
    return false;
6202 // 64-bit shifts on 32-bit targets produce really bad bloated code.
6203 if (VT == MVT::i64 && !Subtarget.is64Bit())
    return false;

  return true;
6209 TargetLowering::ShiftLegalizationStrategy
6210 X86TargetLowering::preferredShiftLegalizationStrategy(
6211 SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const {
6212 if (DAG.getMachineFunction().getFunction().hasMinSize() &&
6213 !Subtarget.isOSWindows())
6214 return ShiftLegalizationStrategy::LowerToLibcall;
6215 return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
                                                            ExpansionFactor);
6219 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
6220 // Any legal vector type can be splatted more efficiently than
6221 // loading/spilling from memory.
6222 return isTypeLegal(VT);
6225 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
6226 MVT VT = MVT::getIntegerVT(NumBits);
6227 if (isTypeLegal(VT))
    return VT;
6230 // PMOVMSKB can handle this.
6231 if (NumBits == 128 && isTypeLegal(MVT::v16i8))
    return MVT::v16i8;
6234 // VPMOVMSKB can handle this.
6235 if (NumBits == 256 && isTypeLegal(MVT::v32i8))
    return MVT::v32i8;
6238 // TODO: Allow 64-bit type for 32-bit target.
6239 // TODO: 512-bit types should be allowed, but make sure that those
6240 // cases are handled in combineVectorSizedSetCCEquality().
6242 return MVT::INVALID_SIMPLE_VALUE_TYPE;
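// For example, a 16-byte memcmp-equality test can lower to PCMPEQB on v16i8,
// PMOVMSKB of the result into a GPR, then CMP of the mask against 0xFFFF.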
/// Return true if Val is the undef sentinel value or equal to the specified
/// value.
6246 static bool isUndefOrEqual(int Val, int CmpVal) {
6247 return ((Val == SM_SentinelUndef) || (Val == CmpVal));
6250 /// Return true if every element in Mask is the undef sentinel value or equal to
6251 /// the specified value.
6252 static bool isUndefOrEqual(ArrayRef<int> Mask, int CmpVal) {
6253 return llvm::all_of(Mask, [CmpVal](int M) {
6254 return (M == SM_SentinelUndef) || (M == CmpVal);
6258 /// Return true if every element in Mask, beginning from position Pos and ending
6259 /// in Pos+Size is the undef sentinel value or equal to the specified value.
6260 static bool isUndefOrEqualInRange(ArrayRef<int> Mask, int CmpVal, unsigned Pos,
6262 return llvm::all_of(Mask.slice(Pos, Size),
6263 [CmpVal](int M) { return isUndefOrEqual(M, CmpVal); });
/// Return true if Val is either the undef or zero sentinel value.
6267 static bool isUndefOrZero(int Val) {
6268 return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
6271 /// Return true if every element in Mask, beginning from position Pos and ending
6272 /// in Pos+Size is the undef sentinel value.
6273 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
6274 return llvm::all_of(Mask.slice(Pos, Size),
6275 [](int M) { return M == SM_SentinelUndef; });
6278 /// Return true if the mask creates a vector whose lower half is undefined.
6279 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
6280 unsigned NumElts = Mask.size();
6281 return isUndefInRange(Mask, 0, NumElts / 2);
6284 /// Return true if the mask creates a vector whose upper half is undefined.
6285 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
6286 unsigned NumElts = Mask.size();
6287 return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
/// Return true if Val falls within the specified range [Low, Hi).
6291 static bool isInRange(int Val, int Low, int Hi) {
6292 return (Val >= Low && Val < Hi);
/// Return true if the value of any element in Mask falls within the specified
/// range [Low, Hi).
6297 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
6298 return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
6301 /// Return true if the value of any element in Mask is the zero sentinel value.
6302 static bool isAnyZero(ArrayRef<int> Mask) {
6303 return llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
6306 /// Return true if the value of any element in Mask is the zero or undef
6307 /// sentinel values.
6308 static bool isAnyZeroOrUndef(ArrayRef<int> Mask) {
6309 return llvm::any_of(Mask, [](int M) {
6310 return M == SM_SentinelZero || M == SM_SentinelUndef;
6314 /// Return true if Val is undef or if its value falls within the
/// specified range [Low, Hi).
6316 static bool isUndefOrInRange(int Val, int Low, int Hi) {
6317 return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
6320 /// Return true if every element in Mask is undef or if its value
/// falls within the specified range [Low, Hi).
6322 static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
6323 return llvm::all_of(
6324 Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
6327 /// Return true if Val is undef, zero or if its value falls within the
/// specified range [Low, Hi).
6329 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
6330 return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
6333 /// Return true if every element in Mask is undef, zero or if its value
/// falls within the specified range [Low, Hi).
6335 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
6336 return llvm::all_of(
6337 Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
6340 /// Return true if every element in Mask, beginning
6341 /// from position Pos and ending in Pos + Size, falls within the specified
6342 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
6343 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
6344 unsigned Size, int Low, int Step = 1) {
6345 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
6346 if (!isUndefOrEqual(Mask[i], Low))
      return false;

  return true;
6351 /// Return true if every element in Mask, beginning
6352 /// from position Pos and ending in Pos+Size, falls within the specified
/// sequential range [Low, Low+Size), or is undef or is zero.
6354 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
6355 unsigned Size, int Low,
                                             int Step = 1) {
6357 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
6358 if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
      return false;

  return true;
6363 /// Return true if every element in Mask, beginning
6364 /// from position Pos and ending in Pos+Size is undef or is zero.
6365 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
                                 unsigned Size) {
6367 return llvm::all_of(Mask.slice(Pos, Size), isUndefOrZero);
6370 /// Helper function to test whether a shuffle mask could be
6371 /// simplified by widening the elements being shuffled.
6373 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
6374 /// leaves it in an unspecified state.
6376 /// NOTE: This must handle normal vector shuffle masks and *target* vector
6377 /// shuffle masks. The latter have the special property of a '-2' representing
6378 /// a zero-ed lane of a vector.
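/// For example, <2, 3, 0, 1> widens to <1, 0>, and <-1, 3, 0, -1> widens to
/// <1, 0> using the defined elements, but <0, 0, 1, 1> cannot be widened
/// because its element pairs are not adjacent aligned pairs.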
6379 static bool canWidenShuffleElements(ArrayRef<int> Mask,
6380 SmallVectorImpl<int> &WidenedMask) {
6381 WidenedMask.assign(Mask.size() / 2, 0);
6382 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
    int M0 = Mask[i];
6384 int M1 = Mask[i + 1];
    // If both elements are undef, it's trivial.
6387 if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
6388 WidenedMask[i / 2] = SM_SentinelUndef;
      continue;
6392 // Check for an undef mask and a mask value properly aligned to fit with
6393 // a pair of values. If we find such a case, use the non-undef mask's value.
6394 if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
6395 WidenedMask[i / 2] = M1 / 2;
      continue;
6398 if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
6399 WidenedMask[i / 2] = M0 / 2;
      continue;
6403 // When zeroing, we need to spread the zeroing across both lanes to widen.
6404 if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
6405 if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
6406 (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
6407 WidenedMask[i / 2] = SM_SentinelZero;
        continue;
      }
      return false;
    // Finally, check if the two mask values are adjacent and aligned with
    // a pair.
6415 if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
6416 WidenedMask[i / 2] = M0 / 2;
      continue;
6420 // Otherwise we can't safely widen the elements used in this shuffle.
    return false;
6423 assert(WidenedMask.size() == Mask.size() / 2 &&
6424 "Incorrect size of mask after widening the elements!");
6429 static bool canWidenShuffleElements(ArrayRef<int> Mask,
6430 const APInt &Zeroable,
                                    bool V2IsZero,
6432 SmallVectorImpl<int> &WidenedMask) {
6433 // Create an alternative mask with info about zeroable elements.
6434 // Here we do not set undef elements as zeroable.
6435 SmallVector<int, 64> ZeroableMask(Mask);
  if (V2IsZero) {
6437 assert(!Zeroable.isZero() && "V2's non-undef elements are used?!");
6438 for (int i = 0, Size = Mask.size(); i != Size; ++i)
6439 if (Mask[i] != SM_SentinelUndef && Zeroable[i])
6440 ZeroableMask[i] = SM_SentinelZero;
6442 return canWidenShuffleElements(ZeroableMask, WidenedMask);
6445 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
6446 SmallVector<int, 32> WidenedMask;
6447 return canWidenShuffleElements(Mask, WidenedMask);
// Attempt to narrow/widen a shuffle mask until it matches the target number of
// elements.
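// For example, scaling <0, 2> from 2 to 4 elements narrows it to <0, 1, 4, 5>,
// while scaling <0, 1, 4, 5> from 4 down to 2 widens it back to <0, 2>.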
6452 static bool scaleShuffleElements(ArrayRef<int> Mask, unsigned NumDstElts,
6453 SmallVectorImpl<int> &ScaledMask) {
6454 unsigned NumSrcElts = Mask.size();
6455 assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
6456 "Illegal shuffle scale factor");
6458 // Narrowing is guaranteed to work.
6459 if (NumDstElts >= NumSrcElts) {
6460 int Scale = NumDstElts / NumSrcElts;
6461 llvm::narrowShuffleMaskElts(Scale, Mask, ScaledMask);
    return true;
6465 // We have to repeat the widening until we reach the target size, but we can
6466 // split out the first widening as it sets up ScaledMask for us.
6467 if (canWidenShuffleElements(Mask, ScaledMask)) {
6468 while (ScaledMask.size() > NumDstElts) {
6469 SmallVector<int, 16> WidenedMask;
6470 if (!canWidenShuffleElements(ScaledMask, WidenedMask))
        return false;
6472 ScaledMask = std::move(WidenedMask);
    }
    return true;
  }

  return false;
6480 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
6481 bool X86::isZeroNode(SDValue Elt) {
6482 return isNullConstant(Elt) || isNullFPConstant(Elt);
6485 // Build a vector of constants.
6486 // Use an UNDEF node if MaskElt == -1.
6487 // Split 64-bit constants in the 32-bit mode.
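// For example, on a 32-bit target a v2i64 constant <1, 2> is emitted as the
// v4i32 build vector <1, 0, 2, 0> and bitcast back to v2i64.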
6488 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
6489 const SDLoc &dl, bool IsMask = false) {
6491 SmallVector<SDValue, 32> Ops;
  bool Split = false;
6494 MVT ConstVecVT = VT;
6495 unsigned NumElts = VT.getVectorNumElements();
6496 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
6497 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
6498 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
    Split = true;
6502 MVT EltVT = ConstVecVT.getVectorElementType();
6503 for (unsigned i = 0; i < NumElts; ++i) {
6504 bool IsUndef = Values[i] < 0 && IsMask;
6505 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
6506 DAG.getConstant(Values[i], dl, EltVT);
6507 Ops.push_back(OpNode);
    if (Split)
6509 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
6510 DAG.getConstant(0, dl, EltVT));
6512 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
  if (Split)
6514 ConstsNode = DAG.getBitcast(VT, ConstsNode);
  return ConstsNode;
6518 static SDValue getConstVector(ArrayRef<APInt> Bits, const APInt &Undefs,
6519 MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
6520 assert(Bits.size() == Undefs.getBitWidth() &&
6521 "Unequal constant and undef arrays");
6522 SmallVector<SDValue, 32> Ops;
  bool Split = false;
6525 MVT ConstVecVT = VT;
6526 unsigned NumElts = VT.getVectorNumElements();
6527 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
6528 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
6529 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
    Split = true;
6533 MVT EltVT = ConstVecVT.getVectorElementType();
6534 for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
    if (Undefs[i]) {
6536 Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
      continue;
    }
6539 const APInt &V = Bits[i];
6540 assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
    if (Split) {
6542 Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
6543 Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
6544 } else if (EltVT == MVT::f32) {
6545 APFloat FV(APFloat::IEEEsingle(), V);
6546 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
6547 } else if (EltVT == MVT::f64) {
6548 APFloat FV(APFloat::IEEEdouble(), V);
6549 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
6551 Ops.push_back(DAG.getConstant(V, dl, EltVT));
6555 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
6556 return DAG.getBitcast(VT, ConstsNode);
6559 static SDValue getConstVector(ArrayRef<APInt> Bits, MVT VT,
6560 SelectionDAG &DAG, const SDLoc &dl) {
6561 APInt Undefs = APInt::getZero(Bits.size());
6562 return getConstVector(Bits, Undefs, VT, DAG, dl);
6565 /// Returns a vector of specified type with all zero elements.
6566 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
6567 SelectionDAG &DAG, const SDLoc &dl) {
6568 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
6569 VT.getVectorElementType() == MVT::i1) &&
6570 "Unexpected vector type");
6572 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
6573 // type. This ensures they get CSE'd. But if the integer type is not
6574 // available, use a floating-point +0.0 instead.
  SDValue Vec;
6576 if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
6577 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
6578 } else if (VT.isFloatingPoint()) {
6579 Vec = DAG.getConstantFP(+0.0, dl, VT);
6580 } else if (VT.getVectorElementType() == MVT::i1) {
6581 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
6582 "Unexpected vector type");
6583 Vec = DAG.getConstant(0, dl, VT);
6585 unsigned Num32BitElts = VT.getSizeInBits() / 32;
6586 Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
6588 return DAG.getBitcast(VT, Vec);
// Helper to determine if the ops are all extract_subvector nodes that come
// from a single source. If we allow commute, they don't have to be in order
// (Lo/Hi).
6593 static SDValue getSplitVectorSrc(SDValue LHS, SDValue RHS, bool AllowCommute) {
6594 if (LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
6595 RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
6596 LHS.getValueType() != RHS.getValueType() ||
6597 LHS.getOperand(0) != RHS.getOperand(0))
    return SDValue();
6600 SDValue Src = LHS.getOperand(0);
6601 if (Src.getValueSizeInBits() != (LHS.getValueSizeInBits() * 2))
    return SDValue();
6604 unsigned NumElts = LHS.getValueType().getVectorNumElements();
6605 if ((LHS.getConstantOperandAPInt(1) == 0 &&
6606 RHS.getConstantOperandAPInt(1) == NumElts) ||
6607 (AllowCommute && RHS.getConstantOperandAPInt(1) == 0 &&
6608 LHS.getConstantOperandAPInt(1) == NumElts))
    return Src;

  return SDValue();
6614 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
6615 const SDLoc &dl, unsigned vectorWidth) {
6616 EVT VT = Vec.getValueType();
6617 EVT ElVT = VT.getVectorElementType();
6618 unsigned Factor = VT.getSizeInBits() / vectorWidth;
6619 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
6620 VT.getVectorNumElements() / Factor);
6622 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
6623 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
6624 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
6626 // This is the index of the first element of the vectorWidth-bit chunk
  // we want. Since ElemsPerChunk is a power of 2, we just need to clear the
  // low bits.
6628 IdxVal &= ~(ElemsPerChunk - 1);
6630 // If the input is a buildvector just emit a smaller one.
6631 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
6632 return DAG.getBuildVector(ResultVT, dl,
6633 Vec->ops().slice(IdxVal, ElemsPerChunk));
6635 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
6636 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
6639 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
6640 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
6641 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
6642 /// instructions or a simple subregister reference. Idx is an index in the
6643 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
6644 /// lowering EXTRACT_VECTOR_ELT operations easier.
6645 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
6646 SelectionDAG &DAG, const SDLoc &dl) {
6647 assert((Vec.getValueType().is256BitVector() ||
6648 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
6649 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
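// For example, extract128BitVector(v8i32 Vec, 5) rounds the index down to 4
// and extracts elements [4,8) as v4i32, matching VEXTRACTF128/VEXTRACTI128
// with immediate 1.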
6652 /// Generate a DAG to grab 256-bits from a 512-bit vector.
6653 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
6654 SelectionDAG &DAG, const SDLoc &dl) {
6655 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
6656 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
6659 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
6660 SelectionDAG &DAG, const SDLoc &dl,
6661 unsigned vectorWidth) {
6662 assert((vectorWidth == 128 || vectorWidth == 256) &&
6663 "Unsupported vector width");
  // Inserting UNDEF leaves Result unchanged.
  if (Vec.isUndef())
    return Result;
6667 EVT VT = Vec.getValueType();
6668 EVT ElVT = VT.getVectorElementType();
6669 EVT ResultVT = Result.getValueType();
6671 // Insert the relevant vectorWidth bits.
6672 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
6673 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
6675 // This is the index of the first element of the vectorWidth-bit chunk
  // we want. Since ElemsPerChunk is a power of 2, we just need to clear the
  // low bits.
6677 IdxVal &= ~(ElemsPerChunk - 1);
6679 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
6680 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
6683 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
6684 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
6685 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
6686 /// simple superregister reference. Idx is an index in the 128 bits
6687 /// we want. It need not be aligned to a 128-bit boundary. That makes
6688 /// lowering INSERT_VECTOR_ELT operations easier.
6689 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
6690 SelectionDAG &DAG, const SDLoc &dl) {
6691 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
6692 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
6695 /// Widen a vector to a larger size with the same scalar type, with the new
6696 /// elements either zero or undef.
6697 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
6698 const X86Subtarget &Subtarget, SelectionDAG &DAG,
                              const SDLoc &dl) {
6700 assert(Vec.getValueSizeInBits().getFixedValue() < VT.getFixedSizeInBits() &&
6701 Vec.getValueType().getScalarType() == VT.getScalarType() &&
6702 "Unsupported vector widening type");
6703 SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
                                  : DAG.getUNDEF(VT);
6705 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
6706 DAG.getIntPtrConstant(0, dl));
6709 /// Widen a vector to a larger size with the same scalar type, with the new
6710 /// elements either zero or undef.
6711 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
6712 const X86Subtarget &Subtarget, SelectionDAG &DAG,
6713 const SDLoc &dl, unsigned WideSizeInBits) {
6714 assert(Vec.getValueSizeInBits() < WideSizeInBits &&
6715 (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
6716 "Unsupported vector widening type");
6717 unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
6718 MVT SVT = Vec.getSimpleValueType().getScalarType();
6719 MVT VT = MVT::getVectorVT(SVT, WideNumElts);
6720 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
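// For example, widening a v4i32 value to 256 bits with ZeroNewElements=true
// yields a v8i32 of <x0, x1, x2, x3, 0, 0, 0, 0>, which isel can often fold
// into a zero-extending move.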
6723 // Helper function to collect subvector ops that are concatenated together,
6724 // either by ISD::CONCAT_VECTORS or a ISD::INSERT_SUBVECTOR series.
6725 // The subvectors in Ops are guaranteed to be the same type.
6726 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
6727 SelectionDAG &DAG) {
6728 assert(Ops.empty() && "Expected an empty ops vector");
6730 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
6731 Ops.append(N->op_begin(), N->op_end());
    return true;
6735 if (N->getOpcode() == ISD::INSERT_SUBVECTOR) {
6736 SDValue Src = N->getOperand(0);
6737 SDValue Sub = N->getOperand(1);
6738 const APInt &Idx = N->getConstantOperandAPInt(2);
6739 EVT VT = Src.getValueType();
6740 EVT SubVT = Sub.getValueType();
6742 // TODO - Handle more general insert_subvector chains.
6743 if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2)) {
6744 // insert_subvector(undef, x, lo)
6745 if (Idx == 0 && Src.isUndef()) {
        Ops.push_back(Sub);
6747 Ops.push_back(DAG.getUNDEF(SubVT));
        return true;
      }
6750 if (Idx == (VT.getVectorNumElements() / 2)) {
6751 // insert_subvector(insert_subvector(undef, x, lo), y, hi)
6752 if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
6753 Src.getOperand(1).getValueType() == SubVT &&
6754 isNullConstant(Src.getOperand(2))) {
6755 Ops.push_back(Src.getOperand(1));
          Ops.push_back(Sub);
          return true;
        }
6759 // insert_subvector(x, extract_subvector(x, lo), hi)
6760 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6761 Sub.getOperand(0) == Src && isNullConstant(Sub.getOperand(1))) {
          Ops.append(2, Sub);
          return true;
        }
6765 // insert_subvector(undef, x, hi)
6766 if (Src.isUndef()) {
6767 Ops.push_back(DAG.getUNDEF(SubVT));
          Ops.push_back(Sub);
          return true;
        }
      }
    }
  }

  return false;
// Helper to check if \p V can be split into subvectors and the upper
// subvectors are all undef, in which case return the lower subvectors.
6780 static bool isUpperSubvectorUndef(SDValue V, SmallVectorImpl<SDValue> &LowerOps,
6781 SelectionDAG &DAG) {
6782 SmallVector<SDValue> SubOps;
6783 if (!collectConcatOps(V.getNode(), SubOps, DAG))
    return false;
6786 unsigned NumSubOps = SubOps.size();
6787 assert((NumSubOps % 2) == 0 && "Unexpected number of subvectors");
6789 ArrayRef<SDValue> UpperOps(SubOps.begin() + (NumSubOps / 2), SubOps.end());
6790 if (any_of(UpperOps, [](SDValue Op) { return !Op.isUndef(); }))
    return false;
6793 LowerOps.assign(SubOps.begin(), SubOps.begin() + (NumSubOps / 2));
  return true;
// Helper to check if we can access all the constituent subvectors without any
// extraction.
6799 static bool isFreeToSplitVector(SDNode *N, SelectionDAG &DAG) {
6800 SmallVector<SDValue> Ops;
6801 return collectConcatOps(N, Ops, DAG);
6804 static std::pair<SDValue, SDValue> splitVector(SDValue Op, SelectionDAG &DAG,
                                              const SDLoc &dl) {
6806 EVT VT = Op.getValueType();
6807 unsigned NumElems = VT.getVectorNumElements();
6808 unsigned SizeInBits = VT.getSizeInBits();
6809 assert((NumElems % 2) == 0 && (SizeInBits % 2) == 0 &&
6810 "Can't split odd sized vector");
6812 // If this is a splat value (with no-undefs) then use the lower subvector,
6813 // which should be a free extraction.
6814 SDValue Lo = extractSubVector(Op, 0, DAG, dl, SizeInBits / 2);
6815 if (DAG.isSplatValue(Op, /*AllowUndefs*/ false))
6816 return std::make_pair(Lo, Lo);
6818 SDValue Hi = extractSubVector(Op, NumElems / 2, DAG, dl, SizeInBits / 2);
6819 return std::make_pair(Lo, Hi);
6822 /// Break an operation into 2 half sized ops and then concatenate the results.
6823 static SDValue splitVectorOp(SDValue Op, SelectionDAG &DAG) {
6824 unsigned NumOps = Op.getNumOperands();
6825 EVT VT = Op.getValueType();
  SDLoc dl(Op);
6828 // Extract the LHS Lo/Hi vectors
6829 SmallVector<SDValue> LoOps(NumOps, SDValue());
6830 SmallVector<SDValue> HiOps(NumOps, SDValue());
6831 for (unsigned I = 0; I != NumOps; ++I) {
6832 SDValue SrcOp = Op.getOperand(I);
6833 if (!SrcOp.getValueType().isVector()) {
6834 LoOps[I] = HiOps[I] = SrcOp;
      continue;
    }
6837 std::tie(LoOps[I], HiOps[I]) = splitVector(SrcOp, DAG, dl);
  }

  EVT LoVT, HiVT;
6841 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
6842 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
6843 DAG.getNode(Op.getOpcode(), dl, LoVT, LoOps),
6844 DAG.getNode(Op.getOpcode(), dl, HiVT, HiOps));
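// For example, on an AVX1-only target a 256-bit integer op can be emitted as
// two 128-bit ops on the extracted halves, concatenated back into the
// original type.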
6847 /// Break an unary integer operation into 2 half sized ops and then
6848 /// concatenate the result back.
6849 static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
6850 // Make sure we only try to split 256/512-bit types to avoid creating
  // narrow vectors.
6852 EVT VT = Op.getValueType();
6854 assert((Op.getOperand(0).getValueType().is256BitVector() ||
6855 Op.getOperand(0).getValueType().is512BitVector()) &&
6856 (VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
6857 assert(Op.getOperand(0).getValueType().getVectorNumElements() ==
6858 VT.getVectorNumElements() &&
         "Unexpected VTs!");
6860 return splitVectorOp(Op, DAG);
6863 /// Break a binary integer operation into 2 half sized ops and then
6864 /// concatenate the result back.
6865 static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG) {
6866 // Assert that all the types match.
6867 EVT VT = Op.getValueType();
6869 assert(Op.getOperand(0).getValueType() == VT &&
6870 Op.getOperand(1).getValueType() == VT && "Unexpected VTs!");
6871 assert((VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
6872 return splitVectorOp(Op, DAG);
6875 // Helper for splitting operands of an operation to legal target size and
6876 // apply a function on each part.
6877 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
6878 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
6879 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
6880 // The argument Builder is a function that will be applied on each split part:
6881 // SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
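// Illustrative use, assuming A and B are vector operands of type VT:
//   SDValue Sum = SplitOpsAndApply(DAG, Subtarget, DL, VT, {A, B},
//       [](SelectionDAG &G, const SDLoc &DL, ArrayRef<SDValue> Ops) {
//         return G.getNode(ISD::ADD, DL, Ops[0].getValueType(), Ops);
//       });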
6882 template <typename F>
6883 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
6884 const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
6885 F Builder, bool CheckBWI = true) {
6886 assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
6887 unsigned NumSubs = 1;
6888 if ((CheckBWI && Subtarget.useBWIRegs()) ||
6889 (!CheckBWI && Subtarget.useAVX512Regs())) {
6890 if (VT.getSizeInBits() > 512) {
6891 NumSubs = VT.getSizeInBits() / 512;
6892 assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
    }
6894 } else if (Subtarget.hasAVX2()) {
6895 if (VT.getSizeInBits() > 256) {
6896 NumSubs = VT.getSizeInBits() / 256;
6897 assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
    }
  } else {
6900 if (VT.getSizeInBits() > 128) {
6901 NumSubs = VT.getSizeInBits() / 128;
6902 assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
    }
  }

  if (NumSubs == 1)
6907 return Builder(DAG, DL, Ops);
6909 SmallVector<SDValue, 4> Subs;
6910 for (unsigned i = 0; i != NumSubs; ++i) {
6911 SmallVector<SDValue, 2> SubOps;
6912 for (SDValue Op : Ops) {
6913 EVT OpVT = Op.getValueType();
6914 unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
6915 unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
6916 SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
6918 Subs.push_back(Builder(DAG, DL, SubOps));
6920 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
// Helper function that extends a non-512-bit vector op to 512-bits on non-VLX
// targets.
6925 static SDValue getAVX512Node(unsigned Opcode, const SDLoc &DL, MVT VT,
6926 ArrayRef<SDValue> Ops, SelectionDAG &DAG,
6927 const X86Subtarget &Subtarget) {
6928 assert(Subtarget.hasAVX512() && "AVX512 target expected");
6929 MVT SVT = VT.getScalarType();
6931 // If we have a 32/64 splatted constant, splat it to DstTy to
6932 // encourage a foldable broadcast'd operand.
6933 auto MakeBroadcastOp = [&](SDValue Op, MVT OpVT, MVT DstVT) {
6934 unsigned OpEltSizeInBits = OpVT.getScalarSizeInBits();
6935 // AVX512 broadcasts 32/64-bit operands.
6936 // TODO: Support float once getAVX512Node is used by fp-ops.
6937 if (!OpVT.isInteger() || OpEltSizeInBits < 32 ||
6938 !DAG.getTargetLoweringInfo().isTypeLegal(SVT))
      return SDValue();
6940 // If we're not widening, don't bother if we're not bitcasting.
6941 if (OpVT == DstVT && Op.getOpcode() != ISD::BITCAST)
      return SDValue();
6943 if (auto *BV = dyn_cast<BuildVectorSDNode>(peekThroughBitcasts(Op))) {
6944 APInt SplatValue, SplatUndef;
6945 unsigned SplatBitSize;
    bool HasAnyUndefs;
6947 if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
6948 HasAnyUndefs, OpEltSizeInBits) &&
6949 !HasAnyUndefs && SplatValue.getBitWidth() == OpEltSizeInBits)
6950 return DAG.getConstant(SplatValue, DL, DstVT);
    }
    return SDValue();
  };
6955 bool Widen = !(Subtarget.hasVLX() || VT.is512BitVector());
  MVT DstVT = VT;
  if (Widen)
6959 DstVT = MVT::getVectorVT(SVT, 512 / SVT.getSizeInBits());
6961 // Canonicalize src operands.
6962 SmallVector<SDValue> SrcOps(Ops.begin(), Ops.end());
6963 for (SDValue &Op : SrcOps) {
6964 MVT OpVT = Op.getSimpleValueType();
6965 // Just pass through scalar operands.
6966 if (!OpVT.isVector())
      continue;
6968 assert(OpVT == VT && "Vector type mismatch");
6970 if (SDValue BroadcastOp = MakeBroadcastOp(Op, OpVT, DstVT)) {
      Op = BroadcastOp;
      continue;
    }
6975 // Just widen the subvector by inserting into an undef wide vector.
    if (Widen)
6977 Op = widenSubVector(Op, false, Subtarget, DAG, DL, 512);
6980 SDValue Res = DAG.getNode(Opcode, DL, DstVT, SrcOps);
6982 // Perform the 512-bit op then extract the bottom subvector.
  if (Widen)
6984 Res = extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
  return Res;
6988 /// Insert i1-subvector to i1-vector.
6989 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
6990 const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
6993 SDValue Vec = Op.getOperand(0);
6994 SDValue SubVec = Op.getOperand(1);
6995 SDValue Idx = Op.getOperand(2);
6996 unsigned IdxVal = Op.getConstantOperandVal(2);
6998 // Inserting undef is a nop. We can just return the original vector.
6999 if (SubVec.isUndef())
    return Vec;
7002 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
    return Op;
7005 MVT OpVT = Op.getSimpleValueType();
7006 unsigned NumElems = OpVT.getVectorNumElements();
7007 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
7009 // Extend to natively supported kshift.
7010 MVT WideOpVT = OpVT;
7011 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
7012 WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
  // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
  // if necessary.
7016 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
7017 // May need to promote to a legal type.
7018 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
7019 DAG.getConstant(0, dl, WideOpVT),
                     SubVec, ZeroIdx);
7021 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
7024 MVT SubVecVT = SubVec.getSimpleValueType();
7025 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
7026 assert(IdxVal + SubVecNumElems <= NumElems &&
7027 IdxVal % SubVecVT.getSizeInBits() == 0 &&
7028 "Unexpected index value in INSERT_SUBVECTOR");
7030 SDValue Undef = DAG.getUNDEF(WideOpVT);

  if (IdxVal == 0) {
7033 // Zero lower bits of the Vec
7034 SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
7035 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
                      ZeroIdx);
7037 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
7038 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
7039 // Merge them together, SubVec should be zero extended.
7040 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
7041 DAG.getConstant(0, dl, WideOpVT),
                         SubVec, ZeroIdx);
7043 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
7044 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
7047 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
7048 Undef, SubVec, ZeroIdx);
7050 if (Vec.isUndef()) {
7051 assert(IdxVal != 0 && "Unexpected index");
7052 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
7053 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
7054 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
7057 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
7058 assert(IdxVal != 0 && "Unexpected index");
7059 // If upper elements of Vec are known undef, then just shift into place.
7060 if (llvm::all_of(Vec->ops().slice(IdxVal + SubVecNumElems),
7061 [](SDValue V) { return V.isUndef(); })) {
7062 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
7063 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
    }
7065 NumElems = WideOpVT.getVectorNumElements();
7066 unsigned ShiftLeft = NumElems - SubVecNumElems;
7067 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
7068 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
7069 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
7070 if (ShiftRight != 0)
7071 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
7072 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
7074 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
  // Simple case when we put the subvector in the upper part.
7078 if (IdxVal + SubVecNumElems == NumElems) {
7079 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
7080 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
7081 if (SubVecNumElems * 2 == NumElems) {
7082 // Special case, use legal zero extending insert_subvector. This allows
7083 // isel to optimize when bits are known zero.
7084 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
7085 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
7086 DAG.getConstant(0, dl, WideOpVT),
                        Vec, ZeroIdx);
    } else {
7089 // Otherwise use explicit shifts to zero the bits.
7090 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
7091 Undef, Vec, ZeroIdx);
7092 NumElems = WideOpVT.getVectorNumElements();
7093 SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
7094 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
7095 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
7097 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
7098 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
7101 // Inserting into the middle is more complicated.
7103 NumElems = WideOpVT.getVectorNumElements();
7105 // Widen the vector if needed.
7106 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
7108 unsigned ShiftLeft = NumElems - SubVecNumElems;
7109 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
  // Do an optimization for the most frequently used types.
7112 if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
7113 APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
7114 Mask0.flipAllBits();
7115 SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
7116 SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
7117 Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
7118 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
7119 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
7120 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
7121 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
7122 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
7124 // Reduce to original width if needed.
7125 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
7128 // Clear the upper bits of the subvector and move it to its insert position.
7129 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
7130 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
7131 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
7132 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
7134 // Isolate the bits below the insertion point.
7135 unsigned LowShift = NumElems - IdxVal;
7136 SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
7137 DAG.getTargetConstant(LowShift, dl, MVT::i8));
7138 Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
7139 DAG.getTargetConstant(LowShift, dl, MVT::i8));
7141 // Isolate the bits after the last inserted bit.
7142 unsigned HighShift = IdxVal + SubVecNumElems;
7143 SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
7144 DAG.getTargetConstant(HighShift, dl, MVT::i8));
7145 High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
7146 DAG.getTargetConstant(HighShift, dl, MVT::i8));
7148 // Now OR all 3 pieces together.
7149 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
7150 SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
7152 // Reduce to original width if needed.
7153 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
7156 static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
                                const SDLoc &dl) {
7158 assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
7159 EVT SubVT = V1.getValueType();
7160 EVT SubSVT = SubVT.getScalarType();
7161 unsigned SubNumElts = SubVT.getVectorNumElements();
7162 unsigned SubVectorWidth = SubVT.getSizeInBits();
7163 EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
7164 SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
7165 return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
7168 /// Returns a vector of specified type with all bits set.
7169 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
7170 /// Then bitcast to their original type, ensuring they get CSE'd.
7171 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
7172 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
7173 "Expected a 128/256/512-bit vector type");
7175 APInt Ones = APInt::getAllOnes(32);
7176 unsigned NumElts = VT.getSizeInBits() / 32;
7177 SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
7178 return DAG.getBitcast(VT, Vec);
7181 static SDValue getEXTEND_VECTOR_INREG(unsigned Opcode, const SDLoc &DL, EVT VT,
7182 SDValue In, SelectionDAG &DAG) {
7183 EVT InVT = In.getValueType();
7184 assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
7185 assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
7186 ISD::ZERO_EXTEND == Opcode) &&
7187 "Unknown extension opcode");
7189 // For 256-bit vectors, we only need the lower (128-bit) input half.
7190 // For 512-bit vectors, we only need the lower input half or quarter.
7191 if (InVT.getSizeInBits() > 128) {
7192 assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
7193 "Expected VTs to be the same size!");
7194 unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
7195 In = extractSubVector(In, 0, DAG, DL,
7196 std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
7197 InVT = In.getValueType();
7200 if (VT.getVectorNumElements() != InVT.getVectorNumElements())
7201 Opcode = DAG.getOpcode_EXTEND_VECTOR_INREG(Opcode);
7203 return DAG.getNode(Opcode, DL, VT, In);
7206 // Create OR(AND(LHS,MASK),AND(RHS,~MASK)) bit select pattern
7207 static SDValue getBitSelect(const SDLoc &DL, MVT VT, SDValue LHS, SDValue RHS,
7208 SDValue Mask, SelectionDAG &DAG) {
7209 LHS = DAG.getNode(ISD::AND, DL, VT, LHS, Mask);
7210 RHS = DAG.getNode(X86ISD::ANDNP, DL, VT, Mask, RHS);
7211 return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
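// For example, with Mask elements of 0x00FF00FF the result takes LHS bits
// where the mask is set and RHS bits where it is clear; X86ISD::ANDNP
// computes ~Mask & RHS, so no explicit NOT of the mask is needed.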
7214 // Match (xor X, -1) -> X.
7215 // Match extract_subvector(xor X, -1) -> extract_subvector(X).
7216 // Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
7217 static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
7218 V = peekThroughBitcasts(V);
7219 if (V.getOpcode() == ISD::XOR &&
7220 (ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()) ||
7221 isAllOnesConstant(V.getOperand(1))))
7222 return V.getOperand(0);
7223 if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
7224 (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
7225 if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
7226 Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
7227 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
7228 Not, V.getOperand(1));
7231 SmallVector<SDValue, 2> CatOps;
7232 if (collectConcatOps(V.getNode(), CatOps, DAG)) {
7233 for (SDValue &CatOp : CatOps) {
7234 SDValue NotCat = IsNOT(CatOp, DAG);
7235 if (!NotCat) return SDValue();
7236 CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
    }
    return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
  }
  return SDValue();
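// For example, for VT = v8i16 with Lo = true and Unary = false,
// createUnpackShuffleMask below produces <0, 8, 1, 9, 2, 10, 3, 11>, the
// PUNPCKLWD interleave of the two operands.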
7243 void llvm::createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask,
7244 bool Lo, bool Unary) {
7245 assert(VT.getScalarType().isSimple() && (VT.getSizeInBits() % 128) == 0 &&
7246 "Illegal vector type to unpack");
7247 assert(Mask.empty() && "Expected an empty shuffle mask vector");
7248 int NumElts = VT.getVectorNumElements();
7249 int NumEltsInLane = 128 / VT.getScalarSizeInBits();
7250 for (int i = 0; i < NumElts; ++i) {
7251 unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
7252 int Pos = (i % NumEltsInLane) / 2 + LaneStart;
7253 Pos += (Unary ? 0 : NumElts * (i % 2));
7254 Pos += (Lo ? 0 : NumEltsInLane / 2);
7255 Mask.push_back(Pos);
7259 /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
7260 /// imposed by AVX and specific to the unary pattern. Example:
7261 /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
7262 /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
7263 void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
                                   bool Lo) {
7265 assert(Mask.empty() && "Expected an empty shuffle mask vector");
7266 int NumElts = VT.getVectorNumElements();
7267 for (int i = 0; i < NumElts; ++i) {
    int Pos = i / 2;
7269 Pos += (Lo ? 0 : NumElts / 2);
7270 Mask.push_back(Pos);
7274 // Attempt to constant fold, else just create a VECTOR_SHUFFLE.
7275 static SDValue getVectorShuffle(SelectionDAG &DAG, EVT VT, const SDLoc &dl,
7276 SDValue V1, SDValue V2, ArrayRef<int> Mask) {
7277 if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) || V1.isUndef()) &&
7278 (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) || V2.isUndef())) {
7279 SmallVector<SDValue> Ops(Mask.size(), DAG.getUNDEF(VT.getScalarType()));
7280 for (int I = 0, NumElts = Mask.size(); I != NumElts; ++I) {
      int M = Mask[I];
      if (M < 0)
        continue;
7284 SDValue V = (M < NumElts) ? V1 : V2;
      if (V.isUndef())
        continue;
7287 Ops[I] = V.getOperand(M % NumElts);
7289 return DAG.getBuildVector(VT, dl, Ops);
7292 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
7295 /// Returns a vector_shuffle node for an unpackl operation.
7296 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
7297 SDValue V1, SDValue V2) {
7298 SmallVector<int, 8> Mask;
7299 createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
7300 return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
7303 /// Returns a vector_shuffle node for an unpackh operation.
7304 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
7305 SDValue V1, SDValue V2) {
7306 SmallVector<int, 8> Mask;
7307 createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
7308 return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
7311 /// Returns a node that packs the LHS + RHS nodes together at half width.
7312 /// May return X86ISD::PACKSS/PACKUS, packing the top/bottom half.
7313 /// TODO: Add subvector splitting if/when we have a need for it.
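/// Illustrative example (added; not in the original source): packing two
/// v8i16 operands into v16i8 with PackHiHalf=false keeps the low byte of
/// each i16 element (a plain truncation), while PackHiHalf=true keeps the
/// high byte.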
7314 static SDValue getPack(SelectionDAG &DAG, const X86Subtarget &Subtarget,
7315 const SDLoc &dl, MVT VT, SDValue LHS, SDValue RHS,
7316 bool PackHiHalf = false) {
7317 MVT OpVT = LHS.getSimpleValueType();
7318 unsigned EltSizeInBits = VT.getScalarSizeInBits();
7319 bool UsePackUS = Subtarget.hasSSE41() || EltSizeInBits == 8;
7320 assert(OpVT == RHS.getSimpleValueType() &&
7321 VT.getSizeInBits() == OpVT.getSizeInBits() &&
7322 (EltSizeInBits * 2) == OpVT.getScalarSizeInBits() &&
7323 "Unexpected PACK operand types");
7324 assert((EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) &&
7325 "Unexpected PACK result type");
7327 // Rely on vector shuffles for vXi64 -> vXi32 packing.
7328 if (EltSizeInBits == 32) {
7329 SmallVector<int> PackMask;
7330 int Offset = PackHiHalf ? 1 : 0;
7331 int NumElts = VT.getVectorNumElements();
7332 for (int I = 0; I != NumElts; I += 4) {
7333 PackMask.push_back(I + Offset);
7334 PackMask.push_back(I + Offset + 2);
7335 PackMask.push_back(I + Offset + NumElts);
7336 PackMask.push_back(I + Offset + NumElts + 2);
7338 return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, LHS),
7339 DAG.getBitcast(VT, RHS), PackMask);
7342 // See if we already have sufficient leading bits for PACKSS/PACKUS.
7343 if (!PackHiHalf) {
7344 if (UsePackUS &&
7345 DAG.computeKnownBits(LHS).countMaxActiveBits() <= EltSizeInBits &&
7346 DAG.computeKnownBits(RHS).countMaxActiveBits() <= EltSizeInBits)
7347 return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
7349 if (DAG.ComputeMaxSignificantBits(LHS) <= EltSizeInBits &&
7350 DAG.ComputeMaxSignificantBits(RHS) <= EltSizeInBits)
7351 return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
7352 }
7354 // Fallback to sign/zero extending the requested half and pack.
7355 SDValue Amt = DAG.getTargetConstant(EltSizeInBits, dl, MVT::i8);
7356 if (UsePackUS) {
7357 if (PackHiHalf) {
7358 LHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, LHS, Amt);
7359 RHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, RHS, Amt);
7360 } else {
7361 SDValue Mask = DAG.getConstant((1ULL << EltSizeInBits) - 1, dl, OpVT);
7362 LHS = DAG.getNode(ISD::AND, dl, OpVT, LHS, Mask);
7363 RHS = DAG.getNode(ISD::AND, dl, OpVT, RHS, Mask);
7364 }
7365 return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
7366 }
7368 if (!PackHiHalf) {
7369 LHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, LHS, Amt);
7370 RHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, RHS, Amt);
7371 }
7372 LHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, LHS, Amt);
7373 RHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, RHS, Amt);
7374 return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
7377 /// Return a vector_shuffle of a zero or undef vector and the specified vector.
7378 /// This produces a shuffle where the low element of V2 is swizzled into the
7379 /// zero/undef vector, landing at element Idx.
7380 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
7381 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
7382 bool IsZero,
7383 const X86Subtarget &Subtarget,
7384 SelectionDAG &DAG) {
7385 MVT VT = V2.getSimpleValueType();
7386 SDValue V1 = IsZero
7387 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
7388 int NumElems = VT.getVectorNumElements();
7389 SmallVector<int, 16> MaskVec(NumElems);
7390 for (int i = 0; i != NumElems; ++i)
7391 // If this is the insertion idx, put the low elt of V2 here.
7392 MaskVec[i] = (i == Idx) ? NumElems : i;
7393 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
7396 static const Constant *getTargetConstantFromBasePtr(SDValue Ptr) {
7397 if (Ptr.getOpcode() == X86ISD::Wrapper ||
7398 Ptr.getOpcode() == X86ISD::WrapperRIP)
7399 Ptr = Ptr.getOperand(0);
7401 auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
7402 if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
7403 return nullptr;
7405 return CNode->getConstVal();
7408 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
7409 if (!Load || !ISD::isNormalLoad(Load))
7410 return nullptr;
7411 return getTargetConstantFromBasePtr(Load->getBasePtr());
7414 static const Constant *getTargetConstantFromNode(SDValue Op) {
7415 Op = peekThroughBitcasts(Op);
7416 return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
7419 const Constant *
7420 X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
7421 assert(LD && "Unexpected null LoadSDNode");
7422 return getTargetConstantFromNode(LD);
7425 // Extract raw constant bits from constant pools.
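// Illustrative example (added; not in the original source): querying a v2i64
// build vector <0x00FF00FF00FF00FF, undef> with EltSizeInBits=32 yields four
// i32 elements {0x00FF00FF, 0x00FF00FF, undef, undef} - the undef source
// element expands to whole-undef target elements.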
7426 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
7427 APInt &UndefElts,
7428 SmallVectorImpl<APInt> &EltBits,
7429 bool AllowWholeUndefs = true,
7430 bool AllowPartialUndefs = true) {
7431 assert(EltBits.empty() && "Expected an empty EltBits vector");
7433 Op = peekThroughBitcasts(Op);
7435 EVT VT = Op.getValueType();
7436 unsigned SizeInBits = VT.getSizeInBits();
7437 assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
7438 unsigned NumElts = SizeInBits / EltSizeInBits;
7440 // Bitcast a source array of element bits to the target size.
7441 auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
7442 unsigned NumSrcElts = UndefSrcElts.getBitWidth();
7443 unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
7444 assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
7445 "Constant bit sizes don't match");
7447 // Don't split if we don't allow undef bits.
7448 bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
7449 if (UndefSrcElts.getBoolValue() && !AllowUndefs)
7450 return false;
7452 // If we're already the right size, don't bother bitcasting.
7453 if (NumSrcElts == NumElts) {
7454 UndefElts = UndefSrcElts;
7455 EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
7456 return true;
7457 }
7459 // Extract all the undef/constant element data and pack into single bitsets.
7460 APInt UndefBits(SizeInBits, 0);
7461 APInt MaskBits(SizeInBits, 0);
7463 for (unsigned i = 0; i != NumSrcElts; ++i) {
7464 unsigned BitOffset = i * SrcEltSizeInBits;
7465 if (UndefSrcElts[i])
7466 UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
7467 MaskBits.insertBits(SrcEltBits[i], BitOffset);
7470 // Split the undef/constant single bitset data into the target elements.
7471 UndefElts = APInt(NumElts, 0);
7472 EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
7474 for (unsigned i = 0; i != NumElts; ++i) {
7475 unsigned BitOffset = i * EltSizeInBits;
7476 APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
7478 // Only treat an element as UNDEF if all bits are UNDEF.
7479 if (UndefEltBits.isAllOnes()) {
7480 if (!AllowWholeUndefs)
7481 return false;
7482 UndefElts.setBit(i);
7483 continue;
7484 }
7486 // If only some bits are UNDEF then treat them as zero (or bail if not
7487 // supported).
7488 if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
7489 return false;
7491 EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
7492 }
7493 return true;
7494 };
7496 // Collect constant bits and insert into mask/undef bit masks.
7497 auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
7498 unsigned UndefBitIndex) {
7499 if (!Cst)
7500 return false;
7501 if (isa<UndefValue>(Cst)) {
7502 Undefs.setBit(UndefBitIndex);
7503 return true;
7504 }
7505 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
7506 Mask = CInt->getValue();
7507 return true;
7508 }
7509 if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
7510 Mask = CFP->getValueAPF().bitcastToAPInt();
7511 return true;
7512 }
7513 if (auto *CDS = dyn_cast<ConstantDataSequential>(Cst)) {
7514 Type *Ty = CDS->getType();
7515 Mask = APInt::getZero(Ty->getPrimitiveSizeInBits());
7516 Type *EltTy = CDS->getElementType();
7517 bool IsInteger = EltTy->isIntegerTy();
7518 bool IsFP =
7519 EltTy->isHalfTy() || EltTy->isFloatTy() || EltTy->isDoubleTy();
7520 if (!IsInteger && !IsFP)
7521 return false;
7522 unsigned EltBits = EltTy->getPrimitiveSizeInBits();
7523 for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I)
7524 if (IsInteger)
7525 Mask.insertBits(CDS->getElementAsAPInt(I), I * EltBits);
7526 else
7527 Mask.insertBits(CDS->getElementAsAPFloat(I).bitcastToAPInt(),
7528 I * EltBits);
7529 return true;
7530 }
7531 return false;
7532 };
7534 // Handle UNDEFs.
7535 if (Op.isUndef()) {
7536 APInt UndefSrcElts = APInt::getAllOnes(NumElts);
7537 SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
7538 return CastBitData(UndefSrcElts, SrcEltBits);
7541 // Extract scalar constant bits.
7542 if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
7543 APInt UndefSrcElts = APInt::getZero(1);
7544 SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
7545 return CastBitData(UndefSrcElts, SrcEltBits);
7547 if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
7548 APInt UndefSrcElts = APInt::getZero(1);
7549 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
7550 SmallVector<APInt, 64> SrcEltBits(1, RawBits);
7551 return CastBitData(UndefSrcElts, SrcEltBits);
7554 // Extract constant bits from build vector.
7555 if (auto *BV = dyn_cast<BuildVectorSDNode>(Op)) {
7556 BitVector Undefs;
7557 SmallVector<APInt> SrcEltBits;
7558 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7559 if (BV->getConstantRawBits(true, SrcEltSizeInBits, SrcEltBits, Undefs)) {
7560 APInt UndefSrcElts = APInt::getZero(SrcEltBits.size());
7561 for (unsigned I = 0, E = SrcEltBits.size(); I != E; ++I)
7562 if (Undefs[I])
7563 UndefSrcElts.setBit(I);
7564 return CastBitData(UndefSrcElts, SrcEltBits);
7568 // Extract constant bits from constant pool vector.
7569 if (auto *Cst = getTargetConstantFromNode(Op)) {
7570 Type *CstTy = Cst->getType();
7571 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
7572 if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
7573 return false;
7575 unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
7576 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7578 APInt UndefSrcElts(NumSrcElts, 0);
7579 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
7580 for (unsigned i = 0; i != NumSrcElts; ++i)
7581 if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
7582 UndefSrcElts, i))
7583 return false;
7585 return CastBitData(UndefSrcElts, SrcEltBits);
7588 // Extract constant bits from a broadcasted constant pool scalar.
7589 if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
7590 EltSizeInBits <= VT.getScalarSizeInBits()) {
7591 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
7592 if (MemIntr->getMemoryVT().getStoreSizeInBits() != VT.getScalarSizeInBits())
7593 return false;
7595 SDValue Ptr = MemIntr->getBasePtr();
7596 if (const Constant *C = getTargetConstantFromBasePtr(Ptr)) {
7597 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7598 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7600 APInt UndefSrcElts(NumSrcElts, 0);
7601 SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
7602 if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
7603 if (UndefSrcElts[0])
7604 UndefSrcElts.setBits(0, NumSrcElts);
7605 if (SrcEltBits[0].getBitWidth() != SrcEltSizeInBits)
7606 SrcEltBits[0] = SrcEltBits[0].trunc(SrcEltSizeInBits);
7607 SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
7608 return CastBitData(UndefSrcElts, SrcEltBits);
7613 // Extract constant bits from a subvector broadcast.
7614 if (Op.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
7615 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
7616 SDValue Ptr = MemIntr->getBasePtr();
7617 // The source constant may be larger than the subvector broadcast,
7618 // ensure we extract the correct subvector constants.
7619 if (const Constant *Cst = getTargetConstantFromBasePtr(Ptr)) {
7620 Type *CstTy = Cst->getType();
7621 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
7622 unsigned SubVecSizeInBits = MemIntr->getMemoryVT().getStoreSizeInBits();
7623 if (!CstTy->isVectorTy() || (CstSizeInBits % SubVecSizeInBits) != 0 ||
7624 (SizeInBits % SubVecSizeInBits) != 0)
7625 return false;
7626 unsigned CstEltSizeInBits = CstTy->getScalarSizeInBits();
7627 unsigned NumSubElts = SubVecSizeInBits / CstEltSizeInBits;
7628 unsigned NumSubVecs = SizeInBits / SubVecSizeInBits;
7629 APInt UndefSubElts(NumSubElts, 0);
7630 SmallVector<APInt, 64> SubEltBits(NumSubElts * NumSubVecs,
7631 APInt(CstEltSizeInBits, 0));
7632 for (unsigned i = 0; i != NumSubElts; ++i) {
7633 if (!CollectConstantBits(Cst->getAggregateElement(i), SubEltBits[i],
7634 UndefSubElts, i))
7635 return false;
7636 for (unsigned j = 1; j != NumSubVecs; ++j)
7637 SubEltBits[i + (j * NumSubElts)] = SubEltBits[i];
7638 }
7639 UndefSubElts = APInt::getSplat(NumSubVecs * UndefSubElts.getBitWidth(),
7640 UndefSubElts);
7641 return CastBitData(UndefSubElts, SubEltBits);
7645 // Extract a rematerialized scalar constant insertion.
7646 if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
7647 Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
7648 isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
7649 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7650 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7652 APInt UndefSrcElts(NumSrcElts, 0);
7653 SmallVector<APInt, 64> SrcEltBits;
7654 auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
7655 SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
7656 SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
7657 return CastBitData(UndefSrcElts, SrcEltBits);
7660 // Insert constant bits from a base and sub vector sources.
7661 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR) {
7662 // If we bitcast to larger elements we might lose track of undefs, so don't
7663 // allow any to be safe.
7664 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7665 bool AllowUndefs = EltSizeInBits >= SrcEltSizeInBits;
7667 APInt UndefSrcElts, UndefSubElts;
7668 SmallVector<APInt, 32> EltSrcBits, EltSubBits;
7669 if (getTargetConstantBitsFromNode(Op.getOperand(1), SrcEltSizeInBits,
7670 UndefSubElts, EltSubBits,
7671 AllowWholeUndefs && AllowUndefs,
7672 AllowPartialUndefs && AllowUndefs) &&
7673 getTargetConstantBitsFromNode(Op.getOperand(0), SrcEltSizeInBits,
7674 UndefSrcElts, EltSrcBits,
7675 AllowWholeUndefs && AllowUndefs,
7676 AllowPartialUndefs && AllowUndefs)) {
7677 unsigned BaseIdx = Op.getConstantOperandVal(2);
7678 UndefSrcElts.insertBits(UndefSubElts, BaseIdx);
7679 for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
7680 EltSrcBits[BaseIdx + i] = EltSubBits[i];
7681 return CastBitData(UndefSrcElts, EltSrcBits);
7685 // Extract constant bits from a subvector's source.
7686 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
7687 // TODO - support extract_subvector through bitcasts.
7688 if (EltSizeInBits != VT.getScalarSizeInBits())
7689 return false;
7691 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
7692 UndefElts, EltBits, AllowWholeUndefs,
7693 AllowPartialUndefs)) {
7694 EVT SrcVT = Op.getOperand(0).getValueType();
7695 unsigned NumSrcElts = SrcVT.getVectorNumElements();
7696 unsigned NumSubElts = VT.getVectorNumElements();
7697 unsigned BaseIdx = Op.getConstantOperandVal(1);
7698 UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
7699 if ((BaseIdx + NumSubElts) != NumSrcElts)
7700 EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
7701 if (BaseIdx != 0)
7702 EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
7703 return true;
7704 }
7707 // Extract constant bits from shuffle node sources.
7708 if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
7709 // TODO - support shuffle through bitcasts.
7710 if (EltSizeInBits != VT.getScalarSizeInBits())
7711 return false;
7713 ArrayRef<int> Mask = SVN->getMask();
7714 if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
7715 llvm::any_of(Mask, [](int M) { return M < 0; }))
7716 return false;
7718 APInt UndefElts0, UndefElts1;
7719 SmallVector<APInt, 32> EltBits0, EltBits1;
7720 if (isAnyInRange(Mask, 0, NumElts) &&
7721 !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
7722 UndefElts0, EltBits0, AllowWholeUndefs,
7723 AllowPartialUndefs))
7724 return false;
7725 if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
7726 !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
7727 UndefElts1, EltBits1, AllowWholeUndefs,
7728 AllowPartialUndefs))
7729 return false;
7731 UndefElts = APInt::getZero(NumElts);
7732 for (int i = 0; i != (int)NumElts; ++i) {
7733 int M = Mask[i];
7734 if (M < 0) {
7735 UndefElts.setBit(i);
7736 EltBits.push_back(APInt::getZero(EltSizeInBits));
7737 } else if (M < (int)NumElts) {
7738 if (UndefElts0[M])
7739 UndefElts.setBit(i);
7740 EltBits.push_back(EltBits0[M]);
7741 } else {
7742 if (UndefElts1[M - NumElts])
7743 UndefElts.setBit(i);
7744 EltBits.push_back(EltBits1[M - NumElts]);
7745 }
7746 }
7747 return true;
7748 }
7750 return false;
7751 }
7755 bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs) {
7756 APInt UndefElts;
7757 SmallVector<APInt, 16> EltBits;
7758 if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
7759 UndefElts, EltBits, true,
7760 AllowPartialUndefs)) {
7761 int SplatIndex = -1;
7762 for (int i = 0, e = EltBits.size(); i != e; ++i) {
7763 if (UndefElts[i])
7764 continue;
7765 if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
7766 // Multiple constant values - bail.
7767 return false;
7768 }
7769 SplatIndex = i;
7770 }
7771 if (0 <= SplatIndex) {
7772 SplatVal = EltBits[SplatIndex];
7773 return true;
7774 }
7782 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
7783 unsigned MaskEltSizeInBits,
7784 SmallVectorImpl<uint64_t> &RawMask,
7785 APInt &UndefElts) {
7786 // Extract the raw target constant bits.
7787 SmallVector<APInt, 64> EltBits;
7788 if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
7789 EltBits, /* AllowWholeUndefs */ true,
7790 /* AllowPartialUndefs */ false))
7791 return false;
7793 // Insert the extracted elements into the mask.
7794 for (const APInt &Elt : EltBits)
7795 RawMask.push_back(Elt.getZExtValue());
7800 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
7801 /// A multi-stage pack shuffle mask is created by specifying NumStages > 1.
7802 /// Note: This ignores saturation, so inputs must be checked first.
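/// Illustrative example (added; not in the original source): for VT = v16i8,
/// Unary=false, NumStages=1 this produces
/// <0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30> - the even elements of the
/// first operand followed by the even elements of the second.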
7803 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
7804 bool Unary, unsigned NumStages = 1) {
7805 assert(Mask.empty() && "Expected an empty shuffle mask vector");
7806 unsigned NumElts = VT.getVectorNumElements();
7807 unsigned NumLanes = VT.getSizeInBits() / 128;
7808 unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
7809 unsigned Offset = Unary ? 0 : NumElts;
7810 unsigned Repetitions = 1u << (NumStages - 1);
7811 unsigned Increment = 1u << NumStages;
7812 assert((NumEltsPerLane >> NumStages) > 0 && "Illegal packing compaction");
7814 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
7815 for (unsigned Stage = 0; Stage != Repetitions; ++Stage) {
7816 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
7817 Mask.push_back(Elt + (Lane * NumEltsPerLane));
7818 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
7819 Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
7824 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
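// Illustrative example (added; not in the original source): for a v16i8 PACK
// of two v8i16 operands, demanded result elements 3 and 11 map to demanded
// element 3 of the LHS and element 3 of the RHS respectively.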
7825 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
7826 APInt &DemandedLHS, APInt &DemandedRHS) {
7827 int NumLanes = VT.getSizeInBits() / 128;
7828 int NumElts = DemandedElts.getBitWidth();
7829 int NumInnerElts = NumElts / 2;
7830 int NumEltsPerLane = NumElts / NumLanes;
7831 int NumInnerEltsPerLane = NumInnerElts / NumLanes;
7833 DemandedLHS = APInt::getZero(NumInnerElts);
7834 DemandedRHS = APInt::getZero(NumInnerElts);
7836 // Map DemandedElts to the packed operands.
7837 for (int Lane = 0; Lane != NumLanes; ++Lane) {
7838 for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
7839 int OuterIdx = (Lane * NumEltsPerLane) + Elt;
7840 int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
7841 if (DemandedElts[OuterIdx])
7842 DemandedLHS.setBit(InnerIdx);
7843 if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
7844 DemandedRHS.setBit(InnerIdx);
7849 // Split the demanded elts of a HADD/HSUB node between its operands.
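// Illustrative example (added; not in the original source): for a v4i32 HADD,
// result element 0 is LHS[0]+LHS[1] and element 2 is RHS[0]+RHS[1], so
// demanding result element 0 demands LHS elements 0 and 1.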
7850 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
7851 APInt &DemandedLHS, APInt &DemandedRHS) {
7852 int NumLanes = VT.getSizeInBits() / 128;
7853 int NumElts = DemandedElts.getBitWidth();
7854 int NumEltsPerLane = NumElts / NumLanes;
7855 int HalfEltsPerLane = NumEltsPerLane / 2;
7857 DemandedLHS = APInt::getZero(NumElts);
7858 DemandedRHS = APInt::getZero(NumElts);
7860 // Map DemandedElts to the horizontal operands.
7861 for (int Idx = 0; Idx != NumElts; ++Idx) {
7862 if (!DemandedElts[Idx])
7863 continue;
7864 int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
7865 int LocalIdx = Idx % NumEltsPerLane;
7866 if (LocalIdx < HalfEltsPerLane) {
7867 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
7868 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
7869 } else {
7870 LocalIdx -= HalfEltsPerLane;
7871 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
7872 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
7877 /// Calculates the shuffle mask corresponding to the target-specific opcode.
7878 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
7879 /// operands in \p Ops, and returns true.
7880 /// Sets \p IsUnary to true if only one source is used. Note that this will set
7881 /// IsUnary for shuffles which use a single input multiple times, and in those
7882 /// cases it will adjust the mask to only have indices within that single input.
7883 /// It is an error to call this with non-empty Mask/Ops vectors.
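/// Illustrative example (added; not in the original source): a v4i32
/// X86ISD::UNPCKL decodes to Mask = <0, 4, 1, 5> with Ops = {Op0, Op1}; if
/// both operands are the same node, IsUnary is set and the mask is remapped
/// to <0, 0, 1, 1>.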
7884 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
7885 SmallVectorImpl<SDValue> &Ops,
7886 SmallVectorImpl<int> &Mask, bool &IsUnary) {
7887 unsigned NumElems = VT.getVectorNumElements();
7888 unsigned MaskEltSize = VT.getScalarSizeInBits();
7889 SmallVector<uint64_t, 32> RawMask;
7890 APInt RawUndefs;
7891 uint64_t ImmN;
7893 assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
7894 assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
7896 IsUnary = false;
7897 bool IsFakeUnary = false;
7898 switch (N->getOpcode()) {
7899 case X86ISD::BLENDI:
7900 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7901 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7902 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7903 DecodeBLENDMask(NumElems, ImmN, Mask);
7904 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7905 break;
7906 case X86ISD::SHUFP:
7907 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7908 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7909 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7910 DecodeSHUFPMask(NumElems, MaskEltSize, ImmN, Mask);
7911 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7913 case X86ISD::INSERTPS:
7914 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7915 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7916 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7917 DecodeINSERTPSMask(ImmN, Mask);
7918 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7920 case X86ISD::EXTRQI:
7921 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7922 if (isa<ConstantSDNode>(N->getOperand(1)) &&
7923 isa<ConstantSDNode>(N->getOperand(2))) {
7924 int BitLen = N->getConstantOperandVal(1);
7925 int BitIdx = N->getConstantOperandVal(2);
7926 DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
7927 IsUnary = true;
7928 }
7929 break;
7930 case X86ISD::INSERTQI:
7931 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7932 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7933 if (isa<ConstantSDNode>(N->getOperand(2)) &&
7934 isa<ConstantSDNode>(N->getOperand(3))) {
7935 int BitLen = N->getConstantOperandVal(2);
7936 int BitIdx = N->getConstantOperandVal(3);
7937 DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
7938 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7941 case X86ISD::UNPCKH:
7942 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7943 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7944 DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
7945 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7947 case X86ISD::UNPCKL:
7948 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7949 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7950 DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
7951 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7953 case X86ISD::MOVHLPS:
7954 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7955 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7956 DecodeMOVHLPSMask(NumElems, Mask);
7957 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7959 case X86ISD::MOVLHPS:
7960 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7961 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7962 DecodeMOVLHPSMask(NumElems, Mask);
7963 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7965 case X86ISD::VALIGN:
7966 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
7967 "Only 32-bit and 64-bit elements are supported!");
7968 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7969 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7970 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7971 DecodeVALIGNMask(NumElems, ImmN, Mask);
7972 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7973 Ops.push_back(N->getOperand(1));
7974 Ops.push_back(N->getOperand(0));
7976 case X86ISD::PALIGNR:
7977 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7978 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7979 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7980 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7981 DecodePALIGNRMask(NumElems, ImmN, Mask);
7982 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7983 Ops.push_back(N->getOperand(1));
7984 Ops.push_back(N->getOperand(0));
7986 case X86ISD::VSHLDQ:
7987 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7988 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7989 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7990 DecodePSLLDQMask(NumElems, ImmN, Mask);
7993 case X86ISD::VSRLDQ:
7994 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7995 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7996 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7997 DecodePSRLDQMask(NumElems, ImmN, Mask);
8000 case X86ISD::PSHUFD:
8001 case X86ISD::VPERMILPI:
8002 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8003 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
8004 DecodePSHUFMask(NumElems, MaskEltSize, ImmN, Mask);
8007 case X86ISD::PSHUFHW:
8008 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8009 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
8010 DecodePSHUFHWMask(NumElems, ImmN, Mask);
8013 case X86ISD::PSHUFLW:
8014 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8015 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
8016 DecodePSHUFLWMask(NumElems, ImmN, Mask);
8019 case X86ISD::VZEXT_MOVL:
8020 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8021 DecodeZeroMoveLowMask(NumElems, Mask);
8024 case X86ISD::VBROADCAST:
8025 // We only decode broadcasts of same-sized vectors, peeking through to
8026 // extracted subvectors is likely to cause hasOneUse issues with
8027 // SimplifyDemandedBits etc.
8028 if (N->getOperand(0).getValueType() == VT) {
8029 DecodeVectorBroadcast(NumElems, Mask);
8034 case X86ISD::VPERMILPV: {
8035 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8036 IsUnary = true;
8037 SDValue MaskNode = N->getOperand(1);
8038 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
8039 RawUndefs)) {
8040 DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
8041 break;
8042 }
8043 return false;
8044 }
8045 case X86ISD::PSHUFB: {
8046 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
8047 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8048 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
8049 IsUnary = true;
8050 SDValue MaskNode = N->getOperand(1);
8051 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
8052 DecodePSHUFBMask(RawMask, RawUndefs, Mask);
8057 case X86ISD::VPERMI:
8058 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8059 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
8060 DecodeVPERMMask(NumElems, ImmN, Mask);
8061 IsUnary = true;
8062 break;
8063 case X86ISD::MOVSD:
8064 case X86ISD::MOVSS:
8066 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8067 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
8068 DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
8070 case X86ISD::VPERM2X128:
8071 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8072 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
8073 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
8074 DecodeVPERM2X128Mask(NumElems, ImmN, Mask);
8075 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
8077 case X86ISD::SHUF128:
8078 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8079 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
8080 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
8081 decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize, ImmN, Mask);
8082 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
8084 case X86ISD::MOVSLDUP:
8085 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8086 DecodeMOVSLDUPMask(NumElems, Mask);
8089 case X86ISD::MOVSHDUP:
8090 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8091 DecodeMOVSHDUPMask(NumElems, Mask);
8094 case X86ISD::MOVDDUP:
8095 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8096 DecodeMOVDDUPMask(NumElems, Mask);
8099 case X86ISD::VPERMIL2: {
8100 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8101 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
8102 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
8103 SDValue MaskNode = N->getOperand(2);
8104 SDValue CtrlNode = N->getOperand(3);
8105 if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
8106 unsigned CtrlImm = CtrlOp->getZExtValue();
8107 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
8108 RawUndefs)) {
8109 DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
8110 Mask);
8111 break;
8112 }
8113 }
8114 return false;
8115 }
8116 case X86ISD::VPPERM: {
8117 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8118 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
8119 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
8120 SDValue MaskNode = N->getOperand(2);
8121 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
8122 DecodeVPPERMMask(RawMask, RawUndefs, Mask);
8127 case X86ISD::VPERMV: {
8128 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
8129 IsUnary = true;
8130 // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
8131 Ops.push_back(N->getOperand(1));
8132 SDValue MaskNode = N->getOperand(0);
8133 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
8134 RawUndefs)) {
8135 DecodeVPERMVMask(RawMask, RawUndefs, Mask);
8136 break;
8137 }
8138 return false;
8139 }
8140 case X86ISD::VPERMV3: {
8141 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
8142 assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
8143 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
8144 // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
8145 Ops.push_back(N->getOperand(0));
8146 Ops.push_back(N->getOperand(2));
8147 SDValue MaskNode = N->getOperand(1);
8148 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
8149 RawUndefs)) {
8150 DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
8151 break;
8152 }
8153 return false;
8154 }
8155 default: llvm_unreachable("unknown target shuffle node");
8158 // Empty mask indicates the decode failed.
8159 if (Mask.empty())
8160 return false;
8162 // Check if we're getting a shuffle mask with zero'd elements.
8163 if (!AllowSentinelZero && isAnyZero(Mask))
8164 return false;
8166 // If we have a fake unary shuffle, the shuffle mask is spread across two
8167 // inputs that are actually the same node. Re-map the mask to always point
8168 // into the first input.
8169 if (IsFakeUnary)
8170 for (int &M : Mask)
8171 if (M >= (int)Mask.size())
8172 M -= Mask.size();
8174 // If we didn't already add operands in the opcode-specific code, default to
8175 // adding 1 or 2 operands starting at 0.
8176 if (Ops.empty()) {
8177 Ops.push_back(N->getOperand(0));
8178 if (!IsUnary || IsFakeUnary)
8179 Ops.push_back(N->getOperand(1));
8180 }
8182 return true;
8183 }
8185 // Wrapper for getTargetShuffleMask with IsUnary.
8186 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
8187 SmallVectorImpl<SDValue> &Ops,
8188 SmallVectorImpl<int> &Mask) {
8189 bool IsUnary;
8190 return getTargetShuffleMask(N, VT, AllowSentinelZero, Ops, Mask, IsUnary);
8191 }
8193 /// Compute whether each element of a shuffle is zeroable.
8195 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
8196 /// Either it is an undef element in the shuffle mask, the element of the input
8197 /// referenced is undef, or the element of the input referenced is known to be
8198 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
8199 /// as many lanes with this technique as possible to simplify the remaining
8200 /// shuffle.
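/// Illustrative example (added; not in the original source): shuffling V1
/// against an all-zeros build vector V2 with Mask = <0, 4, 1, 5> sets
/// KnownZero bits 1 and 3, since mask indices 4 and 5 select V2.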
8201 static void computeZeroableShuffleElements(ArrayRef<int> Mask,
8202 SDValue V1, SDValue V2,
8203 APInt &KnownUndef, APInt &KnownZero) {
8204 int Size = Mask.size();
8205 KnownUndef = KnownZero = APInt::getZero(Size);
8207 V1 = peekThroughBitcasts(V1);
8208 V2 = peekThroughBitcasts(V2);
8210 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
8211 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
8213 int VectorSizeInBits = V1.getValueSizeInBits();
8214 int ScalarSizeInBits = VectorSizeInBits / Size;
8215 assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
8217 for (int i = 0; i < Size; ++i) {
8218 int M = Mask[i];
8219 // Handle the easy cases.
8220 if (M < 0) {
8221 KnownUndef.setBit(i);
8222 continue;
8223 }
8224 if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
8225 KnownZero.setBit(i);
8226 continue;
8227 }
8229 // Determine shuffle input and normalize the mask.
8230 SDValue V = M < Size ? V1 : V2;
8231 M %= Size;
8233 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
8234 if (V.getOpcode() != ISD::BUILD_VECTOR)
8235 continue;
8237 // If the BUILD_VECTOR has fewer elements then the bitcasted portion of
8238 // the (larger) source element must be UNDEF/ZERO.
8239 if ((Size % V.getNumOperands()) == 0) {
8240 int Scale = Size / V->getNumOperands();
8241 SDValue Op = V.getOperand(M / Scale);
8242 if (Op.isUndef())
8243 KnownUndef.setBit(i);
8244 if (X86::isZeroNode(Op))
8245 KnownZero.setBit(i);
8246 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
8247 APInt Val = Cst->getAPIntValue();
8248 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
8249 if (Val == 0)
8250 KnownZero.setBit(i);
8251 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
8252 APInt Val = Cst->getValueAPF().bitcastToAPInt();
8253 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
8254 if (Val == 0)
8255 KnownZero.setBit(i);
8260 // If the BUILD_VECTOR has more elements then all the (smaller) source
8261 // elements must be UNDEF or ZERO.
8262 if ((V.getNumOperands() % Size) == 0) {
8263 int Scale = V->getNumOperands() / Size;
8264 bool AllUndef = true;
8265 bool AllZero = true;
8266 for (int j = 0; j < Scale; ++j) {
8267 SDValue Op = V.getOperand((M * Scale) + j);
8268 AllUndef &= Op.isUndef();
8269 AllZero &= X86::isZeroNode(Op);
8270 }
8271 if (AllUndef)
8272 KnownUndef.setBit(i);
8273 else if (AllZero)
8274 KnownZero.setBit(i);
8280 /// Decode a target shuffle mask and inputs and see if any values are
8281 /// known to be undef or zero from their inputs.
8282 /// Returns true if the target shuffle mask was decoded.
8283 /// FIXME: Merge this with computeZeroableShuffleElements?
8284 static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
8285 SmallVectorImpl<SDValue> &Ops,
8286 APInt &KnownUndef, APInt &KnownZero) {
8287 bool IsUnary;
8288 if (!isTargetShuffle(N.getOpcode()))
8289 return false;
8291 MVT VT = N.getSimpleValueType();
8292 if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
8293 return false;
8295 int Size = Mask.size();
8296 SDValue V1 = Ops[0];
8297 SDValue V2 = IsUnary ? V1 : Ops[1];
8298 KnownUndef = KnownZero = APInt::getZero(Size);
8300 V1 = peekThroughBitcasts(V1);
8301 V2 = peekThroughBitcasts(V2);
8303 assert((VT.getSizeInBits() % Size) == 0 &&
8304 "Illegal split of shuffle value type");
8305 unsigned EltSizeInBits = VT.getSizeInBits() / Size;
8307 // Extract known constant input data.
8308 APInt UndefSrcElts[2];
8309 SmallVector<APInt, 32> SrcEltBits[2];
8310 bool IsSrcConstant[2] = {
8311 getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
8312 SrcEltBits[0], true, false),
8313 getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
8314 SrcEltBits[1], true, false)};
8316 for (int i = 0; i < Size; ++i) {
8317 int M = Mask[i];
8319 // Already decoded as SM_SentinelZero / SM_SentinelUndef.
8320 if (M < 0) {
8321 assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
8322 if (SM_SentinelUndef == M)
8323 KnownUndef.setBit(i);
8324 if (SM_SentinelZero == M)
8325 KnownZero.setBit(i);
8329 // Determine shuffle input and normalize the mask.
8330 unsigned SrcIdx = M / Size;
8331 SDValue V = M < Size ? V1 : V2;
8332 M %= Size;
8334 // We are referencing an UNDEF input.
8335 if (V.isUndef()) {
8336 KnownUndef.setBit(i);
8337 continue;
8338 }
8340 // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
8341 // TODO: We currently only set UNDEF for integer types - floats use the same
8342 // registers as vectors and many of the scalar folded loads rely on the
8343 // SCALAR_TO_VECTOR pattern.
8344 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
8345 (Size % V.getValueType().getVectorNumElements()) == 0) {
8346 int Scale = Size / V.getValueType().getVectorNumElements();
8347 int Idx = M / Scale;
8348 if (Idx != 0 && !VT.isFloatingPoint())
8349 KnownUndef.setBit(i);
8350 else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
8351 KnownZero.setBit(i);
8355 // INSERT_SUBVECTOR - to widen vectors we often insert them into UNDEF
8356 // vectors.
8357 if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
8358 SDValue Vec = V.getOperand(0);
8359 int NumVecElts = Vec.getValueType().getVectorNumElements();
8360 if (Vec.isUndef() && Size == NumVecElts) {
8361 int Idx = V.getConstantOperandVal(2);
8362 int NumSubElts = V.getOperand(1).getValueType().getVectorNumElements();
8363 if (M < Idx || (Idx + NumSubElts) <= M)
8364 KnownUndef.setBit(i);
8369 // Attempt to extract from the source's constant bits.
8370 if (IsSrcConstant[SrcIdx]) {
8371 if (UndefSrcElts[SrcIdx][M])
8372 KnownUndef.setBit(i);
8373 else if (SrcEltBits[SrcIdx][M] == 0)
8374 KnownZero.setBit(i);
8378 assert(VT.getVectorNumElements() == (unsigned)Size &&
8379 "Different mask size from vector size!");
8383 // Replace target shuffle mask elements with known undef/zero sentinels.
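// Illustrative example (added; not in the original source): Mask = <0,1,2,3>
// with KnownZero bit 2 set becomes <0, 1, SM_SentinelZero, 3>; KnownUndef
// bits map to SM_SentinelUndef in the same way.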
8384 static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
8385 const APInt &KnownUndef,
8386 const APInt &KnownZero,
8387 bool ResolveKnownZeros = true) {
8388 unsigned NumElts = Mask.size();
8389 assert(KnownUndef.getBitWidth() == NumElts &&
8390 KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
8392 for (unsigned i = 0; i != NumElts; ++i) {
8393 if (KnownUndef[i])
8394 Mask[i] = SM_SentinelUndef;
8395 else if (ResolveKnownZeros && KnownZero[i])
8396 Mask[i] = SM_SentinelZero;
8400 // Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
8401 static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
8402 APInt &KnownUndef,
8403 APInt &KnownZero) {
8404 unsigned NumElts = Mask.size();
8405 KnownUndef = KnownZero = APInt::getZero(NumElts);
8407 for (unsigned i = 0; i != NumElts; ++i) {
8408 int M = Mask[i];
8409 if (SM_SentinelUndef == M)
8410 KnownUndef.setBit(i);
8411 if (SM_SentinelZero == M)
8412 KnownZero.setBit(i);
8416 // Attempt to create a shuffle mask from a VSELECT/BLENDV condition mask.
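// Illustrative example (added; not in the original source): a v4i32 VSELECT
// with constant condition <-1, 0, -1, 0> takes elements 0 and 2 from the
// first operand and 1 and 3 from the second, giving the mask <0, 5, 2, 7>.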
8417 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
8418 SDValue Cond, bool IsBLENDV = false) {
8419 EVT CondVT = Cond.getValueType();
8420 unsigned EltSizeInBits = CondVT.getScalarSizeInBits();
8421 unsigned NumElts = CondVT.getVectorNumElements();
8423 APInt UndefElts;
8424 SmallVector<APInt, 32> EltBits;
8425 if (!getTargetConstantBitsFromNode(Cond, EltSizeInBits, UndefElts, EltBits,
8426 /*AllowWholeUndefs*/ true,
8427 /*AllowPartialUndefs*/ false))
8428 return false;
8429 Mask.resize(NumElts, SM_SentinelUndef);
8431 for (int i = 0; i != (int)NumElts; ++i) {
8432 Mask[i] = i;
8433 // Arbitrarily choose from the 2nd operand if the select condition element
8434 // is undef.
8435 // TODO: Can we do better by matching patterns such as even/odd?
8436 if (UndefElts[i] || (!IsBLENDV && EltBits[i].isZero()) ||
8437 (IsBLENDV && EltBits[i].isNonNegative()))
8438 Mask[i] += NumElts;
8439 }
8440 return true;
8441 }
8444 // Forward declaration (for getFauxShuffleMask recursive check).
8445 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8446 SmallVectorImpl<SDValue> &Inputs,
8447 SmallVectorImpl<int> &Mask,
8448 const SelectionDAG &DAG, unsigned Depth,
8449 bool ResolveKnownElts);
8451 // Attempt to decode ops that could be represented as a shuffle mask.
8452 // The decoded shuffle mask may contain a different number of elements to the
8453 // destination value type.
8454 // TODO: Merge into getTargetShuffleInputs()
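// Illustrative example (added; not in the original source): an X86ISD::VSHLI
// of v2i64 by 8 bits can be modelled as the 16-byte shuffle
// <Z, 0, 1, 2, 3, 4, 5, 6, Z, 8, 9, 10, 11, 12, 13, 14> where Z is
// SM_SentinelZero.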
8455 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
8456 SmallVectorImpl<int> &Mask,
8457 SmallVectorImpl<SDValue> &Ops,
8458 const SelectionDAG &DAG, unsigned Depth,
8459 bool ResolveKnownElts) {
8461 if (Depth >= SelectionDAG::MaxRecursionDepth)
8462 return false; // Limit search depth.
8463 MVT VT = N.getSimpleValueType();
8464 unsigned NumElts = VT.getVectorNumElements();
8465 unsigned NumSizeInBits = VT.getSizeInBits();
8466 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
8467 if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
8468 return false;
8469 assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
8470 unsigned NumSizeInBytes = NumSizeInBits / 8;
8471 unsigned NumBytesPerElt = NumBitsPerElt / 8;
8473 unsigned Opcode = N.getOpcode();
8475 case ISD::VECTOR_SHUFFLE: {
8476 // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
8477 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
8478 if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
8479 Mask.append(ShuffleMask.begin(), ShuffleMask.end());
8480 Ops.push_back(N.getOperand(0));
8481 Ops.push_back(N.getOperand(1));
8482 return true;
8483 }
8484 return false;
8485 }
8486 case ISD::AND:
8487 case X86ISD::ANDNP: {
8488 // Attempt to decode as a per-byte mask.
8489 APInt UndefElts;
8490 SmallVector<APInt, 32> EltBits;
8491 SDValue N0 = N.getOperand(0);
8492 SDValue N1 = N.getOperand(1);
8493 bool IsAndN = (X86ISD::ANDNP == Opcode);
8494 uint64_t ZeroMask = IsAndN ? 255 : 0;
8495 if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
8496 return false;
8497 // We can't assume an undef src element gives an undef dst - the other src
8498 // might be zero.
8499 if (!UndefElts.isZero())
8500 return false;
8501 for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
8502 const APInt &ByteBits = EltBits[i];
8503 if (ByteBits != 0 && ByteBits != 255)
8504 return false;
8505 Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
8507 Ops.push_back(IsAndN ? N1 : N0);
8508 return true;
8509 }
8510 case ISD::OR: {
8511 // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
8512 // is a valid shuffle index.
8513 SDValue N0 = peekThroughBitcasts(N.getOperand(0));
8514 SDValue N1 = peekThroughBitcasts(N.getOperand(1));
8515 if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
8516 return false;
8518 SmallVector<int, 64> SrcMask0, SrcMask1;
8519 SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
8520 APInt Demand0 = APInt::getAllOnes(N0.getValueType().getVectorNumElements());
8521 APInt Demand1 = APInt::getAllOnes(N1.getValueType().getVectorNumElements());
8522 if (!getTargetShuffleInputs(N0, Demand0, SrcInputs0, SrcMask0, DAG,
8523 Depth + 1, true) ||
8524 !getTargetShuffleInputs(N1, Demand1, SrcInputs1, SrcMask1, DAG,
8525 Depth + 1, true))
8526 return false;
8528 size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
8529 SmallVector<int, 64> Mask0, Mask1;
8530 narrowShuffleMaskElts(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
8531 narrowShuffleMaskElts(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
8532 for (int i = 0; i != (int)MaskSize; ++i) {
8533 // NOTE: Don't handle SM_SentinelUndef, as we can end up in infinite
8534 // loops converting between OR and BLEND shuffles due to
8535 // canWidenShuffleElements merging away undef elements, meaning we
8536 // fail to recognise the OR as the undef element isn't known zero.
8537 if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
8538 Mask.push_back(SM_SentinelZero);
8539 else if (Mask1[i] == SM_SentinelZero)
8540 Mask.push_back(i);
8541 else if (Mask0[i] == SM_SentinelZero)
8542 Mask.push_back(i + MaskSize);
8543 else
8544 return false;
8545 }
8546 Ops.push_back(N0);
8547 Ops.push_back(N1);
8548 return true;
8549 }
8550 case ISD::INSERT_SUBVECTOR: {
8551 SDValue Src = N.getOperand(0);
8552 SDValue Sub = N.getOperand(1);
8553 EVT SubVT = Sub.getValueType();
8554 unsigned NumSubElts = SubVT.getVectorNumElements();
8555 if (!N->isOnlyUserOf(Sub.getNode()))
8556 return false;
8557 uint64_t InsertIdx = N.getConstantOperandVal(2);
8558 // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
8559 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
8560 Sub.getOperand(0).getValueType() == VT) {
8561 uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
8562 for (int i = 0; i != (int)NumElts; ++i)
8563 Mask.push_back(i);
8564 for (int i = 0; i != (int)NumSubElts; ++i)
8565 Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
8566 Ops.push_back(Src);
8567 Ops.push_back(Sub.getOperand(0));
8568 return true;
8569 }
8570 // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
8571 SmallVector<int, 64> SubMask;
8572 SmallVector<SDValue, 2> SubInputs;
8573 SDValue SubSrc = peekThroughOneUseBitcasts(Sub);
8574 EVT SubSrcVT = SubSrc.getValueType();
8575 if (!SubSrcVT.isVector())
8576 return false;
8578 APInt SubDemand = APInt::getAllOnes(SubSrcVT.getVectorNumElements());
8579 if (!getTargetShuffleInputs(SubSrc, SubDemand, SubInputs, SubMask, DAG,
8580 Depth + 1, ResolveKnownElts))
8581 return false;
8583 // Subvector shuffle inputs must not be larger than the subvector.
8584 if (llvm::any_of(SubInputs, [SubVT](SDValue SubInput) {
8585 return SubVT.getFixedSizeInBits() <
8586 SubInput.getValueSizeInBits().getFixedValue();
8590 if (SubMask.size() != NumSubElts) {
8591 assert(((SubMask.size() % NumSubElts) == 0 ||
8592 (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
8593 if ((NumSubElts % SubMask.size()) == 0) {
8594 int Scale = NumSubElts / SubMask.size();
8595 SmallVector<int,64> ScaledSubMask;
8596 narrowShuffleMaskElts(Scale, SubMask, ScaledSubMask);
8597 SubMask = ScaledSubMask;
8598 } else {
8599 int Scale = SubMask.size() / NumSubElts;
8600 NumSubElts = SubMask.size();
8601 NumElts *= Scale;
8602 InsertIdx *= Scale;
8603 }
8604 }
8605 Ops.push_back(Src);
8606 Ops.append(SubInputs.begin(), SubInputs.end());
8607 if (ISD::isBuildVectorAllZeros(Src.getNode()))
8608 Mask.append(NumElts, SM_SentinelZero);
8609 else
8610 for (int i = 0; i != (int)NumElts; ++i)
8611 Mask.push_back(i);
8612 for (int i = 0; i != (int)NumSubElts; ++i) {
8613 int M = SubMask[i];
8614 if (0 <= M) {
8615 int InputIdx = M / NumSubElts;
8616 M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
8617 }
8618 Mask[i + InsertIdx] = M;
8619 }
8620 return true;
8621 }
8622 case X86ISD::PINSRB:
8623 case X86ISD::PINSRW:
8624 case ISD::SCALAR_TO_VECTOR:
8625 case ISD::INSERT_VECTOR_ELT: {
8626 // Match against an insert_vector_elt/scalar_to_vector of an extract from a
8627 // vector, for matching src/dst vector types.
8628 SDValue Scl = N.getOperand(Opcode == ISD::SCALAR_TO_VECTOR ? 0 : 1);
8630 unsigned DstIdx = 0;
8631 if (Opcode != ISD::SCALAR_TO_VECTOR) {
8632 // Check we have an in-range constant insertion index.
8633 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
8634 N.getConstantOperandAPInt(2).uge(NumElts))
8635 return false;
8636 DstIdx = N.getConstantOperandVal(2);
8638 // Attempt to recognise an INSERT*(VEC, 0, DstIdx) shuffle pattern.
8639 if (X86::isZeroNode(Scl)) {
8640 Ops.push_back(N.getOperand(0));
8641 for (unsigned i = 0; i != NumElts; ++i)
8642 Mask.push_back(i == DstIdx ? SM_SentinelZero : (int)i);
8647 // Peek through trunc/aext/zext.
8648 // TODO: aext shouldn't require SM_SentinelZero padding.
8649 // TODO: handle shift of scalars.
8650 unsigned MinBitsPerElt = Scl.getScalarValueSizeInBits();
8651 while (Scl.getOpcode() == ISD::TRUNCATE ||
8652 Scl.getOpcode() == ISD::ANY_EXTEND ||
8653 Scl.getOpcode() == ISD::ZERO_EXTEND) {
8654 Scl = Scl.getOperand(0);
8655 MinBitsPerElt =
8656 std::min<unsigned>(MinBitsPerElt, Scl.getScalarValueSizeInBits());
8657 }
8658 if ((MinBitsPerElt % 8) != 0)
8659 return false;
8661 // Attempt to find the source vector the scalar was extracted from.
8662 SDValue SrcExtract;
8663 if ((Scl.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
8664 Scl.getOpcode() == X86ISD::PEXTRW ||
8665 Scl.getOpcode() == X86ISD::PEXTRB) &&
8666 Scl.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
8667 SrcExtract = Scl;
8668 }
8669 if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
8670 return false;
8672 SDValue SrcVec = SrcExtract.getOperand(0);
8673 EVT SrcVT = SrcVec.getValueType();
8674 if (!SrcVT.getScalarType().isByteSized())
8675 return false;
8676 unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
8677 unsigned SrcByte = SrcIdx * (SrcVT.getScalarSizeInBits() / 8);
8678 unsigned DstByte = DstIdx * NumBytesPerElt;
8679 MinBitsPerElt =
8680 std::min<unsigned>(MinBitsPerElt, SrcVT.getScalarSizeInBits());
8682 // Create 'identity' byte level shuffle mask and then add inserted bytes.
8683 if (Opcode == ISD::SCALAR_TO_VECTOR) {
8684 Ops.push_back(SrcVec);
8685 Mask.append(NumSizeInBytes, SM_SentinelUndef);
8686 } else {
8687 Ops.push_back(SrcVec);
8688 Ops.push_back(N.getOperand(0));
8689 for (int i = 0; i != (int)NumSizeInBytes; ++i)
8690 Mask.push_back(NumSizeInBytes + i);
8693 unsigned MinBytesPerElts = MinBitsPerElt / 8;
8694 MinBytesPerElts = std::min(MinBytesPerElts, NumBytesPerElt);
8695 for (unsigned i = 0; i != MinBytesPerElts; ++i)
8696 Mask[DstByte + i] = SrcByte + i;
8697 for (unsigned i = MinBytesPerElts; i < NumBytesPerElt; ++i)
8698 Mask[DstByte + i] = SM_SentinelZero;
8699 return true;
8700 }
8701 case X86ISD::PACKSS:
8702 case X86ISD::PACKUS: {
8703 SDValue N0 = N.getOperand(0);
8704 SDValue N1 = N.getOperand(1);
8705 assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
8706 N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
8707 "Unexpected input value type");
8709 APInt EltsLHS, EltsRHS;
8710 getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
8712 // If we know input saturation won't happen (or we don't care for particular
8713 // lanes), we can treat this as a truncation shuffle.
8714 bool Offset0 = false, Offset1 = false;
8715 if (Opcode == X86ISD::PACKSS) {
8716 if ((!(N0.isUndef() || EltsLHS.isZero()) &&
8717 DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
8718 (!(N1.isUndef() || EltsRHS.isZero()) &&
8719 DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
8720 return false;
8721 // We can't easily fold ASHR into a shuffle, but if it was feeding a
8722 // PACKSS then it was likely being used for sign-extension for a
8723 // truncation, so just peek through and adjust the mask accordingly.
8724 if (N0.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N0.getNode()) &&
8725 N0.getConstantOperandAPInt(1) == NumBitsPerElt) {
8726 Offset0 = true;
8727 N0 = N0.getOperand(0);
8728 }
8729 if (N1.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N1.getNode()) &&
8730 N1.getConstantOperandAPInt(1) == NumBitsPerElt) {
8731 Offset1 = true;
8732 N1 = N1.getOperand(0);
8733 }
8734 } else {
8735 APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
8736 if ((!(N0.isUndef() || EltsLHS.isZero()) &&
8737 !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
8738 (!(N1.isUndef() || EltsRHS.isZero()) &&
8739 !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
8740 return false;
8741 }
8743 bool IsUnary = (N0 == N1);
8745 Ops.push_back(N0);
8746 if (!IsUnary)
8747 Ops.push_back(N1);
8749 createPackShuffleMask(VT, Mask, IsUnary);
8751 if (Offset0 || Offset1) {
8752 for (int &M : Mask)
8753 if ((Offset0 && isInRange(M, 0, NumElts)) ||
8754 (Offset1 && isInRange(M, NumElts, 2 * NumElts)))
8755 ++M;
8756 }
8757 return true;
8758 }
8759 case ISD::VSELECT:
8760 case X86ISD::BLENDV: {
8761 SDValue Cond = N.getOperand(0);
8762 if (createShuffleMaskFromVSELECT(Mask, Cond, Opcode == X86ISD::BLENDV)) {
8763 Ops.push_back(N.getOperand(1));
8764 Ops.push_back(N.getOperand(2));
8765 return true;
8766 }
8767 return false;
8768 }
8769 case X86ISD::VTRUNC: {
8770 SDValue Src = N.getOperand(0);
8771 EVT SrcVT = Src.getValueType();
8772 // Truncated source must be a simple vector.
8773 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
8774 (SrcVT.getScalarSizeInBits() % 8) != 0)
8775 return false;
8776 unsigned NumSrcElts = SrcVT.getVectorNumElements();
8777 unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
8778 unsigned Scale = NumBitsPerSrcElt / NumBitsPerElt;
8779 assert((NumBitsPerSrcElt % NumBitsPerElt) == 0 && "Illegal truncation");
8780 for (unsigned i = 0; i != NumSrcElts; ++i)
8781 Mask.push_back(i * Scale);
8782 Mask.append(NumElts - NumSrcElts, SM_SentinelZero);
8783 Ops.push_back(Src);
8784 return true;
8785 }
8786 case X86ISD::VSHLI:
8787 case X86ISD::VSRLI: {
8788 uint64_t ShiftVal = N.getConstantOperandVal(1);
8789 // Out of range bit shifts are guaranteed to be zero.
8790 if (NumBitsPerElt <= ShiftVal) {
8791 Mask.append(NumElts, SM_SentinelZero);
8795 // We can only decode 'whole byte' bit shifts as shuffles.
8796 if ((ShiftVal % 8) != 0)
8797 return false;
8799 uint64_t ByteShift = ShiftVal / 8;
8800 Ops.push_back(N.getOperand(0));
8802 // Clear mask to all zeros and insert the shifted byte indices.
8803 Mask.append(NumSizeInBytes, SM_SentinelZero);
8805 if (X86ISD::VSHLI == Opcode) {
8806 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
8807 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
8808 Mask[i + j] = i + j - ByteShift;
8809 } else {
8810 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
8811 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
8812 Mask[i + j - ByteShift] = i + j;
8813 }
8814 return true;
8815 }
8816 case X86ISD::VROTLI:
8817 case X86ISD::VROTRI: {
8818 // We can only decode 'whole byte' bit rotates as shuffles.
8819 uint64_t RotateVal = N.getConstantOperandAPInt(1).urem(NumBitsPerElt);
8820 if ((RotateVal % 8) != 0)
8821 return false;
8822 Ops.push_back(N.getOperand(0));
8823 int Offset = RotateVal / 8;
8824 Offset = (X86ISD::VROTLI == Opcode ? NumBytesPerElt - Offset : Offset);
8825 for (int i = 0; i != (int)NumElts; ++i) {
8826 int BaseIdx = i * NumBytesPerElt;
8827 for (int j = 0; j != (int)NumBytesPerElt; ++j) {
8828 Mask.push_back(BaseIdx + ((Offset + j) % NumBytesPerElt));
8829 }
8830 }
8831 return true;
8832 }
8833 case X86ISD::VBROADCAST: {
8834 SDValue Src = N.getOperand(0);
8835 if (!Src.getSimpleValueType().isVector()) {
8836 if (Src.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8837 !isNullConstant(Src.getOperand(1)) ||
8838 Src.getOperand(0).getValueType().getScalarType() !=
8839 VT.getScalarType())
8840 return false;
8841 Src = Src.getOperand(0);
8842 }
8843 Ops.push_back(Src);
8844 Mask.append(NumElts, 0);
8845 return true;
8846 }
8847 case ISD::SIGN_EXTEND_VECTOR_INREG: {
8848 SDValue Src = N.getOperand(0);
8849 EVT SrcVT = Src.getValueType();
8850 unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
8852 // Extended source must be a simple vector.
8853 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
8854 (NumBitsPerSrcElt % 8) != 0)
8855 return false;
8857 // We can only handle all-signbits extensions.
8858 APInt DemandedSrcElts =
8859 DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
8860 if (DAG.ComputeNumSignBits(Src, DemandedSrcElts) != NumBitsPerSrcElt)
8861 return false;
8863 assert((NumBitsPerElt % NumBitsPerSrcElt) == 0 && "Unexpected extension");
8864 unsigned Scale = NumBitsPerElt / NumBitsPerSrcElt;
8865 for (unsigned I = 0; I != NumElts; ++I)
8866 Mask.append(Scale, I);
8867 Ops.push_back(Src);
8868 return true;
8869 }
8870 case ISD::ZERO_EXTEND:
8871 case ISD::ANY_EXTEND:
8872 case ISD::ZERO_EXTEND_VECTOR_INREG:
8873 case ISD::ANY_EXTEND_VECTOR_INREG: {
8874 SDValue Src = N.getOperand(0);
8875 EVT SrcVT = Src.getValueType();
8877 // Extended source must be a simple vector.
8878 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
8879 (SrcVT.getScalarSizeInBits() % 8) != 0)
8880 return false;
8882 bool IsAnyExtend =
8883 (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
8884 DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts,
8885 IsAnyExtend, Mask);
8886 Ops.push_back(Src);
8887 return true;
8888 }
8889 }
8891 return false;
8892 }
8894 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
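/// Illustrative example (added; not in the original source): Inputs = {A, A}
/// with Mask = <0, 5, 2, 7> resolves to Inputs = {A}, Mask = <0, 1, 2, 3>.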
8895 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
8896 SmallVectorImpl<int> &Mask) {
8897 int MaskWidth = Mask.size();
8898 SmallVector<SDValue, 16> UsedInputs;
8899 for (int i = 0, e = Inputs.size(); i < e; ++i) {
8900 int lo = UsedInputs.size() * MaskWidth;
8901 int hi = lo + MaskWidth;
8903 // Strip UNDEF input usage.
8904 if (Inputs[i].isUndef())
8905 for (int &M : Mask)
8906 if ((lo <= M) && (M < hi))
8907 M = SM_SentinelUndef;
8909 // Check for unused inputs.
8910 if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
8911 for (int &M : Mask)
8912 if (lo <= M)
8913 M -= MaskWidth;
8914 continue;
8915 }
8917 // Check for repeated inputs.
8918 bool IsRepeat = false;
8919 for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
8920 if (UsedInputs[j] != Inputs[i])
8921 continue;
8922 IsRepeat = true;
8923 for (int &M : Mask)
8924 if (lo <= M)
8925 M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
8926 break;
8927 }
8928 if (IsRepeat)
8929 continue;
8930 UsedInputs.push_back(Inputs[i]);
8933 Inputs = UsedInputs;
8936 /// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
8937 /// and then sets the SM_SentinelUndef and SM_SentinelZero values.
8938 /// Returns true if the target shuffle mask was decoded.
8939 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8940 SmallVectorImpl<SDValue> &Inputs,
8941 SmallVectorImpl<int> &Mask,
8942 APInt &KnownUndef, APInt &KnownZero,
8943 const SelectionDAG &DAG, unsigned Depth,
8944 bool ResolveKnownElts) {
8945 if (Depth >= SelectionDAG::MaxRecursionDepth)
8946 return false; // Limit search depth.
8948 EVT VT = Op.getValueType();
8949 if (!VT.isSimple() || !VT.isVector())
8950 return false;
8952 if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
8953 if (ResolveKnownElts)
8954 resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
8955 return true;
8956 }
8957 if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
8958 ResolveKnownElts)) {
8959 resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
8960 return true;
8961 }
8962 return false;
8963 }
8965 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8966 SmallVectorImpl<SDValue> &Inputs,
8967 SmallVectorImpl<int> &Mask,
8968 const SelectionDAG &DAG, unsigned Depth,
8969 bool ResolveKnownElts) {
8970 APInt KnownUndef, KnownZero;
8971 return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
8972 KnownZero, DAG, Depth, ResolveKnownElts);
8975 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
8976 SmallVectorImpl<int> &Mask,
8977 const SelectionDAG &DAG, unsigned Depth = 0,
8978 bool ResolveKnownElts = true) {
8979 EVT VT = Op.getValueType();
8980 if (!VT.isSimple() || !VT.isVector())
8983 unsigned NumElts = Op.getValueType().getVectorNumElements();
8984 APInt DemandedElts = APInt::getAllOnes(NumElts);
8985 return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, DAG, Depth,
8989 // Attempt to create a scalar/subvector broadcast from the base MemSDNode.
8990 static SDValue getBROADCAST_LOAD(unsigned Opcode, const SDLoc &DL, EVT VT,
8991 EVT MemVT, MemSDNode *Mem, unsigned Offset,
8992 SelectionDAG &DAG) {
8993 assert((Opcode == X86ISD::VBROADCAST_LOAD ||
8994 Opcode == X86ISD::SUBV_BROADCAST_LOAD) &&
8995 "Unknown broadcast load type");
8997 // Ensure this is a simple (non-atomic, non-volatile), temporal read memop.
8998 if (!Mem || !Mem->readMem() || !Mem->isSimple() || Mem->isNonTemporal())
9002 DAG.getMemBasePlusOffset(Mem->getBasePtr(), TypeSize::Fixed(Offset), DL);
9003 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9004 SDValue Ops[] = {Mem->getChain(), Ptr};
9005 SDValue BcstLd = DAG.getMemIntrinsicNode(
9006 Opcode, DL, Tys, Ops, MemVT,
9007 DAG.getMachineFunction().getMachineMemOperand(
9008 Mem->getMemOperand(), Offset, MemVT.getStoreSize()));
9009 DAG.makeEquivalentMemoryOrdering(SDValue(Mem, 1), BcstLd.getValue(1));
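// Example use (editor's sketch): broadcasting the f32 at byte offset 4 of an
// existing v4f32 load Ld would be
//   getBROADCAST_LOAD(X86ISD::VBROADCAST_LOAD, DL, MVT::v4f32, MVT::f32,
//                     Ld, /*Offset=*/4, DAG)
// giving a VBROADCAST_LOAD whose MachineMemOperand is the original one offset
// by 4 bytes and whose chain is merged with the original load's ordering.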
9013 /// Returns the scalar element that will make up the i'th
9014 /// element of the result of the vector shuffle.
9015 static SDValue getShuffleScalarElt(SDValue Op, unsigned Index,
9016 SelectionDAG &DAG, unsigned Depth) {
9017 if (Depth >= SelectionDAG::MaxRecursionDepth)
9018 return SDValue(); // Limit search depth.
9020 EVT VT = Op.getValueType();
9021 unsigned Opcode = Op.getOpcode();
9022 unsigned NumElems = VT.getVectorNumElements();
9024 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
9025 if (auto *SV = dyn_cast<ShuffleVectorSDNode>(Op)) {
9026 int Elt = SV->getMaskElt(Index);
9029 return DAG.getUNDEF(VT.getVectorElementType());
9031 SDValue Src = (Elt < (int)NumElems) ? SV->getOperand(0) : SV->getOperand(1);
9032 return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
9035 // Recurse into target specific vector shuffles to find scalars.
9036 if (isTargetShuffle(Opcode)) {
9037 MVT ShufVT = VT.getSimpleVT();
9038 MVT ShufSVT = ShufVT.getVectorElementType();
9039 int NumElems = (int)ShufVT.getVectorNumElements();
9040 SmallVector<int, 16> ShuffleMask;
9041 SmallVector<SDValue, 16> ShuffleOps;
9042 if (!getTargetShuffleMask(Op.getNode(), ShufVT, true, ShuffleOps,
9046 int Elt = ShuffleMask[Index];
9047 if (Elt == SM_SentinelZero)
9048 return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(Op), ShufSVT)
9049 : DAG.getConstantFP(+0.0, SDLoc(Op), ShufSVT);
9050 if (Elt == SM_SentinelUndef)
9051 return DAG.getUNDEF(ShufSVT);
9053 assert(0 <= Elt && Elt < (2 * NumElems) && "Shuffle index out of range");
9054 SDValue Src = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
9055 return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
9058 // Recurse into insert_subvector base/sub vector to find scalars.
9059 if (Opcode == ISD::INSERT_SUBVECTOR) {
9060 SDValue Vec = Op.getOperand(0);
9061 SDValue Sub = Op.getOperand(1);
9062 uint64_t SubIdx = Op.getConstantOperandVal(2);
9063 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
9065 if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
9066 return getShuffleScalarElt(Sub, Index - SubIdx, DAG, Depth + 1);
9067 return getShuffleScalarElt(Vec, Index, DAG, Depth + 1);
9070 // Recurse into concat_vectors sub vector to find scalars.
9071 if (Opcode == ISD::CONCAT_VECTORS) {
9072 EVT SubVT = Op.getOperand(0).getValueType();
9073 unsigned NumSubElts = SubVT.getVectorNumElements();
9074 uint64_t SubIdx = Index / NumSubElts;
9075 uint64_t SubElt = Index % NumSubElts;
9076 return getShuffleScalarElt(Op.getOperand(SubIdx), SubElt, DAG, Depth + 1);
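// e.g. for (concat_vectors (v4f32 A), (v4f32 B)) and Index = 5: SubIdx = 1
// and SubElt = 1, so we recurse into element 1 of B.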
9079 // Recurse into extract_subvector src vector to find scalars.
9080 if (Opcode == ISD::EXTRACT_SUBVECTOR) {
9081 SDValue Src = Op.getOperand(0);
9082 uint64_t SrcIdx = Op.getConstantOperandVal(1);
9083 return getShuffleScalarElt(Src, Index + SrcIdx, DAG, Depth + 1);
9086 // We only peek through bitcasts of the same vector width.
9087 if (Opcode == ISD::BITCAST) {
9088 SDValue Src = Op.getOperand(0);
9089 EVT SrcVT = Src.getValueType();
9090 if (SrcVT.isVector() && SrcVT.getVectorNumElements() == NumElems)
9091 return getShuffleScalarElt(Src, Index, DAG, Depth + 1);
9095 // Actual nodes that may contain scalar elements
9097 // For insert_vector_elt - either return the index matching scalar or recurse
9098 // into the base vector.
9099 if (Opcode == ISD::INSERT_VECTOR_ELT &&
9100 isa<ConstantSDNode>(Op.getOperand(2))) {
9101 if (Op.getConstantOperandAPInt(2) == Index)
9102 return Op.getOperand(1);
9103 return getShuffleScalarElt(Op.getOperand(0), Index, DAG, Depth + 1);
9106 if (Opcode == ISD::SCALAR_TO_VECTOR)
9107 return (Index == 0) ? Op.getOperand(0)
9108 : DAG.getUNDEF(VT.getVectorElementType());
9110 if (Opcode == ISD::BUILD_VECTOR)
9111 return Op.getOperand(Index);
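// Putting it together (editor's example): for
// (vector_shuffle<2,u,u,u> (build_vector a, b, c, d), undef), Index 0 maps to
// mask element 2, which recurses into the build_vector and resolves to 'c'.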
9116 // Use PINSRB/PINSRW/PINSRD to create a build vector.
9117 static SDValue LowerBuildVectorAsInsert(SDValue Op, const APInt &NonZeroMask,
9118 unsigned NumNonZero, unsigned NumZero,
9120 const X86Subtarget &Subtarget) {
9121 MVT VT = Op.getSimpleValueType();
9122 unsigned NumElts = VT.getVectorNumElements();
9123 assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
9124 ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
9125 "Illegal vector insertion");
9131 for (unsigned i = 0; i < NumElts; ++i) {
9132 bool IsNonZero = NonZeroMask[i];
9136 // If the build vector contains zeros or our first insertion is not the
9137 // first index, then insert into a zero vector to break any register
9138 // dependency; else use SCALAR_TO_VECTOR.
9141 if (NumZero || 0 != i)
9142 V = getZeroVector(VT, Subtarget, DAG, dl);
9144 assert(0 == i && "Expected insertion into zero-index");
9145 V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
9146 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
9147 V = DAG.getBitcast(VT, V);
9151 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
9152 DAG.getIntPtrConstant(i, dl));
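// e.g. lowering v8i16 <x, 0, y, 0, 0, 0, 0, 0>: since NumZero != 0, V starts
// as a zero vector and the non-zero elements are inserted (PINSRW) at
// indices 0 and 2.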
9158 /// Custom lower build_vector of v16i8.
9159 static SDValue LowerBuildVectorv16i8(SDValue Op, const APInt &NonZeroMask,
9160 unsigned NumNonZero, unsigned NumZero,
9162 const X86Subtarget &Subtarget) {
9163 if (NumNonZero > 8 && !Subtarget.hasSSE41())
9166 // SSE4.1 - use PINSRB to insert each byte directly.
9167 if (Subtarget.hasSSE41())
9168 return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
9174 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
9175 for (unsigned i = 0; i < 16; i += 2) {
9176 bool ThisIsNonZero = NonZeroMask[i];
9177 bool NextIsNonZero = NonZeroMask[i + 1];
9178 if (!ThisIsNonZero && !NextIsNonZero)
9181 // FIXME: Investigate combining the first 4 bytes as a i32 instead.
9183 if (ThisIsNonZero) {
9184 if (NumZero || NextIsNonZero)
9185 Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
9187 Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
9190 if (NextIsNonZero) {
9191 SDValue NextElt = Op.getOperand(i + 1);
9192 if (i == 0 && NumZero)
9193 NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
9195 NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
9196 NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
9197 DAG.getConstant(8, dl, MVT::i8));
9199 Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
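// e.g. for the byte pair (b2, b3) at i == 2, this forms the i32 value
// Elt = b2 | (b3 << 8), which is inserted below at word index 1.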
9204 // If our first insertion is not the first index or zeros are needed, then
9205 // insert into zero vector. Otherwise, use SCALAR_TO_VECTOR (leaves high
9206 // elements undefined).
9208 if (i != 0 || NumZero)
9209 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
9211 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
9212 V = DAG.getBitcast(MVT::v8i16, V);
9216 Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
9217 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
9218 DAG.getIntPtrConstant(i / 2, dl));
9221 return DAG.getBitcast(MVT::v16i8, V);
9224 /// Custom lower build_vector of v8i16.
9225 static SDValue LowerBuildVectorv8i16(SDValue Op, const APInt &NonZeroMask,
9226 unsigned NumNonZero, unsigned NumZero,
9228 const X86Subtarget &Subtarget) {
9229 if (NumNonZero > 4 && !Subtarget.hasSSE41())
9232 // Use PINSRW to insert each element directly.
9233 return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
9237 /// Custom lower build_vector of v4i32 or v4f32.
9238 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
9239 const X86Subtarget &Subtarget) {
9240 // If this is a splat of a pair of elements, use MOVDDUP (unless the target
9241 // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
9242 // Because we're creating a less complicated build vector here, we may enable
9243 // further folding of the MOVDDUP via shuffle transforms.
9244 if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
9245 Op.getOperand(0) == Op.getOperand(2) &&
9246 Op.getOperand(1) == Op.getOperand(3) &&
9247 Op.getOperand(0) != Op.getOperand(1)) {
9249 MVT VT = Op.getSimpleValueType();
9250 MVT EltVT = VT.getVectorElementType();
9251 // Create a new build vector with the first 2 elements followed by undef
9252 // padding, bitcast to v2f64, duplicate, and bitcast back.
9253 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
9254 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
9255 SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
9256 SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
9257 return DAG.getBitcast(VT, Dup);
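// e.g. (v4f32 <a, b, a, b>) becomes
// (v4f32 (movddup (v2f64 (bitcast (v4f32 <a, b, undef, undef>))))),
// splatting the 64-bit <a,b> pair.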
9260 // Find all zeroable elements.
9261 std::bitset<4> Zeroable, Undefs;
9262 for (int i = 0; i < 4; ++i) {
9263 SDValue Elt = Op.getOperand(i);
9264 Undefs[i] = Elt.isUndef();
9265 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
9267 assert(Zeroable.size() - Zeroable.count() > 1 &&
9268 "We expect at least two non-zero elements!");
9270 // We only know how to deal with build_vector nodes where elements are either
9271 // zeroable or extract_vector_elt with constant index.
9272 SDValue FirstNonZero;
9273 unsigned FirstNonZeroIdx;
9274 for (unsigned i = 0; i < 4; ++i) {
9277 SDValue Elt = Op.getOperand(i);
9278 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9279 !isa<ConstantSDNode>(Elt.getOperand(1)))
9281 // Make sure that this node is extracting from a 128-bit vector.
9282 MVT VT = Elt.getOperand(0).getSimpleValueType();
9283 if (!VT.is128BitVector())
9285 if (!FirstNonZero.getNode()) {
9287 FirstNonZeroIdx = i;
9291 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
9292 SDValue V1 = FirstNonZero.getOperand(0);
9293 MVT VT = V1.getSimpleValueType();
9295 // See if this build_vector can be lowered as a blend with zero.
9297 unsigned EltMaskIdx, EltIdx;
9299 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
9300 if (Zeroable[EltIdx]) {
9301 // The zero vector will be on the right hand side.
9302 Mask[EltIdx] = EltIdx+4;
9306 Elt = Op->getOperand(EltIdx);
9307 // By construction, Elt is an EXTRACT_VECTOR_ELT with a constant index.
9308 EltMaskIdx = Elt.getConstantOperandVal(1);
9309 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
9311 Mask[EltIdx] = EltIdx;
9315 // Let the shuffle legalizer deal with blend operations.
9316 SDValue VZeroOrUndef = (Zeroable == Undefs)
9318 : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
9319 if (V1.getSimpleValueType() != VT)
9320 V1 = DAG.getBitcast(VT, V1);
9321 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
9324 // See if we can lower this build_vector to a INSERTPS.
9325 if (!Subtarget.hasSSE41())
9328 SDValue V2 = Elt.getOperand(0);
9329 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
9332 bool CanFold = true;
9333 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
9337 SDValue Current = Op->getOperand(i);
9338 SDValue SrcVector = Current->getOperand(0);
9341 CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
9347 assert(V1.getNode() && "Expected at least two non-zero elements!");
9348 if (V1.getSimpleValueType() != MVT::v4f32)
9349 V1 = DAG.getBitcast(MVT::v4f32, V1);
9350 if (V2.getSimpleValueType() != MVT::v4f32)
9351 V2 = DAG.getBitcast(MVT::v4f32, V2);
9353 // Ok, we can emit an INSERTPS instruction.
9354 unsigned ZMask = Zeroable.to_ulong();
9356 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
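// INSERTPS imm8 layout: bits [7:6] select the source element (COUNT_S), bits
// [5:4] select the destination lane (COUNT_D) and bits [3:0] are the zero
// mask (ZMASK). e.g. EltMaskIdx = 2, EltIdx = 1, ZMask = 0b1000 encodes 0x98.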
9357 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
9359 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
9360 DAG.getIntPtrConstant(InsertPSMask, DL, true));
9361 return DAG.getBitcast(VT, Result);
9364 /// Return a vector logical shift node.
9365 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
9366 SelectionDAG &DAG, const TargetLowering &TLI,
9368 assert(VT.is128BitVector() && "Unknown type for VShift");
9369 MVT ShVT = MVT::v16i8;
9370 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
9371 SrcOp = DAG.getBitcast(ShVT, SrcOp);
9372 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
9373 SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
9374 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
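// e.g. getVShift(/*isLeft=*/true, MVT::v4i32, X, /*NumBits=*/32, ...) emits
// (v4i32 (bitcast (X86ISD::VSHLDQ (v16i8 (bitcast X)), 4))), a whole-vector
// PSLLDQ byte shift by 4.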
9377 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
9378 SelectionDAG &DAG) {
9380 // Check if the scalar load can be widened into a vector load, and if
9381 // the address is "base + cst", see if the cst can be "absorbed" into
9382 // the shuffle mask.
9383 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
9384 SDValue Ptr = LD->getBasePtr();
9385 if (!ISD::isNormalLoad(LD) || !LD->isSimple())
9387 EVT PVT = LD->getValueType(0);
9388 if (PVT != MVT::i32 && PVT != MVT::f32)
9393 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
9394 FI = FINode->getIndex();
9396 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
9397 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
9398 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
9399 Offset = Ptr.getConstantOperandVal(1);
9400 Ptr = Ptr.getOperand(0);
9405 // FIXME: 256-bit vector instructions don't require a strict alignment;
9406 // improve this code to support them better.
9407 Align RequiredAlign(VT.getSizeInBits() / 8);
9408 SDValue Chain = LD->getChain();
9409 // Make sure the stack object alignment is at least 16 or 32.
9410 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9411 MaybeAlign InferredAlign = DAG.InferPtrAlign(Ptr);
9412 if (!InferredAlign || *InferredAlign < RequiredAlign) {
9413 if (MFI.isFixedObjectIndex(FI)) {
9414 // Can't change the alignment. FIXME: It's possible to compute
9415 // the exact stack offset and reference FI + adjust offset instead,
9416 // if someone *really* cares about this. That's the way to implement it.
9419 MFI.setObjectAlignment(FI, RequiredAlign);
9423 // (Offset % 16 or 32) must be a multiple of 4. The address is then
9424 // Ptr + (Offset & ~15).
9427 if ((Offset % RequiredAlign.value()) & 3)
9429 int64_t StartOffset = Offset & ~int64_t(RequiredAlign.value() - 1);
9432 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
9433 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
9436 int EltNo = (Offset - StartOffset) >> 2;
9437 unsigned NumElems = VT.getVectorNumElements();
9439 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
9440 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
9441 LD->getPointerInfo().getWithOffset(StartOffset));
9443 SmallVector<int, 8> Mask(NumElems, EltNo);
9445 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
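// Worked example (editor's sketch): an i32 load from a 16-byte aligned stack
// slot at Offset = 8 gives StartOffset = 8 & ~15 = 0 and EltNo = 2, so we
// emit a v4i32 load of the whole slot and splat it with mask <2,2,2,2>.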
9452 // Recurse to find a LoadSDNode source and the accumulated ByteOffset.
9452 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
9453 if (ISD::isNON_EXTLoad(Elt.getNode())) {
9454 auto *BaseLd = cast<LoadSDNode>(Elt);
9455 if (!BaseLd->isSimple())
9462 switch (Elt.getOpcode()) {
9465 case ISD::SCALAR_TO_VECTOR:
9466 return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
9468 if (auto *AmtC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
9469 uint64_t Amt = AmtC->getZExtValue();
9470 if ((Amt % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
9471 ByteOffset += Amt / 8;
9476 case ISD::EXTRACT_VECTOR_ELT:
9477 if (auto *IdxC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
9478 SDValue Src = Elt.getOperand(0);
9479 unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
9480 unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
9481 if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
9482 findEltLoadSrc(Src, Ld, ByteOffset)) {
9483 uint64_t Idx = IdxC->getZExtValue();
9484 ByteOffset += Idx * (SrcSizeInBits / 8);
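// e.g. (i32 (extract_vector_elt (v4i32 (load %p)), 2)) resolves to the
// underlying load with ByteOffset += 2 * 4 = 8; similarly a right shift by 32
// of a loaded i64 resolves with ByteOffset += 4 via the shift case above.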
9494 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
9495 /// elements can be replaced by a single large load which has the same value as
9496 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
9498 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
9499 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
9500 const SDLoc &DL, SelectionDAG &DAG,
9501 const X86Subtarget &Subtarget,
9502 bool IsAfterLegalize) {
9503 if ((VT.getScalarSizeInBits() % 8) != 0)
9506 unsigned NumElems = Elts.size();
9508 int LastLoadedElt = -1;
9509 APInt LoadMask = APInt::getZero(NumElems);
9510 APInt ZeroMask = APInt::getZero(NumElems);
9511 APInt UndefMask = APInt::getZero(NumElems);
9513 SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
9514 SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
9516 // For each element in the initializer, see if we've found a load, zero or
9517 // an undef.
9518 for (unsigned i = 0; i < NumElems; ++i) {
9519 SDValue Elt = peekThroughBitcasts(Elts[i]);
9522 if (Elt.isUndef()) {
9523 UndefMask.setBit(i);
9526 if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
9531 // Each loaded element must be the correct fractional portion of the
9532 // requested vector load.
9533 unsigned EltSizeInBits = Elt.getValueSizeInBits();
9534 if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
9537 if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
9539 unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
9540 if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
9546 assert((ZeroMask.popcount() + UndefMask.popcount() + LoadMask.popcount()) ==
9548 "Incomplete element masks");
9550 // Handle Special Cases - all undef or undef/zero.
9551 if (UndefMask.popcount() == NumElems)
9552 return DAG.getUNDEF(VT);
9553 if ((ZeroMask.popcount() + UndefMask.popcount()) == NumElems)
9554 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
9555 : DAG.getConstantFP(0.0, DL, VT);
9557 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9558 int FirstLoadedElt = LoadMask.countr_zero();
9559 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
9560 EVT EltBaseVT = EltBase.getValueType();
9561 assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
9562 "Register/Memory size mismatch");
9563 LoadSDNode *LDBase = Loads[FirstLoadedElt];
9564 assert(LDBase && "Did not find base load for merging consecutive loads");
9565 unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
9566 unsigned BaseSizeInBytes = BaseSizeInBits / 8;
9567 int NumLoadedElts = (1 + LastLoadedElt - FirstLoadedElt);
9568 int LoadSizeInBits = NumLoadedElts * BaseSizeInBits;
9569 assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
9571 // TODO: Support offsetting the base load.
9572 if (ByteOffsets[FirstLoadedElt] != 0)
9575 // Check to see if the element's load is consecutive to the base load
9576 // or offset from a previous (already checked) load.
9577 auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
9578 LoadSDNode *Ld = Loads[EltIdx];
9579 int64_t ByteOffset = ByteOffsets[EltIdx];
9580 if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
9581 int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
9582 return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
9583 Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
9585 return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
9586 EltIdx - FirstLoadedElt);
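// e.g. with 32-bit base elements, if element 1 refers to the same i64 load as
// element 0 but at ByteOffset 4, it is accepted via BaseIdx = 1 - 4/4 = 0,
// since element 0 already covers that load at offset 0.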
9589 // Consecutive loads can contain UNDEFs but not ZERO elements.
9590 // Consecutive loads with UNDEF and ZERO elements require an
9591 // additional shuffle stage to clear the ZERO elements.
9592 bool IsConsecutiveLoad = true;
9593 bool IsConsecutiveLoadWithZeros = true;
9594 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
9596 if (!CheckConsecutiveLoad(LDBase, i)) {
9597 IsConsecutiveLoad = false;
9598 IsConsecutiveLoadWithZeros = false;
9601 } else if (ZeroMask[i]) {
9602 IsConsecutiveLoad = false;
9606 auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
9607 auto MMOFlags = LDBase->getMemOperand()->getFlags();
9608 assert(LDBase->isSimple() &&
9609 "Cannot merge volatile or atomic loads.");
9611 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
9612 LDBase->getPointerInfo(), LDBase->getOriginalAlign(),
9614 for (auto *LD : Loads)
9616 DAG.makeEquivalentMemoryOrdering(LD, NewLd);
9620 // Check if the base load is entirely dereferenceable.
9621 bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
9622 VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
9624 // LOAD - all consecutive load/undefs (must start/end with a load or be
9625 // entirely dereferenceable). If we have found an entire vector of loads and
9626 // undefs, then return a large load of the entire vector width starting at the
9627 // base pointer. If the vector contains zeros, then attempt to shuffle those
9628 // elements.
9629 if (FirstLoadedElt == 0 &&
9630 (NumLoadedElts == (int)NumElems || IsDereferenceable) &&
9631 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
9632 if (IsAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
9635 // Don't create 256-bit non-temporal aligned loads without AVX2 as these
9636 // will lower to regular temporal loads and use the cache.
9637 if (LDBase->isNonTemporal() && LDBase->getAlign() >= Align(32) &&
9638 VT.is256BitVector() && !Subtarget.hasInt256())
9642 return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
9645 return CreateLoad(VT, LDBase);
9647 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
9648 // vector and a zero vector to clear out the zero elements.
9649 if (!IsAfterLegalize && VT.isVector()) {
9650 unsigned NumMaskElts = VT.getVectorNumElements();
9651 if ((NumMaskElts % NumElems) == 0) {
9652 unsigned Scale = NumMaskElts / NumElems;
9653 SmallVector<int, 4> ClearMask(NumMaskElts, -1);
9654 for (unsigned i = 0; i < NumElems; ++i) {
9657 int Offset = ZeroMask[i] ? NumMaskElts : 0;
9658 for (unsigned j = 0; j != Scale; ++j)
9659 ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
9661 SDValue V = CreateLoad(VT, LDBase);
9662 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
9663 : DAG.getConstantFP(0.0, DL, VT);
9664 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
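// e.g. for NumElems = 4 with only ZeroMask[2] set (Scale = 1), ClearMask is
// <0,1,6,3>: lane 2 is taken from the zero vector to clear the loaded value.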
9669 // If the upper half of a ymm/zmm load is undef then just load the lower half.
9670 if (VT.is256BitVector() || VT.is512BitVector()) {
9671 unsigned HalfNumElems = NumElems / 2;
9672 if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnes()) {
9674 EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
9676 EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
9677 DAG, Subtarget, IsAfterLegalize);
9679 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
9680 HalfLD, DAG.getIntPtrConstant(0, DL));
9684 // VZEXT_LOAD - consecutive 16/32/64-bit load/undefs followed by zeros/undefs.
9685 if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
9686 ((LoadSizeInBits == 16 && Subtarget.hasFP16()) || LoadSizeInBits == 32 ||
9687 LoadSizeInBits == 64) &&
9688 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
9689 MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
9690 : MVT::getIntegerVT(LoadSizeInBits);
9691 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
9692 // Allow v4f32 on SSE1 only targets.
9693 // FIXME: Add more isel patterns so we can just use VT directly.
9694 if (!Subtarget.hasSSE2() && VT == MVT::v4f32)
9696 if (TLI.isTypeLegal(VecVT)) {
9697 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
9698 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
9699 SDValue ResNode = DAG.getMemIntrinsicNode(
9700 X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT, LDBase->getPointerInfo(),
9701 LDBase->getOriginalAlign(), MachineMemOperand::MOLoad);
9702 for (auto *LD : Loads)
9704 DAG.makeEquivalentMemoryOrdering(LD, ResNode);
9705 return DAG.getBitcast(VT, ResNode);
9709 // BROADCAST - match the smallest possible repetition pattern, load that
9710 // scalar/subvector element and then broadcast to the entire vector.
9711 if (ZeroMask.isZero() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
9712 (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
9713 for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
9714 unsigned RepeatSize = SubElems * BaseSizeInBits;
9715 unsigned ScalarSize = std::min(RepeatSize, 64u);
9716 if (!Subtarget.hasAVX2() && ScalarSize < 32)
9719 // Don't attempt a 1:N subvector broadcast - it should be caught by
9720 // combineConcatVectorOps, otherwise it will cause infinite loops.
9721 if (RepeatSize > ScalarSize && SubElems == 1)
9725 SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
9726 for (unsigned i = 0; i != NumElems && Match; ++i) {
9729 SDValue Elt = peekThroughBitcasts(Elts[i]);
9730 if (RepeatedLoads[i % SubElems].isUndef())
9731 RepeatedLoads[i % SubElems] = Elt;
9733 Match &= (RepeatedLoads[i % SubElems] == Elt);
9736 // We must have loads at both ends of the repetition.
9737 Match &= !RepeatedLoads.front().isUndef();
9738 Match &= !RepeatedLoads.back().isUndef();
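// e.g. (assuming a and b are consecutive 32-bit loads) Elts =
// <a,b,a,b,a,b,a,b> matches at SubElems = 2 with RepeatSize = 64: the <a,b>
// pair is loaded once as a 64-bit scalar and broadcast to fill the vector.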
9743 VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
9744 ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
9745 : EVT::getFloatingPointVT(ScalarSize);
9746 if (RepeatSize > ScalarSize)
9747 RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
9748 RepeatSize / ScalarSize);
9750 EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
9751 VT.getSizeInBits() / ScalarSize);
9752 if (TLI.isTypeLegal(BroadcastVT)) {
9753 if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
9754 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, IsAfterLegalize)) {
9755 SDValue Broadcast = RepeatLoad;
9756 if (RepeatSize > ScalarSize) {
9757 while (Broadcast.getValueSizeInBits() < VT.getSizeInBits())
9758 Broadcast = concatSubVectors(Broadcast, Broadcast, DAG, DL);
9760 if (!Subtarget.hasAVX2() &&
9761 !X86::mayFoldLoadIntoBroadcastFromMem(
9762 RepeatLoad, RepeatVT.getScalarType().getSimpleVT(),
9764 /*AssumeSingleUse=*/true))
9767 DAG.getNode(X86ISD::VBROADCAST, DL, BroadcastVT, RepeatLoad);
9769 return DAG.getBitcast(VT, Broadcast);
9778 // Combine a vector op (shuffles etc.) that is equal to build_vector load1,
9779 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
9780 // are consecutive, non-overlapping, and in the right order.
9781 static SDValue combineToConsecutiveLoads(EVT VT, SDValue Op, const SDLoc &DL,
9783 const X86Subtarget &Subtarget,
9784 bool IsAfterLegalize) {
9785 SmallVector<SDValue, 64> Elts;
9786 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9787 if (SDValue Elt = getShuffleScalarElt(Op, i, DAG, 0)) {
9788 Elts.push_back(Elt);
9793 assert(Elts.size() == VT.getVectorNumElements());
9794 return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
9798 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
9799 unsigned SplatBitSize, LLVMContext &C) {
9800 unsigned ScalarSize = VT.getScalarSizeInBits();
9802 auto getConstantScalar = [&](const APInt &Val) -> Constant * {
9803 if (VT.isFloatingPoint()) {
9804 if (ScalarSize == 16)
9805 return ConstantFP::get(C, APFloat(APFloat::IEEEhalf(), Val));
9806 if (ScalarSize == 32)
9807 return ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
9808 assert(ScalarSize == 64 && "Unsupported floating point scalar size");
9809 return ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
9811 return Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
9814 if (ScalarSize == SplatBitSize)
9815 return getConstantScalar(SplatValue);
9817 unsigned NumElm = SplatBitSize / ScalarSize;
9818 SmallVector<Constant *, 32> ConstantVec;
9819 for (unsigned I = 0; I != NumElm; ++I) {
9820 APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * I);
9821 ConstantVec.push_back(getConstantScalar(Val));
9823 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
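// e.g. for VT = v8i32 and a 64-bit SplatValue of 0x0000000200000001,
// ScalarSize = 32 and NumElm = 2, yielding the repeated pattern {i32 1, i32 2}.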
9826 static bool isFoldableUseOfShuffle(SDNode *N) {
9827 for (auto *U : N->uses()) {
9828 unsigned Opc = U->getOpcode();
9829 // VPERMV/VPERMV3 shuffles can never fold their index operands.
9830 if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
9832 if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
9834 if (isTargetShuffle(Opc))
9836 if (Opc == ISD::BITCAST) // Ignore bitcasts
9837 return isFoldableUseOfShuffle(U);
9838 if (N->hasOneUse()) {
9839 // TODO: there may be some general way to know if an SDNode can
9840 // be folded. We currently only know whether an MI is foldable.
9841 if (Opc == X86ISD::VPDPBUSD && U->getOperand(2).getNode() != N)
9849 /// Attempt to use the vbroadcast instruction to generate a splat value
9850 /// from a splat BUILD_VECTOR which uses:
9851 /// a. A single scalar load, or a constant.
9852 /// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
9854 /// The VBROADCAST node is returned when a pattern is found,
9855 /// or SDValue() otherwise.
9856 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
9857 const X86Subtarget &Subtarget,
9858 SelectionDAG &DAG) {
9859 // VBROADCAST requires AVX.
9860 // TODO: Splats could be generated for non-AVX CPUs using SSE
9861 // instructions, but there's less potential gain for only 128-bit vectors.
9862 if (!Subtarget.hasAVX())
9865 MVT VT = BVOp->getSimpleValueType(0);
9866 unsigned NumElts = VT.getVectorNumElements();
9869 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
9870 "Unsupported vector type for broadcast.");
9872 // See if the build vector is a repeating sequence of scalars (inc. splat).
9874 BitVector UndefElements;
9875 SmallVector<SDValue, 16> Sequence;
9876 if (BVOp->getRepeatedSequence(Sequence, &UndefElements)) {
9877 assert((NumElts % Sequence.size()) == 0 && "Sequence doesn't fit.");
9878 if (Sequence.size() == 1)
9882 // Attempt to use VBROADCASTM
9883 // From this pattern:
9884 // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
9885 // b. t1 = (build_vector t0 t0)
9887 // Create (VBROADCASTM v2i1 X)
9888 if (!Sequence.empty() && Subtarget.hasCDI()) {
9889 // If not a splat, are the upper sequence values zeroable?
9890 unsigned SeqLen = Sequence.size();
9891 bool UpperZeroOrUndef =
9893 llvm::all_of(ArrayRef(Sequence).drop_front(), [](SDValue V) {
9894 return !V || V.isUndef() || isNullConstant(V);
9896 SDValue Op0 = Sequence[0];
9897 if (UpperZeroOrUndef && ((Op0.getOpcode() == ISD::BITCAST) ||
9898 (Op0.getOpcode() == ISD::ZERO_EXTEND &&
9899 Op0.getOperand(0).getOpcode() == ISD::BITCAST))) {
9900 SDValue BOperand = Op0.getOpcode() == ISD::BITCAST
9902 : Op0.getOperand(0).getOperand(0);
9903 MVT MaskVT = BOperand.getSimpleValueType();
9904 MVT EltType = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
9905 if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
9906 (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
9907 MVT BcstVT = MVT::getVectorVT(EltType, NumElts / SeqLen);
9908 if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
9909 unsigned Scale = 512 / VT.getSizeInBits();
9910 BcstVT = MVT::getVectorVT(EltType, Scale * (NumElts / SeqLen));
9912 SDValue Bcst = DAG.getNode(X86ISD::VBROADCASTM, dl, BcstVT, BOperand);
9913 if (BcstVT.getSizeInBits() != VT.getSizeInBits())
9914 Bcst = extractSubVector(Bcst, 0, DAG, dl, VT.getSizeInBits());
9915 return DAG.getBitcast(VT, Bcst);
9920 unsigned NumUndefElts = UndefElements.count();
9921 if (!Ld || (NumElts - NumUndefElts) <= 1) {
9922 APInt SplatValue, Undef;
9923 unsigned SplatBitSize;
9925 // Check if this is a repeated constant pattern suitable for broadcasting.
9926 if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
9927 SplatBitSize > VT.getScalarSizeInBits() &&
9928 SplatBitSize < VT.getSizeInBits()) {
9929 // Avoid replacing with a broadcast when the value is used by a shuffle
9930 // instruction, to preserve the present custom lowering of shuffles.
9931 if (isFoldableUseOfShuffle(BVOp))
9933 // Replace BUILD_VECTOR with a broadcast of the repeated constants.
9934 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9935 LLVMContext *Ctx = DAG.getContext();
9936 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
9937 if (SplatBitSize == 32 || SplatBitSize == 64 ||
9938 (SplatBitSize < 32 && Subtarget.hasAVX2())) {
9939 // Load the constant scalar/subvector and broadcast it.
9940 MVT CVT = MVT::getIntegerVT(SplatBitSize);
9941 Constant *C = getConstantVector(VT, SplatValue, SplatBitSize, *Ctx);
9942 SDValue CP = DAG.getConstantPool(C, PVT);
9943 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
9945 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
9946 SDVTList Tys = DAG.getVTList(MVT::getVectorVT(CVT, Repeat), MVT::Other);
9947 SDValue Ops[] = {DAG.getEntryNode(), CP};
9948 MachinePointerInfo MPI =
9949 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
9951 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
9952 MPI, Alignment, MachineMemOperand::MOLoad);
9953 return DAG.getBitcast(VT, Brdcst);
9955 if (SplatBitSize > 64) {
9956 // Load the vector of constants and broadcast it.
9957 Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize, *Ctx);
9958 SDValue VCP = DAG.getConstantPool(VecC, PVT);
9959 unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
9960 MVT VVT = MVT::getVectorVT(VT.getScalarType(), NumElm);
9961 Align Alignment = cast<ConstantPoolSDNode>(VCP)->getAlign();
9962 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9963 SDValue Ops[] = {DAG.getEntryNode(), VCP};
9964 MachinePointerInfo MPI =
9965 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
9966 return DAG.getMemIntrinsicNode(X86ISD::SUBV_BROADCAST_LOAD, dl, Tys,
9967 Ops, VVT, MPI, Alignment,
9968 MachineMemOperand::MOLoad);
9972 // If we are moving a scalar into a vector (Ld must be set and all elements
9973 // but 1 are undef) and that operation is not obviously supported by
9974 // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
9975 // That's better than general shuffling and may eliminate a load to GPR and
9976 // move from scalar to vector register.
9977 if (!Ld || NumElts - NumUndefElts != 1)
9979 unsigned ScalarSize = Ld.getValueSizeInBits();
9980 if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
9984 bool ConstSplatVal =
9985 (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
9986 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
9988 // TODO: Handle broadcasts of non-constant sequences.
9990 // Make sure that all of the users of a non-constant load are from the
9991 // BUILD_VECTOR node.
9992 // FIXME: Is the use count needed for the non-constant, non-load case?
9993 if (!ConstSplatVal && !IsLoad && !BVOp->isOnlyUserOf(Ld.getNode()))
9996 unsigned ScalarSize = Ld.getValueSizeInBits();
9997 bool IsGE256 = (VT.getSizeInBits() >= 256);
9999 // When optimizing for size, generate up to 5 extra bytes for a broadcast
10000 // instruction to save 8 or more bytes of constant pool data.
10001 // TODO: If multiple splats are generated to load the same constant,
10002 // it may be detrimental to overall size. There needs to be a way to detect
10003 // that condition to know if this is truly a size win.
10004 bool OptForSize = DAG.shouldOptForSize();
10006 // Handle broadcasting a single constant scalar from the constant pool
10007 // into a vector.
10008 // On Sandybridge (no AVX2), it is still better to load a constant vector
10009 // from the constant pool and not to broadcast it from a scalar.
10010 // But override that restriction when optimizing for size.
10011 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
10012 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
10013 EVT CVT = Ld.getValueType();
10014 assert(!CVT.isVector() && "Must not broadcast a vector type");
10016 // Splat f16, f32, i32, v4f64, v4i64 in all cases with AVX2.
10017 // For size optimization, also splat v2f64 and v2i64, and for size opt
10018 // with AVX2, also splat i8 and i16.
10019 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
10020 if (ScalarSize == 32 ||
10021 (ScalarSize == 64 && (IsGE256 || Subtarget.hasVLX())) ||
10023 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
10024 const Constant *C = nullptr;
10025 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
10026 C = CI->getConstantIntValue();
10027 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
10028 C = CF->getConstantFPValue();
10030 assert(C && "Invalid constant type");
10032 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10034 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
10035 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
10037 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
10038 SDValue Ops[] = {DAG.getEntryNode(), CP};
10039 MachinePointerInfo MPI =
10040 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
10041 return DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
10042 MPI, Alignment, MachineMemOperand::MOLoad);
10046 // Handle AVX2 in-register broadcasts.
10047 if (!IsLoad && Subtarget.hasInt256() &&
10048 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
10049 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
10051 // The scalar source must be a normal load.
10055 // Make sure the non-chain result is only used by this build vector.
10056 if (!Ld->hasNUsesOfValue(NumElts - NumUndefElts, 0))
10059 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
10060 (Subtarget.hasVLX() && ScalarSize == 64)) {
10061 auto *LN = cast<LoadSDNode>(Ld);
10062 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
10063 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
10065 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
10066 LN->getMemoryVT(), LN->getMemOperand());
10067 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
10071 // The integer check is needed for the 64-bit into 128-bit case, so it doesn't
10072 // match double, since there is no vbroadcastsd xmm instruction.
10073 if (Subtarget.hasInt256() && Ld.getValueType().isInteger() &&
10074 (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) {
10075 auto *LN = cast<LoadSDNode>(Ld);
10076 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
10077 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
10079 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
10080 LN->getMemoryVT(), LN->getMemOperand());
10081 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
10085 if (ScalarSize == 16 && Subtarget.hasFP16() && IsGE256)
10086 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
10088 // Unsupported broadcast.
10092 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
10093 /// underlying vector and index.
10095 /// Modifies \p ExtractedFromVec to the real vector and returns the real
10096 /// index.
10097 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
10099 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
10100 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
10103 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
10104 // lowered this:
10105 // (extract_vector_elt (v8f32 %1), Constant<6>)
10106 // to:
10107 // (extract_vector_elt (vector_shuffle<2,u,u,u>
10108 // (extract_subvector (v8f32 %0), Constant<4>),
10109 // undef)
10110 // Constant<2>)
10111 // In this case the vector is the extract_subvector expression and the index
10112 // is 2, as specified by the shuffle.
10113 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
10114 SDValue ShuffleVec = SVOp->getOperand(0);
10115 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
10116 assert(ShuffleVecVT.getVectorElementType() ==
10117 ExtractedFromVec.getSimpleValueType().getVectorElementType());
10119 int ShuffleIdx = SVOp->getMaskElt(Idx);
10120 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
10121 ExtractedFromVec = ShuffleVec;
10127 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
10128 MVT VT = Op.getSimpleValueType();
10130 // Skip if insert_vec_elt is not supported.
10131 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10132 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
10136 unsigned NumElems = Op.getNumOperands();
10140 SmallVector<unsigned, 4> InsertIndices;
10141 SmallVector<int, 8> Mask(NumElems, -1);
10143 for (unsigned i = 0; i != NumElems; ++i) {
10144 unsigned Opc = Op.getOperand(i).getOpcode();
10146 if (Opc == ISD::UNDEF)
10149 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
10150 // Quit if more than 1 element needs inserting.
10151 if (InsertIndices.size() > 1)
10154 InsertIndices.push_back(i);
10158 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
10159 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
10161 // Quit if non-constant index.
10162 if (!isa<ConstantSDNode>(ExtIdx))
10164 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
10166 // Quit if extracted from vector of different type.
10167 if (ExtractedFromVec.getValueType() != VT)
10170 if (!VecIn1.getNode())
10171 VecIn1 = ExtractedFromVec;
10172 else if (VecIn1 != ExtractedFromVec) {
10173 if (!VecIn2.getNode())
10174 VecIn2 = ExtractedFromVec;
10175 else if (VecIn2 != ExtractedFromVec)
10176 // Quit if more than 2 vectors to shuffle
10180 if (ExtractedFromVec == VecIn1)
10182 else if (ExtractedFromVec == VecIn2)
10183 Mask[i] = Idx + NumElems;
10186 if (!VecIn1.getNode())
10189 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
10190 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
10192 for (unsigned Idx : InsertIndices)
10193 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
10194 DAG.getIntPtrConstant(Idx, DL));
10199 // Lower BUILD_VECTOR operation for v8bf16, v16bf16 and v32bf16 types.
10200 static SDValue LowerBUILD_VECTORvXbf16(SDValue Op, SelectionDAG &DAG,
10201 const X86Subtarget &Subtarget) {
10202 MVT VT = Op.getSimpleValueType();
10203 MVT IVT = VT.changeVectorElementTypeToInteger();
10204 SmallVector<SDValue, 16> NewOps;
10205 for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I)
10206 NewOps.push_back(DAG.getBitcast(MVT::i16, Op.getOperand(I)));
10207 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(), IVT, NewOps);
10208 return DAG.getBitcast(VT, Res);
10211 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
10212 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
10213 const X86Subtarget &Subtarget) {
10215 MVT VT = Op.getSimpleValueType();
10216 assert((VT.getVectorElementType() == MVT::i1) &&
10217 "Unexpected type in LowerBUILD_VECTORvXi1!");
10220 if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
10221 ISD::isBuildVectorAllOnes(Op.getNode()))
10224 uint64_t Immediate = 0;
10225 SmallVector<unsigned, 16> NonConstIdx;
10226 bool IsSplat = true;
10227 bool HasConstElts = false;
10229 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
10230 SDValue In = Op.getOperand(idx);
10233 if (auto *InC = dyn_cast<ConstantSDNode>(In)) {
10234 Immediate |= (InC->getZExtValue() & 0x1) << idx;
10235 HasConstElts = true;
10237 NonConstIdx.push_back(idx);
10241 else if (In != Op.getOperand(SplatIdx))
10245 // For splats, use "(select i1 splat_elt, all-ones, all-zeroes)".
10247 // The build_vector allows the scalar element to be larger than the vector
10248 // element type. We need to mask it to use as a condition unless we know
10249 // the upper bits are zero.
10250 // FIXME: Use computeKnownBits instead of checking specific opcode?
10251 SDValue Cond = Op.getOperand(SplatIdx);
10252 assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
10253 if (Cond.getOpcode() != ISD::SETCC)
10254 Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
10255 DAG.getConstant(1, dl, MVT::i8));
10257 // Perform the select in the scalar domain so we can use cmov.
10258 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
10259 SDValue Select = DAG.getSelect(dl, MVT::i32, Cond,
10260 DAG.getAllOnesConstant(dl, MVT::i32),
10261 DAG.getConstant(0, dl, MVT::i32));
10262 Select = DAG.getBitcast(MVT::v32i1, Select);
10263 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Select, Select);
10265 MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
10266 SDValue Select = DAG.getSelect(dl, ImmVT, Cond,
10267 DAG.getAllOnesConstant(dl, ImmVT),
10268 DAG.getConstant(0, dl, ImmVT));
10269 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
10270 Select = DAG.getBitcast(VecVT, Select);
10271 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Select,
10272 DAG.getIntPtrConstant(0, dl));
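// e.g. splatting a v4i1 condition: ImmVT = i8, so the select yields i8 0xFF
// or 0x00, which is bitcast to v8i1 and the low v4i1 subvector extracted.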
10276 // Insert elements one by one.
10278 if (HasConstElts) {
10279 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
10280 SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
10281 SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
10282 ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
10283 ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
10284 DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
10286 MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
10287 SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
10288 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
10289 DstVec = DAG.getBitcast(VecVT, Imm);
10290 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
10291 DAG.getIntPtrConstant(0, dl));
10294 DstVec = DAG.getUNDEF(VT);
10296 for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
10297 unsigned InsertIdx = NonConstIdx[i];
10298 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
10299 Op.getOperand(InsertIdx),
10300 DAG.getIntPtrConstant(InsertIdx, dl));
10305 LLVM_ATTRIBUTE_UNUSED static bool isHorizOp(unsigned Opcode) {
10307 case X86ISD::PACKSS:
10308 case X86ISD::PACKUS:
10309 case X86ISD::FHADD:
10310 case X86ISD::FHSUB:
10318 /// This is a helper function of LowerToHorizontalOp().
10319 /// This function checks whether the build_vector \p N implements a
10320 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
10321 /// may not match the layout of an x86 256-bit horizontal instruction.
10322 /// In other words, if this returns true, then some extraction/insertion will
10323 /// be required to produce a valid horizontal instruction.
10325 /// Parameter \p Opcode defines the kind of horizontal operation to match.
10326 /// For example, if \p Opcode is equal to ISD::ADD, then this function
10327 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
10328 /// is equal to ISD::SUB, then this function checks if this is a horizontal
10329 /// arithmetic sub.
10331 /// This function only analyzes elements of \p N whose indices are
10332 /// in range [BaseIdx, LastIdx).
10334 /// TODO: This function was originally used to match both real and fake partial
10335 /// horizontal operations, but the index-matching logic is incorrect for that.
10336 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
10337 /// code because it is only used for partial h-op matching now?
10338 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
10340 unsigned BaseIdx, unsigned LastIdx,
10341 SDValue &V0, SDValue &V1) {
10342 EVT VT = N->getValueType(0);
10343 assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
10344 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
10345 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
10346 "Invalid Vector in input!");
10348 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
10349 bool CanFold = true;
10350 unsigned ExpectedVExtractIdx = BaseIdx;
10351 unsigned NumElts = LastIdx - BaseIdx;
10352 V0 = DAG.getUNDEF(VT);
10353 V1 = DAG.getUNDEF(VT);
10355 // Check if N implements a horizontal binop.
10356 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
10357 SDValue Op = N->getOperand(i + BaseIdx);
10360 if (Op->isUndef()) {
10361 // Update the expected vector extract index.
10362 if (i * 2 == NumElts)
10363 ExpectedVExtractIdx = BaseIdx;
10364 ExpectedVExtractIdx += 2;
10368 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
10373 SDValue Op0 = Op.getOperand(0);
10374 SDValue Op1 = Op.getOperand(1);
10376 // Try to match the following pattern:
10377 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
10378 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
10379 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
10380 Op0.getOperand(0) == Op1.getOperand(0) &&
10381 isa<ConstantSDNode>(Op0.getOperand(1)) &&
10382 isa<ConstantSDNode>(Op1.getOperand(1)));
10386 unsigned I0 = Op0.getConstantOperandVal(1);
10387 unsigned I1 = Op1.getConstantOperandVal(1);
10389 if (i * 2 < NumElts) {
10390 if (V0.isUndef()) {
10391 V0 = Op0.getOperand(0);
10392 if (V0.getValueType() != VT)
10396 if (V1.isUndef()) {
10397 V1 = Op0.getOperand(0);
10398 if (V1.getValueType() != VT)
10401 if (i * 2 == NumElts)
10402 ExpectedVExtractIdx = BaseIdx;
10405 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
10406 if (I0 == ExpectedVExtractIdx)
10407 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
10408 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
10409 // Try to match the following dag sequence:
10410 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
10411 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
10415 ExpectedVExtractIdx += 2;
10421 /// Emit a sequence of two 128-bit horizontal add/sub followed by
10422 /// a concat_vector.
10424 /// This is a helper function of LowerToHorizontalOp().
10425 /// This function expects two 256-bit vectors called V0 and V1.
10426 /// At first, each vector is split into two separate 128-bit vectors.
10427 /// Then, the resulting 128-bit vectors are used to implement two
10428 /// horizontal binary operations.
10430 /// The kind of horizontal binary operation is defined by \p X86Opcode.
10432 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as input to
10433 /// the two new horizontal binops.
10434 /// When Mode is set, the first horizontal binop dag node would take as input
10435 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
10436 /// horizontal binop dag node would take as input the lower 128-bit of V1
10437 /// and the upper 128-bit of V1.
10439 /// HADD V0_LO, V0_HI
10440 /// HADD V1_LO, V1_HI
10442 /// Otherwise, the first horizontal binop dag node takes as input the lower
10443 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
10444 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
10446 /// HADD V0_LO, V1_LO
10447 /// HADD V0_HI, V1_HI
10449 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
10450 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
10451 /// the upper 128-bits of the result.
10452 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
10453 const SDLoc &DL, SelectionDAG &DAG,
10454 unsigned X86Opcode, bool Mode,
10455 bool isUndefLO, bool isUndefHI) {
10456 MVT VT = V0.getSimpleValueType();
10457 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
10458 "Invalid nodes in input!");
10460 unsigned NumElts = VT.getVectorNumElements();
10461 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
10462 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
10463 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
10464 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
10465 MVT NewVT = V0_LO.getSimpleValueType();
10467 SDValue LO = DAG.getUNDEF(NewVT);
10468 SDValue HI = DAG.getUNDEF(NewVT);
10471 // Don't emit a horizontal binop if the result is expected to be UNDEF.
10472 if (!isUndefLO && !V0->isUndef())
10473 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
10474 if (!isUndefHI && !V1->isUndef())
10475 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
10477 // Don't emit a horizontal binop if the result is expected to be UNDEF.
10478 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
10479 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
10481 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
10482 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
10485 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
10488 /// Returns true iff \p BV builds a vector with the result equivalent to
10489 /// the result of ADDSUB/SUBADD operation.
10490 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
10491 /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
10492 /// \p Opnd0 and \p Opnd1.
10493 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
10494 const X86Subtarget &Subtarget, SelectionDAG &DAG,
10495 SDValue &Opnd0, SDValue &Opnd1,
10496 unsigned &NumExtracts,
10499 MVT VT = BV->getSimpleValueType(0);
10500 if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
10503 unsigned NumElts = VT.getVectorNumElements();
10504 SDValue InVec0 = DAG.getUNDEF(VT);
10505 SDValue InVec1 = DAG.getUNDEF(VT);
10509 // Odd-numbered elements in the input build vector are obtained from
10510 // adding/subtracting two integer/float elements.
10511 // Even-numbered elements in the input build vector are obtained from
10512 // subtracting/adding two integer/float elements.
10513 unsigned Opc[2] = {0, 0};
10514 for (unsigned i = 0, e = NumElts; i != e; ++i) {
10515 SDValue Op = BV->getOperand(i);
10517 // Skip 'undef' values.
10518 unsigned Opcode = Op.getOpcode();
10519 if (Opcode == ISD::UNDEF)
10522 // Early exit if we found an unexpected opcode.
10523 if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
10526 SDValue Op0 = Op.getOperand(0);
10527 SDValue Op1 = Op.getOperand(1);
10529 // Try to match the following pattern:
10530 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
10531 // Early exit if we cannot match that sequence.
10532 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10533 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10534 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
10535 Op0.getOperand(1) != Op1.getOperand(1))
10538 unsigned I0 = Op0.getConstantOperandVal(1);
10542 // We found a valid add/sub node; make sure it's the same opcode as previous
10543 // elements for this parity.
10544 if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
10546 Opc[i % 2] = Opcode;
10548 // Update InVec0 and InVec1.
10549 if (InVec0.isUndef()) {
10550 InVec0 = Op0.getOperand(0);
10551 if (InVec0.getSimpleValueType() != VT)
10554 if (InVec1.isUndef()) {
10555 InVec1 = Op1.getOperand(0);
10556 if (InVec1.getSimpleValueType() != VT)
10560 // Make sure that the input operands to each add/sub node always
10561 // come from the same pair of vectors.
10562 if (InVec0 != Op0.getOperand(0)) {
10563 if (Opcode == ISD::FSUB)
10566 // FADD is commutable. Try to commute the operands
10567 // and then test again.
10568 std::swap(Op0, Op1);
10569 if (InVec0 != Op0.getOperand(0))
10573 if (InVec1 != Op1.getOperand(0))
10576 // Increment the number of extractions done.
10580 // Ensure we have found an opcode for both parities and that they are
10581 // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
10582 // inputs are undef.
10583 if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
10584 InVec0.isUndef() || InVec1.isUndef())
10587 IsSubAdd = Opc[0] == ISD::FADD;
10594 /// Returns true if it is possible to fold MUL and an idiom that has already been
10595 /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
10596 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
10597 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
10599 /// Prior to calling this function it should be known that there is some
10600 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
10601 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
10602 /// before replacement of such SDNode with ADDSUB operation. Thus the number
10603 /// of \p Opnd0 uses is expected to be equal to 2.
10604 /// For example, this function may be called for the following IR:
10605 /// %AB = fmul fast <2 x double> %A, %B
10606 /// %Sub = fsub fast <2 x double> %AB, %C
10607 /// %Add = fadd fast <2 x double> %AB, %C
10608 /// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
10609 /// <2 x i32> <i32 0, i32 3>
10610 /// There is a def for %Addsub here, which potentially can be replaced by
10611 /// X86ISD::ADDSUB operation:
10612 /// %Addsub = X86ISD::ADDSUB %AB, %C
10613 /// and such ADDSUB can further be replaced with FMADDSUB:
10614 /// %Addsub = FMADDSUB %A, %B, %C.
10616 /// The main reason why this method is called before the replacement of the
10617 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
/// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
/// FMADDSUB is.
static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG,
                                 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
10623 unsigned ExpectedUses) {
10624 if (Opnd0.getOpcode() != ISD::FMUL ||
      !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
    return false;
10628 // FIXME: These checks must match the similar ones in
10629 // DAGCombiner::visitFADDForFMACombine. It would be good to have one
10630 // function that would answer if it is Ok to fuse MUL + ADD to FMADD
10631 // or MUL + ADDSUB to FMADDSUB.
10632 const TargetOptions &Options = DAG.getTarget().Options;
  bool AllowFusion =
      (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
  if (!AllowFusion)
    return false;

  Opnd2 = Opnd1;
  Opnd1 = Opnd0.getOperand(1);
  Opnd0 = Opnd0.getOperand(0);

  return true;
}
/// Try to fold a build_vector that performs an 'addsub', 'fmaddsub' or
/// 'fsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB or
/// X86ISD::FMSUBADD node.
10648 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
10649 const X86Subtarget &Subtarget,
10650 SelectionDAG &DAG) {
10651 SDValue Opnd0, Opnd1;
  unsigned NumExtracts;
  bool IsSubAdd;
  if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
                        IsSubAdd))
    return SDValue();
  MVT VT = BV->getSimpleValueType(0);
  SDLoc DL(BV);
  // Try to generate X86ISD::FMADDSUB node here.
  SDValue Opnd2;
  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
10664 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
    return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
  }
  // We only support ADDSUB.
  if (IsSubAdd)
    return SDValue();
10672 // There are no known X86 targets with 512-bit ADDSUB instructions!
10673 // Convert to blend(fsub,fadd).
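  // Illustrative sketch (hypothetical v8f64 case): the mask built below is
  // {0, 9, 2, 11, 4, 13, 6, 15}, picking even result elements from the FSUB
  // node and odd result elements from the FADD node.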
10674 if (VT.is512BitVector()) {
10675 SmallVector<int> Mask;
10676 for (int I = 0, E = VT.getVectorNumElements(); I != E; I += 2) {
      Mask.push_back(I);
      Mask.push_back(I + E + 1);
    }
10680 SDValue Sub = DAG.getNode(ISD::FSUB, DL, VT, Opnd0, Opnd1);
10681 SDValue Add = DAG.getNode(ISD::FADD, DL, VT, Opnd0, Opnd1);
10682 return DAG.getVectorShuffle(VT, DL, Sub, Add, Mask);
  return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
}
10688 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
10689 unsigned &HOpcode, SDValue &V0, SDValue &V1) {
10690 // Initialize outputs to known values.
10691 MVT VT = BV->getSimpleValueType(0);
10692 HOpcode = ISD::DELETED_NODE;
10693 V0 = DAG.getUNDEF(VT);
10694 V1 = DAG.getUNDEF(VT);
10696 // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
10697 // half of the result is calculated independently from the 128-bit halves of
10698 // the inputs, so that makes the index-checking logic below more complicated.
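  // As a concrete example, v8i32 HADD(A, B) produces
  //   { A0+A1, A2+A3, B0+B1, B2+B3, A4+A5, A6+A7, B4+B5, B6+B7 }
  // rather than the fully interleaved ordering one might expect.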
10699 unsigned NumElts = VT.getVectorNumElements();
10700 unsigned GenericOpcode = ISD::DELETED_NODE;
10701 unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
10702 unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
10703 unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
10704 for (unsigned i = 0; i != Num128BitChunks; ++i) {
10705 for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
10706 // Ignore undef elements.
      SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
      if (Op.isUndef())
        continue;
10711 // If there's an opcode mismatch, we're done.
      if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
        return false;
10715 // Initialize horizontal opcode.
10716 if (HOpcode == ISD::DELETED_NODE) {
10717 GenericOpcode = Op.getOpcode();
10718 switch (GenericOpcode) {
10719 case ISD::ADD: HOpcode = X86ISD::HADD; break;
10720 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
10721 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
10722 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
        default: return false;
        }
      }
10727 SDValue Op0 = Op.getOperand(0);
10728 SDValue Op1 = Op.getOperand(1);
10729 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10730 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10731 Op0.getOperand(0) != Op1.getOperand(0) ||
10732 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
          !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
        return false;
10736 // The source vector is chosen based on which 64-bit half of the
10737 // destination vector is being calculated.
      if (j < NumEltsIn64Bits) {
        if (V0.isUndef())
          V0 = Op0.getOperand(0);
      } else {
        if (V1.isUndef())
          V1 = Op0.getOperand(0);
      }
10746 SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
      if (SourceVec != Op0.getOperand(0))
        return false;
10750 // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
10751 unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
10752 unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
10753 unsigned ExpectedIndex = i * NumEltsIn128Bits +
10754 (j % NumEltsIn64Bits) * 2;
      if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
        continue;
10758 // If this is not a commutative op, this does not match.
      if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
        return false;
10762 // Addition is commutative, so try swapping the extract indexes.
10763 // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
      if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
        continue;

      // The extract indexes do not match the horizontal requirement.
      return false;
    }
  }

  // We matched. Opcode and operands are returned by reference as arguments.
  return true;
}
10775 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
10776 SelectionDAG &DAG, unsigned HOpcode,
10777 SDValue V0, SDValue V1) {
10778 // If either input vector is not the same size as the build vector,
10779 // extract/insert the low bits to the correct size.
10780 // This is free (examples: zmm --> xmm, xmm --> ymm).
10781 MVT VT = BV->getSimpleValueType(0);
10782 unsigned Width = VT.getSizeInBits();
10783 if (V0.getValueSizeInBits() > Width)
10784 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
10785 else if (V0.getValueSizeInBits() < Width)
10786 V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
10788 if (V1.getValueSizeInBits() > Width)
10789 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
10790 else if (V1.getValueSizeInBits() < Width)
10791 V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
10793 unsigned NumElts = VT.getVectorNumElements();
10794 APInt DemandedElts = APInt::getAllOnes(NumElts);
10795 for (unsigned i = 0; i != NumElts; ++i)
10796 if (BV->getOperand(i).isUndef())
10797 DemandedElts.clearBit(i);
10799 // If we don't need the upper xmm, then perform as a xmm hop.
10800 unsigned HalfNumElts = NumElts / 2;
10801 if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
10802 MVT HalfVT = VT.getHalfNumVectorElementsVT();
10803 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
10804 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
10805 SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
    return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
  }

  return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
}
10812 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
10813 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
10814 const X86Subtarget &Subtarget,
10815 SelectionDAG &DAG) {
10816 // We need at least 2 non-undef elements to make this worthwhile by default.
10817 unsigned NumNonUndefs =
10818 count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
  if (NumNonUndefs < 2)
    return SDValue();
10822 // There are 4 sets of horizontal math operations distinguished by type:
10823 // int/FP at 128-bit/256-bit. Each type was introduced with a different
10824 // subtarget feature. Try to match those "native" patterns first.
10825 MVT VT = BV->getSimpleValueType(0);
10826 if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
10827 ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
10828 ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
10829 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
    unsigned HOpcode;
    SDValue V0, V1;
    if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
      return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
  }
10836 // Try harder to match 256-bit ops by using extract/concat.
  if (!Subtarget.hasAVX() || !VT.is256BitVector())
    return SDValue();
  // Count the number of UNDEF operands in the input build_vector.
10841 unsigned NumElts = VT.getVectorNumElements();
10842 unsigned Half = NumElts / 2;
10843 unsigned NumUndefsLO = 0;
10844 unsigned NumUndefsHI = 0;
10845 for (unsigned i = 0, e = Half; i != e; ++i)
    if (BV->getOperand(i)->isUndef())
      NumUndefsLO++;
10849 for (unsigned i = Half, e = NumElts; i != e; ++i)
    if (BV->getOperand(i)->isUndef())
      NumUndefsHI++;

  SDLoc DL(BV);
10854 SDValue InVec0, InVec1;
10855 if (VT == MVT::v8i32 || VT == MVT::v16i16) {
10856 SDValue InVec2, InVec3;
10857 unsigned X86Opcode;
10858 bool CanFold = true;
10860 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
        isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
                              InVec3) &&
10863 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
10864 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
10865 X86Opcode = X86ISD::HADD;
    else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
                                   InVec1) &&
             isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
                                   InVec3) &&
10870 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
10871 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
      X86Opcode = X86ISD::HSUB;
    else
      CanFold = false;

    if (CanFold) {
10877 // Do not try to expand this build_vector into a pair of horizontal
10878 // add/sub if we can emit a pair of scalar add/sub.
      if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
        return SDValue();
10882 // Convert this build_vector into a pair of horizontal binops followed by
10883 // a concat vector. We must adjust the outputs from the partial horizontal
10884 // matching calls above to account for undefined vector halves.
10885 SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
10886 SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
10887 assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
10888 bool isUndefLO = NumUndefsLO == Half;
10889 bool isUndefHI = NumUndefsHI == Half;
      return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
                                   isUndefHI);
    }
  }
10895 if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
10896 VT == MVT::v16i16) {
10897 unsigned X86Opcode;
10898 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
10899 X86Opcode = X86ISD::HADD;
    else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
                                   InVec1))
      X86Opcode = X86ISD::HSUB;
    else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
                                   InVec1))
      X86Opcode = X86ISD::FHADD;
    else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
                                   InVec1))
      X86Opcode = X86ISD::FHSUB;
    else
      return SDValue();
10912 // Don't try to expand this build_vector into a pair of horizontal add/sub
10913 // if we can simply emit a pair of scalar add/sub.
    if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
      return SDValue();
10917 // Convert this build_vector into two horizontal add/sub followed by
10918 // a concat vector.
10919 bool isUndefLO = NumUndefsLO == Half;
10920 bool isUndefHI = NumUndefsHI == Half;
    return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
                                 isUndefLO, isUndefHI);
  }

  return SDValue();
}
10928 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
10929 SelectionDAG &DAG);
10931 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
10932 /// one of their operands is constant, lower to a pair of BUILD_VECTOR and
10933 /// just apply the bit to the vectors.
/// NOTE: It's not in our interest to build a general purpose vectorizer
/// from this, but enough scalar bit operations are created by the later
/// legalization + scalarization stages to need basic support.
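///
/// As an illustrative sketch (hypothetical operands, not from the source):
///   (build_vector (and x0, C0), (and x1, C1), ...)
/// becomes
///   (and (build_vector x0, x1, ...), (build_vector C0, C1, ...))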
10937 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
10938 const X86Subtarget &Subtarget,
10939 SelectionDAG &DAG) {
  SDLoc DL(Op);
  MVT VT = Op->getSimpleValueType(0);
10942 unsigned NumElems = VT.getVectorNumElements();
10943 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10945 // Check that all elements have the same opcode.
10946 // TODO: Should we allow UNDEFS and if so how many?
10947 unsigned Opcode = Op->getOperand(0).getOpcode();
10948 for (unsigned i = 1; i < NumElems; ++i)
    if (Opcode != Op->getOperand(i).getOpcode())
      return SDValue();
10952 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
  bool IsShift = false;
  switch (Opcode) {
  default:
    return SDValue();
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
    IsShift = true;
    break;
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR:
    // Don't do this if the buildvector is a splat - we'd replace one
    // constant with an entire vector.
    if (Op->getSplatValue())
      return SDValue();
    if (!TLI.isOperationLegalOrPromote(Opcode, VT))
      return SDValue();
    break;
  }
10974 SmallVector<SDValue, 4> LHSElts, RHSElts;
10975 for (SDValue Elt : Op->ops()) {
10976 SDValue LHS = Elt.getOperand(0);
10977 SDValue RHS = Elt.getOperand(1);
10979 // We expect the canonicalized RHS operand to be the constant.
    if (!isa<ConstantSDNode>(RHS))
      return SDValue();
10983 // Extend shift amounts.
    if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
      if (!IsShift)
        return SDValue();
      RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
    }
10990 LHSElts.push_back(LHS);
    RHSElts.push_back(RHS);
  }
10994 // Limit to shifts by uniform immediates.
10995 // TODO: Only accept vXi8/vXi64 special cases?
10996 // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
  if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
    return SDValue();
11000 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
11001 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
  SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);

  if (!IsShift)
    return Res;
11007 // Immediately lower the shift to ensure the constant build vector doesn't
11008 // get converted to a constant pool before the shift is lowered.
  return LowerShift(Res, Subtarget, DAG);
}
11012 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
11013 /// functionality to do this, so it's all zeros, all ones, or some derivation
11014 /// that is cheap to calculate.
11015 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
11016 const X86Subtarget &Subtarget) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
11020 // Vectors containing all zeros can be matched by pxor and xorps.
  if (ISD::isBuildVectorAllZeros(Op.getNode()))
    return getZeroVector(VT, Subtarget, DAG, DL);
11024 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
11025 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
11026 // vpcmpeqd on 256-bit vectors.
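  // (For example, a single "pcmpeqd %xmm0, %xmm0" sets every bit of xmm0
  // without a memory access, so an all-ones vector is effectively free.)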
11027 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
    if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
      return Op;
    return getOnesVector(VT, DAG, DL);
  }

  return SDValue();
}
11037 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
11038 /// from a vector of source values and a vector of extraction indices.
11039 /// The vectors might be manipulated to match the type of the permute op.
11040 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
11041 SDLoc &DL, SelectionDAG &DAG,
11042 const X86Subtarget &Subtarget) {
11043 MVT ShuffleVT = VT;
11044 EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
11045 unsigned NumElts = VT.getVectorNumElements();
11046 unsigned SizeInBits = VT.getSizeInBits();
11048 // Adjust IndicesVec to match VT size.
11049 assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
11050 "Illegal variable permute mask size");
11051 if (IndicesVec.getValueType().getVectorNumElements() > NumElts) {
11052 // Narrow/widen the indices vector to the correct size.
11053 if (IndicesVec.getValueSizeInBits() > SizeInBits)
11054 IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
11055 NumElts * VT.getScalarSizeInBits());
11056 else if (IndicesVec.getValueSizeInBits() < SizeInBits)
11057 IndicesVec = widenSubVector(IndicesVec, false, Subtarget, DAG,
11058 SDLoc(IndicesVec), SizeInBits);
11059 // Zero-extend the index elements within the vector.
11060 if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
11061 IndicesVec = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(IndicesVec),
                               IndicesVT, IndicesVec);
  }
11064 IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
  // Handle a SrcVec that doesn't match the VT type.
11067 if (SrcVec.getValueSizeInBits() != SizeInBits) {
11068 if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
11069 // Handle larger SrcVec by treating it as a larger permute.
11070 unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
11071 VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
11072 IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
11073 IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
11074 Subtarget, DAG, SDLoc(IndicesVec));
11075 SDValue NewSrcVec =
11076 createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
      if (NewSrcVec)
        return extractSubVector(NewSrcVec, 0, DAG, DL, SizeInBits);
      return SDValue();
11080 } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
11081 // Widen smaller SrcVec to match VT.
      SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
    } else
      return SDValue();
  }
11087 auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
11088 assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
11089 EVT SrcVT = Idx.getValueType();
11090 unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
11091 uint64_t IndexScale = 0;
11092 uint64_t IndexOffset = 0;
11094 // If we're scaling a smaller permute op, then we need to repeat the
11095 // indices, scaling and offsetting them as well.
11096 // e.g. v4i32 -> v16i8 (Scale = 4)
11097 // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
11098 // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
11099 for (uint64_t i = 0; i != Scale; ++i) {
11100 IndexScale |= Scale << (i * NumDstBits);
      IndexOffset |= i << (i * NumDstBits);
    }
11104 Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
11105 DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
11106 Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
                      DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
    return Idx;
  };
11111 unsigned Opcode = 0;
  switch (VT.SimpleTy) {
  default:
    break;
  case MVT::v16i8:
11116 if (Subtarget.hasSSSE3())
      Opcode = X86ISD::PSHUFB;
    break;
  case MVT::v8i16:
11120 if (Subtarget.hasVLX() && Subtarget.hasBWI())
11121 Opcode = X86ISD::VPERMV;
11122 else if (Subtarget.hasSSSE3()) {
11123 Opcode = X86ISD::PSHUFB;
      ShuffleVT = MVT::v16i8;
    }
    break;
  case MVT::v4f32:
  case MVT::v4i32:
11129 if (Subtarget.hasAVX()) {
11130 Opcode = X86ISD::VPERMILPV;
11131 ShuffleVT = MVT::v4f32;
11132 } else if (Subtarget.hasSSSE3()) {
11133 Opcode = X86ISD::PSHUFB;
      ShuffleVT = MVT::v16i8;
    }
    break;
  case MVT::v2f64:
  case MVT::v2i64:
11139 if (Subtarget.hasAVX()) {
11140 // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
11141 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
11142 Opcode = X86ISD::VPERMILPV;
11143 ShuffleVT = MVT::v2f64;
11144 } else if (Subtarget.hasSSE41()) {
11145 // SSE41 can compare v2i64 - select between indices 0 and 1.
11146 return DAG.getSelectCC(
          DL, IndicesVec,
          getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
11149 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
11150 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
          ISD::CondCode::SETEQ);
    }
    break;
  case MVT::v32i8:
11155 if (Subtarget.hasVLX() && Subtarget.hasVBMI())
11156 Opcode = X86ISD::VPERMV;
11157 else if (Subtarget.hasXOP()) {
11158 SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
11159 SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
11160 SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
11161 SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
11162 return DAG.getNode(
11163 ISD::CONCAT_VECTORS, DL, VT,
11164 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
11165 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
11166 } else if (Subtarget.hasAVX()) {
11167 SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
11168 SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
11169 SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
11170 SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
11171 auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
11172 ArrayRef<SDValue> Ops) {
        // Permute Lo and Hi and then select based on index range.
        // This works as PSHUFB uses bits[3:0] to permute elements and we
        // don't care about bit[7] as it's just an index vector.
11176 SDValue Idx = Ops[2];
11177 EVT VT = Idx.getValueType();
11178 return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
11179 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
11180 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
                               ISD::CondCode::SETGT);
      };
11183 SDValue Ops[] = {LoLo, HiHi, IndicesVec};
      return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
                              PSHUFBBuilder);
    }
    break;
  case MVT::v16i16:
11189 if (Subtarget.hasVLX() && Subtarget.hasBWI())
11190 Opcode = X86ISD::VPERMV;
11191 else if (Subtarget.hasAVX()) {
11192 // Scale to v32i8 and perform as v32i8.
11193 IndicesVec = ScaleIndices(IndicesVec, 2);
11194 return DAG.getBitcast(
11195 VT, createVariablePermute(
11196 MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
              DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
    }
    break;
  case MVT::v8f32:
  case MVT::v8i32:
11202 if (Subtarget.hasAVX2())
11203 Opcode = X86ISD::VPERMV;
11204 else if (Subtarget.hasAVX()) {
11205 SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
11206 SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
11207 {0, 1, 2, 3, 0, 1, 2, 3});
11208 SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
11209 {4, 5, 6, 7, 4, 5, 6, 7});
11210 if (Subtarget.hasXOP())
11211 return DAG.getBitcast(
11212 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
11213 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
11214 // Permute Lo and Hi and then select based on index range.
11215 // This works as VPERMILPS only uses index bits[0:1] to permute elements.
11216 SDValue Res = DAG.getSelectCC(
11217 DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
11218 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
11219 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
11220 ISD::CondCode::SETGT);
      return DAG.getBitcast(VT, Res);
    }
    break;
  case MVT::v4f64:
  case MVT::v4i64:
11226 if (Subtarget.hasAVX512()) {
11227 if (!Subtarget.hasVLX()) {
11228 MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
        SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
                                SDLoc(SrcVec));
11231 IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
11232 DAG, SDLoc(IndicesVec));
        SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
                                            DAG, Subtarget);
        return extract256BitVector(Res, 0, DAG, DL);
      }
11237 Opcode = X86ISD::VPERMV;
11238 } else if (Subtarget.hasAVX()) {
11239 SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
      SDValue LoLo =
          DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
      SDValue HiHi =
          DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
11244 // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
11245 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
11246 if (Subtarget.hasXOP())
11247 return DAG.getBitcast(
11248 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
11249 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
11250 // Permute Lo and Hi and then select based on index range.
11251 // This works as VPERMILPD only uses index bit[1] to permute elements.
11252 SDValue Res = DAG.getSelectCC(
11253 DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
11254 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
11255 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
11256 ISD::CondCode::SETGT);
      return DAG.getBitcast(VT, Res);
    }
    break;
  case MVT::v64i8:
11261 if (Subtarget.hasVBMI())
      Opcode = X86ISD::VPERMV;
    break;
  case MVT::v32i16:
11265 if (Subtarget.hasBWI())
      Opcode = X86ISD::VPERMV;
    break;
  case MVT::v16f32:
  case MVT::v16i32:
  case MVT::v8f64:
  case MVT::v8i64:
11272 if (Subtarget.hasAVX512())
      Opcode = X86ISD::VPERMV;
    break;
  }

  if (!Opcode)
    return SDValue();
11279 assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
11280 (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
11281 "Illegal variable permute shuffle type");
11283 uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
  if (Scale > 1)
    IndicesVec = ScaleIndices(IndicesVec, Scale);
11287 EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
11288 IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
11290 SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
11291 SDValue Res = Opcode == X86ISD::VPERMV
11292 ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
11293 : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
  return DAG.getBitcast(VT, Res);
}
11297 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
11298 // reasoned to be a permutation of a vector by indices in a non-constant vector.
11299 // (build_vector (extract_elt V, (extract_elt I, 0)),
//               (extract_elt V, (extract_elt I, 1)),
//               ...
// ->
//   (vpermv I, V)
11305 // TODO: Handle undefs
11306 // TODO: Utilize pshufb and zero mask blending to support more efficient
11307 // construction of vectors with constant-0 elements.
static SDValue
LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
11310 const X86Subtarget &Subtarget) {
11311 SDValue SrcVec, IndicesVec;
11312 // Check for a match of the permute source vector and permute index elements.
11313 // This is done by checking that the i-th build_vector operand is of the form:
11314 // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
11315 for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
11316 SDValue Op = V.getOperand(Idx);
    if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
11320 // If this is the first extract encountered in V, set the source vector,
    // otherwise verify the extract is from the previously defined source
    // vector.
    if (!SrcVec)
      SrcVec = Op.getOperand(0);
    else if (SrcVec != Op.getOperand(0))
      return SDValue();
11327 SDValue ExtractedIndex = Op->getOperand(1);
11328 // Peek through extends.
11329 if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
11330 ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
11331 ExtractedIndex = ExtractedIndex.getOperand(0);
    if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
11335 // If this is the first extract from the index vector candidate, set the
11336 // indices vector, otherwise verify the extract is from the previously
    // defined indices vector.
    if (!IndicesVec)
      IndicesVec = ExtractedIndex.getOperand(0);
    else if (IndicesVec != ExtractedIndex.getOperand(0))
      return SDValue();
11343 auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
    if (!PermIdx || PermIdx->getAPIntValue() != Idx)
      return SDValue();
  }

  SDLoc DL(V);
11349 MVT VT = V.getSimpleValueType();
  return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
}
SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
11357 MVT VT = Op.getSimpleValueType();
11358 MVT EltVT = VT.getVectorElementType();
11359 MVT OpEltVT = Op.getOperand(0).getSimpleValueType();
11360 unsigned NumElems = Op.getNumOperands();
11362 // Generate vectors for predicate vectors.
11363 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
11364 return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
11366 if (VT.getVectorElementType() == MVT::bf16 && Subtarget.hasBF16())
11367 return LowerBUILD_VECTORvXbf16(Op, DAG, Subtarget);
11369 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
11370 return VectorConstant;
11372 unsigned EVTBits = EltVT.getSizeInBits();
11373 APInt UndefMask = APInt::getZero(NumElems);
11374 APInt FrozenUndefMask = APInt::getZero(NumElems);
11375 APInt ZeroMask = APInt::getZero(NumElems);
11376 APInt NonZeroMask = APInt::getZero(NumElems);
11377 bool IsAllConstants = true;
11378 bool OneUseFrozenUndefs = true;
11379 SmallSet<SDValue, 8> Values;
11380 unsigned NumConstants = NumElems;
11381 for (unsigned i = 0; i < NumElems; ++i) {
11382 SDValue Elt = Op.getOperand(i);
11383 if (Elt.isUndef()) {
      UndefMask.setBit(i);
      continue;
    }
11387 if (ISD::isFreezeUndef(Elt.getNode())) {
11388 OneUseFrozenUndefs = OneUseFrozenUndefs && Elt->hasOneUse();
      FrozenUndefMask.setBit(i);
      continue;
    }
11392 Values.insert(Elt);
11393 if (!isIntOrFPConstant(Elt)) {
      IsAllConstants = false;
      NumConstants--;
    }
11397 if (X86::isZeroNode(Elt)) {
      ZeroMask.setBit(i);
    } else {
      NonZeroMask.setBit(i);
    }
  }
11404 // All undef vector. Return an UNDEF.
11405 if (UndefMask.isAllOnes())
11406 return DAG.getUNDEF(VT);
11408 // All undef/freeze(undef) vector. Return a FREEZE UNDEF.
11409 if (OneUseFrozenUndefs && (UndefMask | FrozenUndefMask).isAllOnes())
11410 return DAG.getFreeze(DAG.getUNDEF(VT));
11412 // All undef/freeze(undef)/zero vector. Return a zero vector.
11413 if ((UndefMask | FrozenUndefMask | ZeroMask).isAllOnes())
11414 return getZeroVector(VT, Subtarget, DAG, dl);
11416 // If we have multiple FREEZE-UNDEF operands, we are likely going to end up
11417 // lowering into a suboptimal insertion sequence. Instead, thaw the UNDEF in
11418 // our source BUILD_VECTOR, create another FREEZE-UNDEF splat BUILD_VECTOR,
11419 // and blend the FREEZE-UNDEF operands back in.
11420 // FIXME: is this worthwhile even for a single FREEZE-UNDEF operand?
11421 if (unsigned NumFrozenUndefElts = FrozenUndefMask.popcount();
11422 NumFrozenUndefElts >= 2 && NumFrozenUndefElts < NumElems) {
11423 SmallVector<int, 16> BlendMask(NumElems, -1);
11424 SmallVector<SDValue, 16> Elts(NumElems, DAG.getUNDEF(OpEltVT));
    for (unsigned i = 0; i < NumElems; ++i) {
      if (UndefMask[i]) {
        BlendMask[i] = -1;
        continue;
      }
      BlendMask[i] = i;
      if (!FrozenUndefMask[i])
        Elts[i] = Op.getOperand(i);
      else
        BlendMask[i] += NumElems;
    }
11436 SDValue EltsBV = DAG.getBuildVector(VT, dl, Elts);
11437 SDValue FrozenUndefElt = DAG.getFreeze(DAG.getUNDEF(OpEltVT));
11438 SDValue FrozenUndefBV = DAG.getSplatBuildVector(VT, dl, FrozenUndefElt);
11439 return DAG.getVectorShuffle(VT, dl, EltsBV, FrozenUndefBV, BlendMask);
11442 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
11444 // If the upper elts of a ymm/zmm are undef/freeze(undef)/zero then we might
  // be better off lowering to a smaller build vector and padding with
  // undef/zero.
11447 if ((VT.is256BitVector() || VT.is512BitVector()) &&
11448 !isFoldableUseOfShuffle(BV)) {
11449 unsigned UpperElems = NumElems / 2;
11450 APInt UndefOrZeroMask = FrozenUndefMask | UndefMask | ZeroMask;
11451 unsigned NumUpperUndefsOrZeros = UndefOrZeroMask.countl_one();
11452 if (NumUpperUndefsOrZeros >= UpperElems) {
11453 if (VT.is512BitVector() &&
11454 NumUpperUndefsOrZeros >= (NumElems - (NumElems / 4)))
11455 UpperElems = NumElems - (NumElems / 4);
11456 // If freeze(undef) is in any upper elements, force to zero.
11457 bool UndefUpper = UndefMask.countl_one() >= UpperElems;
11458 MVT LowerVT = MVT::getVectorVT(EltVT, NumElems - UpperElems);
      SDValue NewBV =
          DAG.getBuildVector(LowerVT, dl, Op->ops().drop_back(UpperElems));
      return widenSubVector(VT, NewBV, !UndefUpper, Subtarget, DAG, dl);
    }
  }
  if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
    return AddSub;
11467 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
11468 return HorizontalOp;
  if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
    return Broadcast;
  if (SDValue BitOp = lowerBuildVectorToBitOp(BV, Subtarget, DAG))
    return BitOp;
11474 unsigned NumZero = ZeroMask.popcount();
11475 unsigned NumNonZero = NonZeroMask.popcount();
11477 // If we are inserting one variable into a vector of non-zero constants, try
11478 // to avoid loading each constant element as a scalar. Load the constants as a
11479 // vector and then insert the variable scalar element. If insertion is not
11480 // supported, fall back to a shuffle to get the scalar blended with the
11481 // constants. Insertion into a zero vector is handled as a special-case
11482 // somewhere below here.
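  // Illustrative sketch (hypothetical values): (build_vector 1, 2, x, 4)
  // becomes a constant-pool load of <1, 2, undef, 4> followed by inserting
  // the variable scalar x at index 2, or a blend shuffle as the fallback.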
11483 if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
11484 (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
11485 isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
11486 // Create an all-constant vector. The variable element in the old
11487 // build vector is replaced by undef in the constant vector. Save the
11488 // variable scalar element and its index for use in the insertelement.
11489 LLVMContext &Context = *DAG.getContext();
11490 Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
11491 SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
    SDValue VarElt;
    SDValue InsIndex;
    for (unsigned i = 0; i != NumElems; ++i) {
11495 SDValue Elt = Op.getOperand(i);
11496 if (auto *C = dyn_cast<ConstantSDNode>(Elt))
11497 ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
11498 else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
11499 ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
11500 else if (!Elt.isUndef()) {
11501 assert(!VarElt.getNode() && !InsIndex.getNode() &&
11502 "Expected one variable element in this vector");
        VarElt = Elt;
        InsIndex = DAG.getVectorIdxConstant(i, dl);
      }
    }
11507 Constant *CV = ConstantVector::get(ConstVecOps);
11508 SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
11510 // The constants we just created may not be legal (eg, floating point). We
11511 // must lower the vector right here because we can not guarantee that we'll
11512 // legalize it before loading it. This is also why we could not just create
11513 // a new build vector here. If the build vector contains illegal constants,
11514 // it could get split back up into a series of insert elements.
11515 // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
11516 SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
11517 MachineFunction &MF = DAG.getMachineFunction();
11518 MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
11519 SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
11520 unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
11521 unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
11522 if (InsertC < NumEltsInLow128Bits)
11523 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
11525 // There's no good way to insert into the high elements of a >128-bit
11526 // vector, so use shuffles to avoid an extract/insert sequence.
11527 assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
11528 assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
11529 SmallVector<int, 8> ShuffleMask;
11530 unsigned NumElts = VT.getVectorNumElements();
11531 for (unsigned i = 0; i != NumElts; ++i)
11532 ShuffleMask.push_back(i == InsertC ? NumElts : i);
11533 SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
    return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
  }
  // Special case for a single non-zero, non-undef element.
11538 if (NumNonZero == 1) {
11539 unsigned Idx = NonZeroMask.countr_zero();
11540 SDValue Item = Op.getOperand(Idx);
11542 // If we have a constant or non-constant insertion into the low element of
11543 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
11544 // the rest of the elements. This will be matched as movd/movq/movss/movsd
11545 // depending on what the source datatype is.
    if (Idx == 0) {
      if (NumZero == 0)
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
11550 if (EltVT == MVT::i32 || EltVT == MVT::f16 || EltVT == MVT::f32 ||
11551 EltVT == MVT::f64 || (EltVT == MVT::i64 && Subtarget.is64Bit()) ||
11552 (EltVT == MVT::i16 && Subtarget.hasFP16())) {
11553 assert((VT.is128BitVector() || VT.is256BitVector() ||
11554 VT.is512BitVector()) &&
11555 "Expected an SSE value type!");
11556 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
        // Turn it into a MOVL (i.e. movsh, movss, movsd, movw or movd) to a
        // zero vector.
        return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
      }
11562 // We can't directly insert an i8 or i16 into a vector, so zero extend
11563 // it to i32 first.
11564 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
11565 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
11566 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
11567 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
11568 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
        return DAG.getBitcast(VT, Item);
      }
    }
11573 // Is it a vector logical left shift?
11574 if (NumElems == 2 && Idx == 1 &&
11575 X86::isZeroNode(Op.getOperand(0)) &&
11576 !X86::isZeroNode(Op.getOperand(1))) {
11577 unsigned NumBits = VT.getSizeInBits();
11578 return getVShift(true, VT,
11579 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
11580 VT, Op.getOperand(1)),
                       NumBits/2, DAG, *this, dl);
    }

    if (IsAllConstants) // Otherwise, it's better to do a constpool load.
      return SDValue();
11587 // Otherwise, if this is a vector with i32 or f32 elements, and the element
11588 // is a non-constant being inserted into an element other than the low one,
11589 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
    // movd/movss) to move this into the low element, then shuffle it into
    // place.
11592 if (EVTBits == 32) {
11593 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
      return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
    }
  }
11598 // Splat is obviously ok. Let legalizer expand it to a shuffle.
11599 if (Values.size() == 1) {
11600 if (EVTBits == 32) {
11601 // Instead of a shuffle like this:
11602 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
11603 // Check if it's possible to issue this instead.
11604 // shuffle (vload ptr)), undef, <1, 1, 1, 1>
11605 unsigned Idx = NonZeroMask.countr_zero();
11606 SDValue Item = Op.getOperand(Idx);
11607 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
        return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
    }
    return SDValue();
  }
11613 // A vector full of immediates; various special cases are already
11614 // handled, so this is best done with a single constant-pool load.
  if (IsAllConstants)
    return SDValue();
  if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
    return V;
11621 // See if we can use a vector load to get all of the elements.
  {
    SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
    if (SDValue LD =
            EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
      return LD;
  }
11629 // If this is a splat of pairs of 32-bit elements, we can use a narrower
11630 // build_vector and broadcast it.
11631 // TODO: We could probably generalize this more.
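  // Illustrative sketch (hypothetical v8i32 case): (build_vector a, b, a, b,
  // a, b, a, b) is rebuilt as {a, b, undef, undef}, bitcast to v2i64, and
  // broadcast, so one VPBROADCASTQ replaces the per-element construction.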
11632 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
11633 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
11634 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
11635 auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
11636 // Make sure all the even/odd operands match.
11637 for (unsigned i = 2; i != NumElems; ++i)
        if (Ops[i % 2] != Op.getOperand(i))
          return false;
      return true;
    };
11642 if (CanSplat(Op, NumElems, Ops)) {
11643 MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
11644 MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
11645 // Create a new build vector and cast to v2i64/v2f64.
11646 SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
11647 DAG.getBuildVector(NarrowVT, dl, Ops));
11648 // Broadcast from v2i64/v2f64 and cast to final VT.
11649 MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems / 2);
      return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
                                            NewBV));
    }
  }
11655 // For AVX-length vectors, build the individual 128-bit pieces and use
11656 // shuffles to put them in place.
11657 if (VT.getSizeInBits() > 128) {
11658 MVT HVT = MVT::getVectorVT(EltVT, NumElems / 2);
11660 // Build both the lower and upper subvector.
    SDValue Lower =
        DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
11663 SDValue Upper = DAG.getBuildVector(
11664 HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));
11666 // Recreate the wider vector with the lower and upper part.
    return concatSubVectors(Lower, Upper, DAG, dl);
  }
11670 // Let legalizer expand 2-wide build_vectors.
11671 if (EVTBits == 64) {
11672 if (NumNonZero == 1) {
11673 // One half is zero or undef.
11674 unsigned Idx = NonZeroMask.countr_zero();
11675 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
11676 Op.getOperand(Idx));
      return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
    }
    return SDValue();
  }
11682 // If element VT is < 32 bits, convert it to inserts into a zero vector.
11683 if (EVTBits == 8 && NumElems == 16)
    if (SDValue V = LowerBuildVectorv16i8(Op, NonZeroMask, NumNonZero, NumZero,
                                          DAG, Subtarget))
      return V;
11688 if (EltVT == MVT::i16 && NumElems == 8)
    if (SDValue V = LowerBuildVectorv8i16(Op, NonZeroMask, NumNonZero, NumZero,
                                          DAG, Subtarget))
      return V;
11693 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
11694 if (EVTBits == 32 && NumElems == 4)
    if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
      return V;
11698 // If element VT is == 32 bits, turn it into a number of shuffles.
11699 if (NumElems == 4 && NumZero > 0) {
11700 SmallVector<SDValue, 8> Ops(NumElems);
11701 for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !NonZeroMask[i];
      if (isZero)
        Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
      else
        Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
    }
11709 for (unsigned i = 0; i < 2; ++i) {
11710 switch (NonZeroMask.extractBitsAsZExtValue(2, i * 2)) {
      default: llvm_unreachable("Unexpected NonZero count");
      case 0:
        Ops[i] = Ops[i*2];  // Must be a zero vector.
        break;
      case 1:
        Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
        break;
      case 2:
        Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
        break;
      case 3:
        Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
        break;
      }
    }
11727 bool Reverse1 = NonZeroMask.extractBitsAsZExtValue(2, 0) == 2;
11728 bool Reverse2 = NonZeroMask.extractBitsAsZExtValue(2, 2) == 2;
    int MaskVec[] = {
      Reverse1 ? 1 : 0,
      Reverse1 ? 0 : 1,
      static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
      static_cast<int>(Reverse2 ? NumElems : NumElems+1)
    };
    return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
  }
11738 assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
11740 // Check for a build vector from mostly shuffle plus few inserting.
  if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
    return Sh;
11744 // For SSE 4.1, use insertps to put the high elements into the low element.
11745 if (Subtarget.hasSSE41() && EltVT != MVT::f16) {
    SDValue Result;
    if (!Op.getOperand(0).isUndef())
11748 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
    else
      Result = DAG.getUNDEF(VT);
11752 for (unsigned i = 1; i < NumElems; ++i) {
11753 if (Op.getOperand(i).isUndef()) continue;
11754 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
                           Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
    }
    return Result;
  }
11760 // Otherwise, expand into a number of unpckl*, start by extending each of
11761 // our (non-undef) elements to the full vector width with the element in the
11762 // bottom slot of the vector (which generates no code for SSE).
11763 SmallVector<SDValue, 8> Ops(NumElems);
11764 for (unsigned i = 0; i < NumElems; ++i) {
11765 if (!Op.getOperand(i).isUndef())
11766 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
11768 Ops[i] = DAG.getUNDEF(VT);
11771 // Next, we iteratively mix elements, e.g. for v4f32:
11772 // Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
11773 // : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
11774 // Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
11775 for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
11776 // Generate scaled UNPCKL shuffle mask.
11777 SmallVector<int, 16> Mask;
    for (unsigned i = 0; i != Scale; ++i)
      Mask.push_back(i);
11780 for (unsigned i = 0; i != Scale; ++i)
11781 Mask.push_back(NumElems+i);
11782 Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
11784 for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
      Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
  }

  return Ops[0];
}
11790 // 256-bit AVX can use the vinsertf128 instruction
11791 // to create 256-bit vectors from two other 128-bit ones.
11792 // TODO: Detect subvector broadcast here instead of DAG combine?
11793 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
11794 const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  MVT ResVT = Op.getSimpleValueType();
11798 assert((ResVT.is256BitVector() ||
11799 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
11801 unsigned NumOperands = Op.getNumOperands();
11802 unsigned NumFreezeUndef = 0;
11803 unsigned NumZero = 0;
11804 unsigned NumNonZero = 0;
11805 unsigned NonZeros = 0;
11806 for (unsigned i = 0; i != NumOperands; ++i) {
11807 SDValue SubVec = Op.getOperand(i);
    if (SubVec.isUndef())
      continue;
11810 if (ISD::isFreezeUndef(SubVec.getNode())) {
11811 // If the freeze(undef) has multiple uses then we must fold to zero.
      if (SubVec.hasOneUse())
        ++NumFreezeUndef;
      else
        ++NumZero;
    }
    else if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
      ++NumZero;
    else {
      assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
      NonZeros |= 1 << i;
      ++NumNonZero;
    }
  }
11826 // If we have more than 2 non-zeros, build each half separately.
11827 if (NumNonZero > 2) {
11828 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
11829 ArrayRef<SDUse> Ops = Op->ops();
11830 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11831 Ops.slice(0, NumOperands/2));
11832 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11833 Ops.slice(NumOperands/2));
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
  }
11837 // Otherwise, build it up through insert_subvectors.
11838 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
11839 : (NumFreezeUndef ? DAG.getFreeze(DAG.getUNDEF(ResVT))
11840 : DAG.getUNDEF(ResVT));
11842 MVT SubVT = Op.getOperand(0).getSimpleValueType();
11843 unsigned NumSubElems = SubVT.getVectorNumElements();
11844 for (unsigned i = 0; i != NumOperands; ++i) {
    if ((NonZeros & (1 << i)) == 0)
      continue;

    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
                      Op.getOperand(i),
                      DAG.getIntPtrConstant(i * NumSubElems, dl));
  }

  return Vec;
}
11856 // Returns true if the given node is a type promotion (by concatenating i1
// zeros) of the result of a node that already zeros all upper bits of
// the k-register.
11859 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
11860 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
11861 const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc dl(Op);
11864 MVT ResVT = Op.getSimpleValueType();
11865 unsigned NumOperands = Op.getNumOperands();
11867 assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
11868 "Unexpected number of operands in CONCAT_VECTORS");
11870 uint64_t Zeros = 0;
11871 uint64_t NonZeros = 0;
11872 for (unsigned i = 0; i != NumOperands; ++i) {
11873 SDValue SubVec = Op.getOperand(i);
    if (SubVec.isUndef())
      continue;
11876 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
11877 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
11878 Zeros |= (uint64_t)1 << i;
11880 NonZeros |= (uint64_t)1 << i;
11883 unsigned NumElems = ResVT.getVectorNumElements();
11885 // If we are inserting non-zero vector and there are zeros in LSBs and undef
11886 // in the MSBs we need to emit a KSHIFTL. The generic lowering to
11887 // insert_subvector will give us two kshifts.
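  // Illustrative sketch (hypothetical v2i1 operands): for
  // concat(zero, X, undef, undef), X is inserted at index 0 of a wider mask
  // register and a single KSHIFTL by 2 moves it into place.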
11888 if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
11889 Log2_64(NonZeros) != NumOperands - 1) {
11890 MVT ShiftVT = ResVT;
11891 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
11892 ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
11893 unsigned Idx = Log2_64(NonZeros);
11894 SDValue SubVec = Op.getOperand(Idx);
11895 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
11896 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
11897 DAG.getUNDEF(ShiftVT), SubVec,
11898 DAG.getIntPtrConstant(0, dl));
11899 Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
11900 DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
11901 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
11902 DAG.getIntPtrConstant(0, dl));
11905 // If there are zero or one non-zeros we can handle this very simply.
11906 if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
    SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
    if (!NonZeros)
      return Vec;
11910 unsigned Idx = Log2_64(NonZeros);
11911 SDValue SubVec = Op.getOperand(Idx);
11912 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
11913 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
                       DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
  }
11917 if (NumOperands > 2) {
11918 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
11919 ArrayRef<SDUse> Ops = Op->ops();
11920 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11921 Ops.slice(0, NumOperands/2));
11922 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11923 Ops.slice(NumOperands/2));
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
  }
11927 assert(llvm::popcount(NonZeros) == 2 && "Simple cases not handled?");
11929 if (ResVT.getVectorNumElements() >= 16)
11930 return Op; // The operation is legal with KUNPCK
11932 SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
11933 DAG.getUNDEF(ResVT), Op.getOperand(0),
11934 DAG.getIntPtrConstant(0, dl));
11935 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
                     DAG.getIntPtrConstant(NumElems/2, dl));
}
11939 static SDValue LowerCONCAT_VECTORS(SDValue Op,
11940 const X86Subtarget &Subtarget,
11941 SelectionDAG &DAG) {
11942 MVT VT = Op.getSimpleValueType();
11943 if (VT.getVectorElementType() == MVT::i1)
11944 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
11946 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
11947 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
11948 Op.getNumOperands() == 4)));
11950 // AVX can use the vinsertf128 instruction to create 256-bit vectors
11951 // from two other 128-bit ones.
11953 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
  return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
}
11957 //===----------------------------------------------------------------------===//
11958 // Vector shuffle lowering
11960 // This is an experimental code path for lowering vector shuffles on x86. It is
11961 // designed to handle arbitrary vector shuffles and blends, gracefully
11962 // degrading performance as necessary. It works hard to recognize idiomatic
11963 // shuffles and lower them to optimal instruction patterns without leaving
// a framework that allows reasonably efficient handling of all vector shuffle
// patterns.
11966 //===----------------------------------------------------------------------===//
11968 /// Tiny helper function to identify a no-op mask.
11970 /// This is a somewhat boring predicate function. It checks whether the mask
11971 /// array input, which is assumed to be a single-input shuffle mask of the kind
11972 /// used by the X86 shuffle instructions (not a fully general
11973 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
11974 /// in-place shuffle are 'no-op's.
11975 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
11976 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11977 assert(Mask[i] >= -1 && "Out of bound mask element!");
    if (Mask[i] >= 0 && Mask[i] != i)
      return false;
  }
  return true;
}
/// Test whether there are elements crossing LaneSizeInBits lanes in this
/// shuffle mask.
11987 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
11988 /// and we routinely test for these.
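///
/// As an illustrative example (hypothetical v8i32 mask, 128-bit lanes),
/// <0, 1, 2, 3, 0, 1, 2, 3> is lane-crossing: result elements 4-7 read
/// source elements 0-3, which live in the other 128-bit lane.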
11989 static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
11990 unsigned ScalarSizeInBits,
11991 ArrayRef<int> Mask) {
11992 assert(LaneSizeInBits && ScalarSizeInBits &&
11993 (LaneSizeInBits % ScalarSizeInBits) == 0 &&
11994 "Illegal shuffle lane size");
11995 int LaneSize = LaneSizeInBits / ScalarSizeInBits;
11996 int Size = Mask.size();
11997 for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
      return true;
  return false;
}
/// Test whether there are elements crossing 128-bit lanes in this
/// shuffle mask.
12005 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
  return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
}
12009 /// Test whether elements in each LaneSizeInBits lane in this shuffle mask come
/// from multiple lanes - this is different from isLaneCrossingShuffleMask to
12011 /// better support 'repeated mask + lane permute' style shuffles.
12012 static bool isMultiLaneShuffleMask(unsigned LaneSizeInBits,
12013 unsigned ScalarSizeInBits,
12014 ArrayRef<int> Mask) {
12015 assert(LaneSizeInBits && ScalarSizeInBits &&
12016 (LaneSizeInBits % ScalarSizeInBits) == 0 &&
12017 "Illegal shuffle lane size");
12018 int NumElts = Mask.size();
12019 int NumEltsPerLane = LaneSizeInBits / ScalarSizeInBits;
12020 int NumLanes = NumElts / NumEltsPerLane;
12021 if (NumLanes > 1) {
    for (int i = 0; i != NumLanes; ++i) {
      int SrcLane = -1;
      for (int j = 0; j != NumEltsPerLane; ++j) {
        int M = Mask[(i * NumEltsPerLane) + j];
        if (M < 0)
          continue;
        int Lane = (M % NumElts) / NumEltsPerLane;
        if (SrcLane >= 0 && SrcLane != Lane)
          return true;
        SrcLane = Lane;
      }
    }
  }
  return false;
}
12038 /// Test whether a shuffle mask is equivalent within each sub-lane.
12040 /// This checks a shuffle mask to see if it is performing the same
12041 /// lane-relative shuffle in each sub-lane. This trivially implies
12042 /// that it is also not lane-crossing. It may however involve a blend from the
12043 /// same lane of a second vector.
12045 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
12046 /// non-trivial to compute in the face of undef lanes. The representation is
12047 /// suitable for use with existing 128-bit shuffles as entries from the second
12048 /// vector have been remapped to [LaneSize, 2*LaneSize).
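///
/// As an illustrative example (hypothetical v8f32 mask),
/// <0, 9, 2, 11, 4, 13, 6, 15> repeats across both 128-bit lanes and yields
/// RepeatedMask = <0, 5, 2, 7>, with second-vector entries remapped to [4, 8).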
12049 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
12050 ArrayRef<int> Mask,
12051 SmallVectorImpl<int> &RepeatedMask) {
12052 auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
12053 RepeatedMask.assign(LaneSize, -1);
12054 int Size = Mask.size();
12055 for (int i = 0; i < Size; ++i) {
    assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
    if (Mask[i] < 0)
      continue;
12059 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
      // This entry crosses lanes, so there is no way to model this shuffle.
      return false;
12063 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
12064 // Adjust second vector indices to start at LaneSize instead of Size.
12065 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
12066 : Mask[i] % LaneSize + LaneSize;
12067 if (RepeatedMask[i % LaneSize] < 0)
12068 // This is the first non-undef entry in this slot of a 128-bit lane.
12069 RepeatedMask[i % LaneSize] = LocalM;
12070 else if (RepeatedMask[i % LaneSize] != LocalM)
      // Found a mismatch with the repeated mask.
      return false;
  }
  return true;
}
12077 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
12080 SmallVectorImpl<int> &RepeatedMask) {
  return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
}

static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
12086 SmallVector<int, 32> RepeatedMask;
  return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
}
12090 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
static bool
is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
12093 SmallVectorImpl<int> &RepeatedMask) {
  return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
}
12097 /// Test whether a target shuffle mask is equivalent within each sub-lane.
12098 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
12099 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits,
12100 unsigned EltSizeInBits,
12101 ArrayRef<int> Mask,
12102 SmallVectorImpl<int> &RepeatedMask) {
12103 int LaneSize = LaneSizeInBits / EltSizeInBits;
12104 RepeatedMask.assign(LaneSize, SM_SentinelUndef);
12105 int Size = Mask.size();
12106 for (int i = 0; i < Size; ++i) {
12107 assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
12108 if (Mask[i] == SM_SentinelUndef)
12110 if (Mask[i] == SM_SentinelZero) {
12111 if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
      RepeatedMask[i % LaneSize] = SM_SentinelZero;
      continue;
    }
12116 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
      // This entry crosses lanes, so there is no way to model this shuffle.
      return false;
12120 // Handle the in-lane shuffles by detecting if and when they repeat. Adjust
12121 // later vector indices to start at multiples of LaneSize instead of Size.
12122 int LaneM = Mask[i] / Size;
12123 int LocalM = (Mask[i] % LaneSize) + (LaneM * LaneSize);
12124 if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
12125 // This is the first non-undef entry in this slot of a 128-bit lane.
12126 RepeatedMask[i % LaneSize] = LocalM;
12127 else if (RepeatedMask[i % LaneSize] != LocalM)
12128 // Found a mismatch with the repeated mask.
/// Test whether a target shuffle mask is equivalent within each sub-lane.
/// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
                                        ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &RepeatedMask) {
  return isRepeatedTargetShuffleMask(LaneSizeInBits, VT.getScalarSizeInBits(),
                                     Mask, RepeatedMask);
}

/// Checks whether the vector elements referenced by two shuffle masks are
/// equivalent.
static bool IsElementEquivalent(int MaskSize, SDValue Op, SDValue ExpectedOp,
                                int Idx, int ExpectedIdx) {
  assert(0 <= Idx && Idx < MaskSize && 0 <= ExpectedIdx &&
         ExpectedIdx < MaskSize && "Out of range element index");
  if (!Op || !ExpectedOp || Op.getOpcode() != ExpectedOp.getOpcode())
    return false;

  switch (Op.getOpcode()) {
  case ISD::BUILD_VECTOR:
    // If the values are build vectors, we can look through them to find
    // equivalent inputs that make the shuffles equivalent.
    // TODO: Handle MaskSize != Op.getNumOperands()?
    if (MaskSize == (int)Op.getNumOperands() &&
        MaskSize == (int)ExpectedOp.getNumOperands())
      return Op.getOperand(Idx) == ExpectedOp.getOperand(ExpectedIdx);
    break;
  case X86ISD::VBROADCAST:
  case X86ISD::VBROADCAST_LOAD:
    // TODO: Handle MaskSize != Op.getValueType().getVectorNumElements()?
    return (Op == ExpectedOp &&
            (int)Op.getValueType().getVectorNumElements() == MaskSize);
  case X86ISD::HADD:
  case X86ISD::HSUB:
  case X86ISD::FHADD:
  case X86ISD::FHSUB:
  case X86ISD::PACKSS:
  case X86ISD::PACKUS:
    // HOP(X,X) can refer to the elt from the lower/upper half of a lane.
    // TODO: Handle MaskSize != NumElts?
    // TODO: Handle HOP(X,Y) vs HOP(Y,X) equivalence cases.
    if (Op == ExpectedOp && Op.getOperand(0) == Op.getOperand(1)) {
      MVT VT = Op.getSimpleValueType();
      int NumElts = VT.getVectorNumElements();
      if (MaskSize == NumElts) {
        int NumLanes = VT.getSizeInBits() / 128;
        int NumEltsPerLane = NumElts / NumLanes;
        int NumHalfEltsPerLane = NumEltsPerLane / 2;
        bool SameLane =
            (Idx / NumEltsPerLane) == (ExpectedIdx / NumEltsPerLane);
        bool SameElt =
            (Idx % NumHalfEltsPerLane) == (ExpectedIdx % NumHalfEltsPerLane);
        return SameLane && SameElt;
      }
    }
    break;
  }

  return false;
}

/// Checks whether a shuffle mask is equivalent to an explicit list of
/// arguments.
///
/// This is a fast way to test a shuffle mask against a fixed pattern:
///
///   if (isShuffleEquivalent(Mask, {3, 2, 1, 0})) { ... }
///
/// It returns true if the mask is exactly as wide as the argument list, and
/// each element of the mask is either -1 (signifying undef) or the value given
/// in the argument.
static bool isShuffleEquivalent(ArrayRef<int> Mask, ArrayRef<int> ExpectedMask,
                                SDValue V1 = SDValue(),
                                SDValue V2 = SDValue()) {
  int Size = Mask.size();
  if (Size != (int)ExpectedMask.size())
    return false;

  for (int i = 0; i < Size; ++i) {
    assert(Mask[i] >= -1 && "Out of bound mask element!");
    int MaskIdx = Mask[i];
    int ExpectedIdx = ExpectedMask[i];
    if (0 <= MaskIdx && MaskIdx != ExpectedIdx) {
      SDValue MaskV = MaskIdx < Size ? V1 : V2;
      SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
      MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
      ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
      if (!IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
        return false;
    }
  }
  return true;
}

/// Checks whether a target shuffle mask is equivalent to an explicit pattern.
///
/// The masks must be exactly the same width.
///
/// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
/// value in ExpectedMask is always accepted. Otherwise the indices must match.
///
/// SM_SentinelZero is accepted as a valid negative index but must match in
/// both, or via a known bits test.
static bool isTargetShuffleEquivalent(MVT VT, ArrayRef<int> Mask,
                                      ArrayRef<int> ExpectedMask,
                                      const SelectionDAG &DAG,
                                      SDValue V1 = SDValue(),
                                      SDValue V2 = SDValue()) {
  int Size = Mask.size();
  if (Size != (int)ExpectedMask.size())
    return false;
  assert(llvm::all_of(ExpectedMask,
                      [Size](int M) { return isInRange(M, 0, 2 * Size); }) &&
         "Illegal target shuffle mask");

  // Check for out-of-range target shuffle mask indices.
  if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
    return false;

  // Don't use V1/V2 if they're not the same size as the shuffle mask type.
  if (V1 && (V1.getValueSizeInBits() != VT.getSizeInBits() ||
             !V1.getValueType().isVector()))
    V1 = SDValue();
  if (V2 && (V2.getValueSizeInBits() != VT.getSizeInBits() ||
             !V2.getValueType().isVector()))
    V2 = SDValue();

  APInt ZeroV1 = APInt::getZero(Size);
  APInt ZeroV2 = APInt::getZero(Size);

  for (int i = 0; i < Size; ++i) {
    int MaskIdx = Mask[i];
    int ExpectedIdx = ExpectedMask[i];
    if (MaskIdx == SM_SentinelUndef || MaskIdx == ExpectedIdx)
      continue;
    if (MaskIdx == SM_SentinelZero) {
      // If we need this expected index to be a zero element, then update the
      // relevant zero mask and perform the known bits at the end to minimize
      // repeated computes.
      SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
      if (ExpectedV &&
          Size == (int)ExpectedV.getValueType().getVectorNumElements()) {
        int BitIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
        APInt &ZeroMask = ExpectedIdx < Size ? ZeroV1 : ZeroV2;
        ZeroMask.setBit(BitIdx);
        continue;
      }
    }
    if (MaskIdx >= 0) {
      SDValue MaskV = MaskIdx < Size ? V1 : V2;
      SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
      MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
      ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
      if (IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
        continue;
    }
    return false;
  }
  return (ZeroV1.isZero() || DAG.MaskedVectorIsZero(V1, ZeroV1)) &&
         (ZeroV2.isZero() || DAG.MaskedVectorIsZero(V2, ZeroV2));
}

// Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
// instructions.
static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT,
                                  const SelectionDAG &DAG) {
  if (VT != MVT::v8i32 && VT != MVT::v8f32)
    return false;

  SmallVector<int, 8> Unpcklwd;
  createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
                          /* Unary = */ false);
  SmallVector<int, 8> Unpckhwd;
  createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
                          /* Unary = */ false);
  bool IsUnpackwdMask = (isTargetShuffleEquivalent(VT, Mask, Unpcklwd, DAG) ||
                         isTargetShuffleEquivalent(VT, Mask, Unpckhwd, DAG));
  return IsUnpackwdMask;
}

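// For reference, the binary v8i16 unpack masks built above are
// <0,8,1,9,2,10,3,11> (lo) and <4,12,5,13,6,14,7,15> (hi) - i.e. the word
// interleaves performed by vpunpcklwd/vpunpckhwd.
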
static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask,
                                      const SelectionDAG &DAG) {
  // Create 128-bit vector type based on mask size.
  MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
  MVT VT = MVT::getVectorVT(EltVT, Mask.size());

  // We can't assume a canonical shuffle mask, so try the commuted version too.
  SmallVector<int, 4> CommutedMask(Mask);
  ShuffleVectorSDNode::commuteMask(CommutedMask);

  // Match any of unary/binary or low/high.
  for (unsigned i = 0; i != 4; ++i) {
    SmallVector<int, 16> UnpackMask;
    createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
    if (isTargetShuffleEquivalent(VT, Mask, UnpackMask, DAG) ||
        isTargetShuffleEquivalent(VT, CommutedMask, UnpackMask, DAG))
      return true;
  }
  return false;
}

/// Return true if a shuffle mask chooses elements identically in its top and
/// bottom halves. For example, any splat mask has the same top and bottom
/// halves. If an element is undefined in only one half of the mask, the halves
/// are not considered identical.
static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
  assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
  unsigned HalfSize = Mask.size() / 2;
  for (unsigned i = 0; i != HalfSize; ++i) {
    if (Mask[i] != Mask[i + HalfSize])
      return false;
  }
  return true;
}

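// e.g. <0,5,2,7,0,5,2,7> has identical halves, while <0,1,2,3,0,1,2,-1> does
// not (the trailing element is undef in only one half).
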
/// Get a 4-lane 8-bit shuffle immediate for a mask.
///
/// This helper function produces an 8-bit shuffle immediate corresponding to
/// the ubiquitous shuffle encoding scheme used in x86 instructions for
/// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
/// example.
///
/// NB: We rely heavily on "undef" masks preserving the input lane.
static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
  assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
  assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
  assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
  assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
  assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");

  // If the mask only uses one non-undef element, then fully 'splat' it to
  // improve later broadcast matching.
  int FirstIndex = find_if(Mask, [](int M) { return M >= 0; }) - Mask.begin();
  assert(0 <= FirstIndex && FirstIndex < 4 && "All undef shuffle mask");

  int FirstElt = Mask[FirstIndex];
  if (all_of(Mask, [FirstElt](int M) { return M < 0 || M == FirstElt; }))
    return (FirstElt << 6) | (FirstElt << 4) | (FirstElt << 2) | FirstElt;

  unsigned Imm = 0;
  Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
  Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
  Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
  Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
  return Imm;
}

static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
                                          SelectionDAG &DAG) {
  return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
}

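// Worked example of the encoding: Mask <3,1,2,0> yields
// 3 | (1 << 2) | (2 << 4) | (0 << 6) == 0x27, and a single-element mask such
// as <-1,2,-1,-1> is splatted to 0xAA (element 2 in all four 2-bit fields).
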
// The shuffle result takes the following form:
// 0*a[0], 0*a[1], ..., 0*a[n], with n >= 0 and the a[] elements in ascending
// order. Each element of Zeroable corresponds to a particular element of Mask,
// as described in the computeZeroableShuffleElements function.
//
// The function looks for a sub-mask whose nonzero elements are in increasing
// order. If such a sub-mask exists, the function returns true.
static bool isNonZeroElementsInOrder(const APInt &Zeroable,
                                     ArrayRef<int> Mask, const EVT &VectorType,
                                     bool &IsZeroSideLeft) {
  int NextElement = -1;
  // Check if the Mask's nonzero elements are in increasing order.
  for (int i = 0, e = Mask.size(); i < e; i++) {
    // Checks if the mask's zeros elements are built from only zeros.
    assert(Mask[i] >= -1 && "Out of bound mask element!");
    if (Mask[i] < 0)
      return false;
    if (Zeroable[i])
      continue;
    // Find the lowest non zero element
    if (NextElement < 0) {
      NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
      IsZeroSideLeft = NextElement != 0;
    }
    // Exit if the mask's non zero elements are not in increasing order.
    if (NextElement != Mask[i])
      return false;
    NextElement++;
  }
  return true;
}

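// Roughly, for a v4i32 shuffle where the two low result elements are zeroable
// and the mask is <z,z,4,5>, the nonzero elements ascend from the second
// vector's element 0, so this should return true with IsZeroSideLeft == true
// (the zeros sit on the left-hand side and V2 is the vector to expand).
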
/// Try to lower a shuffle with a single PSHUFB of V1 or V2.
static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
                                      ArrayRef<int> Mask, SDValue V1,
                                      SDValue V2, const APInt &Zeroable,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  int Size = Mask.size();
  int LaneSize = 128 / VT.getScalarSizeInBits();
  const int NumBytes = VT.getSizeInBits() / 8;
  const int NumEltBytes = VT.getScalarSizeInBits() / 8;

  assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
         (Subtarget.hasAVX2() && VT.is256BitVector()) ||
         (Subtarget.hasBWI() && VT.is512BitVector()));

  SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
  // Sign bit set in i8 mask means zero element.
  SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);

  SDValue V;
  for (int i = 0; i < NumBytes; ++i) {
    int M = Mask[i / NumEltBytes];
    if (M < 0) {
      PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
      continue;
    }
    if (Zeroable[i / NumEltBytes]) {
      PSHUFBMask[i] = ZeroMask;
      continue;
    }

    // We can only use a single input of V1 or V2.
    SDValue SrcV = (M >= Size ? V2 : V1);
    if (V && V != SrcV)
      return SDValue();
    V = SrcV;
    M %= Size;

    // PSHUFB can't cross lanes, ensure this doesn't happen.
    if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
      return SDValue();

    M = M % LaneSize;
    M = M * NumEltBytes + (i % NumEltBytes);
    PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
  }
  assert(V && "Failed to find a source input");

  MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
  return DAG.getBitcast(
      VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
                      DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
}

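// Sketch of the byte-mask expansion above: for a v8i16 shuffle, each in-lane
// element index M becomes the byte pair {2*M, 2*M+1}, so a word splat of
// element 4 produces the PSHUFB control <8,9,8,9,8,9,...>.
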
static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
                           const X86Subtarget &Subtarget, SelectionDAG &DAG,
                           const SDLoc &dl);

// X86 has a dedicated shuffle that can be lowered to VEXPAND.
static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
                                    const APInt &Zeroable,
                                    ArrayRef<int> Mask, SDValue &V1,
                                    SDValue &V2, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  bool IsLeftZeroSide = true;
  if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
                                IsLeftZeroSide))
    return SDValue();
  unsigned VEXPANDMask = (~Zeroable).getZExtValue();
  MVT IntegerType =
      MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
  SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
  unsigned NumElts = VT.getVectorNumElements();
  assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
         "Unexpected number of vector elements");
  SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
                              Subtarget, DAG, DL);
  SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
  SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
  return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
}

static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
                                  unsigned &UnpackOpcode, bool IsUnary,
                                  ArrayRef<int> TargetMask, const SDLoc &DL,
                                  SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  int NumElts = VT.getVectorNumElements();

  bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
  for (int i = 0; i != NumElts; i += 2) {
    int M1 = TargetMask[i + 0];
    int M2 = TargetMask[i + 1];
    Undef1 &= (SM_SentinelUndef == M1);
    Undef2 &= (SM_SentinelUndef == M2);
    Zero1 &= isUndefOrZero(M1);
    Zero2 &= isUndefOrZero(M2);
  }
  assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
         "Zeroable shuffle detected");

  // Attempt to match the target mask against the unpack lo/hi mask patterns.
  SmallVector<int, 64> Unpckl, Unpckh;
  createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
  if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG, V1,
                                (IsUnary ? V1 : V2))) {
    UnpackOpcode = X86ISD::UNPCKL;
    V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
    V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
    return true;
  }

  createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
  if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG, V1,
                                (IsUnary ? V1 : V2))) {
    UnpackOpcode = X86ISD::UNPCKH;
    V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
    V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
    return true;
  }

  // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
  if (IsUnary && (Zero1 || Zero2)) {
    // Don't bother if we can blend instead.
    if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
        isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
      return false;

    bool MatchLo = true, MatchHi = true;
    for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
      int M = TargetMask[i];

      // Ignore if the input is known to be zero or the index is undef.
      if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
          (M == SM_SentinelUndef))
        continue;

      MatchLo &= (M == Unpckl[i]);
      MatchHi &= (M == Unpckh[i]);
    }

    if (MatchLo || MatchHi) {
      UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
      V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
      V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
      return true;
    }
  }

  // If a binary shuffle, commute and try again.
  if (!IsUnary) {
    ShuffleVectorSDNode::commuteMask(Unpckl);
    if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG)) {
      UnpackOpcode = X86ISD::UNPCKL;
      std::swap(V1, V2);
      return true;
    }

    ShuffleVectorSDNode::commuteMask(Unpckh);
    if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG)) {
      UnpackOpcode = X86ISD::UNPCKH;
      std::swap(V1, V2);
      return true;
    }
  }

  return false;
}

// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
                                     ArrayRef<int> Mask, SDValue V1, SDValue V2,
                                     SelectionDAG &DAG) {
  SmallVector<int, 8> Unpckl;
  createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
  if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);

  SmallVector<int, 8> Unpckh;
  createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
  if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);

  // Commute and try again.
  ShuffleVectorSDNode::commuteMask(Unpckl);
  if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);

  ShuffleVectorSDNode::commuteMask(Unpckh);
  if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);

  return SDValue();
}

/// Check if the mask can be mapped to a preliminary shuffle (vperm 64-bit)
/// followed by a 256-bit unpack.
static SDValue lowerShuffleWithUNPCK256(const SDLoc &DL, MVT VT,
                                        ArrayRef<int> Mask, SDValue V1,
                                        SDValue V2, SelectionDAG &DAG) {
  SmallVector<int, 32> Unpckl, Unpckh;
  createSplat2ShuffleMask(VT, Unpckl, /* Lo */ true);
  createSplat2ShuffleMask(VT, Unpckh, /* Lo */ false);

  unsigned UnpackOpcode;
  if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
    UnpackOpcode = X86ISD::UNPCKL;
  else if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
    UnpackOpcode = X86ISD::UNPCKH;
  else
    return SDValue();

  // This is a "natural" unpack operation (rather than the 128-bit sectored
  // operation implemented by AVX). We need to rearrange 64-bit chunks of the
  // input in order to use the x86 instruction.
  V1 = DAG.getVectorShuffle(MVT::v4f64, DL, DAG.getBitcast(MVT::v4f64, V1),
                            DAG.getUNDEF(MVT::v4f64), {0, 2, 1, 3});
  V1 = DAG.getBitcast(VT, V1);
  return DAG.getNode(UnpackOpcode, DL, VT, V1, V1);
}

// Check if the mask can be mapped to a TRUNCATE or VTRUNC, truncating the
// source into the lower elements and zeroing the upper elements.
static bool matchShuffleAsVTRUNC(MVT &SrcVT, MVT &DstVT, MVT VT,
                                 ArrayRef<int> Mask, const APInt &Zeroable,
                                 const X86Subtarget &Subtarget) {
  if (!VT.is512BitVector() && !Subtarget.hasVLX())
    return false;

  unsigned NumElts = Mask.size();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  unsigned MaxScale = 64 / EltSizeInBits;

  for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
    unsigned SrcEltBits = EltSizeInBits * Scale;
    if (SrcEltBits < 32 && !Subtarget.hasBWI())
      continue;
    unsigned NumSrcElts = NumElts / Scale;
    if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale))
      continue;
    unsigned UpperElts = NumElts - NumSrcElts;
    if (!Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
      continue;
    SrcVT = MVT::getIntegerVT(EltSizeInBits * Scale);
    SrcVT = MVT::getVectorVT(SrcVT, NumSrcElts);
    DstVT = MVT::getIntegerVT(EltSizeInBits);
    if ((NumSrcElts * EltSizeInBits) >= 128) {
      // ISD::TRUNCATE.
      DstVT = MVT::getVectorVT(DstVT, NumSrcElts);
    } else {
      // X86ISD::VTRUNC.
      DstVT = MVT::getVectorVT(DstVT, 128 / EltSizeInBits);
    }
    return true;
  }

  return false;
}

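// e.g. a v16i8 shuffle <0,2,4,6,8,10,12,14,z,z,z,z,z,z,z,z> (upper half
// zeroable) matches with Scale == 2: SrcVT == v8i16 and, as 8 x i8 is
// narrower than 128 bits, DstVT == v16i8 for the X86ISD::VTRUNC form.
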
// Helper to create TRUNCATE/VTRUNC nodes, optionally with zero/undef upper
// element padding to the final DstVT.
static SDValue getAVX512TruncNode(const SDLoc &DL, MVT DstVT, SDValue Src,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG, bool ZeroUppers) {
  MVT SrcVT = Src.getSimpleValueType();
  MVT DstSVT = DstVT.getScalarType();
  unsigned NumDstElts = DstVT.getVectorNumElements();
  unsigned NumSrcElts = SrcVT.getVectorNumElements();
  unsigned DstEltSizeInBits = DstVT.getScalarSizeInBits();

  if (!DAG.getTargetLoweringInfo().isTypeLegal(SrcVT))
    return SDValue();

  // Perform a direct ISD::TRUNCATE if possible.
  if (NumSrcElts == NumDstElts)
    return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Src);

  if (NumSrcElts > NumDstElts) {
    MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
    SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
    return extractSubVector(Trunc, 0, DAG, DL, DstVT.getSizeInBits());
  }

  if ((NumSrcElts * DstEltSizeInBits) >= 128) {
    MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
    SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
    return widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
                          DstVT.getSizeInBits());
  }

  // Non-VLX targets must truncate from a 512-bit type, so we need to
  // widen, truncate and then possibly extract the original subvector.
  if (!Subtarget.hasVLX() && !SrcVT.is512BitVector()) {
    SDValue NewSrc = widenSubVector(Src, ZeroUppers, Subtarget, DAG, DL, 512);
    return getAVX512TruncNode(DL, DstVT, NewSrc, Subtarget, DAG, ZeroUppers);
  }

  // Fallback to a X86ISD::VTRUNC, padding if necessary.
  MVT TruncVT = MVT::getVectorVT(DstSVT, 128 / DstEltSizeInBits);
  SDValue Trunc = DAG.getNode(X86ISD::VTRUNC, DL, TruncVT, Src);
  if (DstVT != TruncVT)
    Trunc = widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
                           DstVT.getSizeInBits());
  return Trunc;
}

// Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
//
// An example is the following:
//
// t0: ch = EntryToken
// t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
// t25: v4i32 = truncate t2
// t41: v8i16 = bitcast t25
// t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
// Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
// t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
// t18: v2i64 = bitcast t51
//
// One can just use a single vpmovdw instruction. Without avx512vl we need to
// use the zmm variant and extract the lower subvector, padding with zeroes.
// TODO: Merge with lowerShuffleAsVTRUNC.
static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  assert((VT == MVT::v16i8 || VT == MVT::v8i16) && "Unexpected VTRUNC type");
  if (!Subtarget.hasAVX512())
    return SDValue();

  unsigned NumElts = VT.getVectorNumElements();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  unsigned MaxScale = 64 / EltSizeInBits;
  for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
    unsigned SrcEltBits = EltSizeInBits * Scale;
    unsigned NumSrcElts = NumElts / Scale;
    unsigned UpperElts = NumElts - NumSrcElts;
    if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale) ||
        !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
      continue;

    // Attempt to find a matching source truncation, but as a fall back VLX
    // cases can use the VPMOV directly.
    SDValue Src = peekThroughBitcasts(V1);
    if (Src.getOpcode() == ISD::TRUNCATE &&
        Src.getScalarValueSizeInBits() == SrcEltBits) {
      Src = Src.getOperand(0);
    } else if (Subtarget.hasVLX()) {
      MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
      MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
      Src = DAG.getBitcast(SrcVT, Src);
      // Don't do this if PACKSS/PACKUS could perform it cheaper.
      if (Scale == 2 &&
          ((DAG.ComputeNumSignBits(Src) > EltSizeInBits) ||
           (DAG.computeKnownBits(Src).countMinLeadingZeros() >= EltSizeInBits)))
        return SDValue();
    } else
      return SDValue();

    // VPMOVWB is only available with avx512bw.
    if (!Subtarget.hasBWI() && Src.getScalarValueSizeInBits() < 32)
      return SDValue();

    bool UndefUppers = isUndefInRange(Mask, NumSrcElts, UpperElts);
    return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
  }

  return SDValue();
}

// Attempt to match binary shuffle patterns as a truncate.
static SDValue lowerShuffleAsVTRUNC(const SDLoc &DL, MVT VT, SDValue V1,
                                    SDValue V2, ArrayRef<int> Mask,
                                    const APInt &Zeroable,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unexpected VTRUNC type");
  if (!Subtarget.hasAVX512())
    return SDValue();

  unsigned NumElts = VT.getVectorNumElements();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  unsigned MaxScale = 64 / EltSizeInBits;
  for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
    // TODO: Support non-BWI VPMOVWB truncations?
    unsigned SrcEltBits = EltSizeInBits * Scale;
    if (SrcEltBits < 32 && !Subtarget.hasBWI())
      continue;

    // Match shuffle <Ofs,Ofs+Scale,Ofs+2*Scale,..,undef_or_zero,undef_or_zero>
    // Bail if the V2 elements are undef.
    unsigned NumHalfSrcElts = NumElts / Scale;
    unsigned NumSrcElts = 2 * NumHalfSrcElts;
    for (unsigned Offset = 0; Offset != Scale; ++Offset) {
      if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, Offset, Scale) ||
          isUndefInRange(Mask, NumHalfSrcElts, NumHalfSrcElts))
        continue;

      // The elements beyond the truncation must be undef/zero.
      unsigned UpperElts = NumElts - NumSrcElts;
      if (UpperElts > 0 &&
          !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
        continue;
      bool UndefUppers =
          UpperElts > 0 && isUndefInRange(Mask, NumSrcElts, UpperElts);

      // For offset truncations, ensure that the concat is cheap.
      if (Offset) {
        auto IsCheapConcat = [&](SDValue Lo, SDValue Hi) {
          if (Lo.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
              Hi.getOpcode() == ISD::EXTRACT_SUBVECTOR)
            return Lo.getOperand(0) == Hi.getOperand(0);
          if (ISD::isNormalLoad(Lo.getNode()) &&
              ISD::isNormalLoad(Hi.getNode())) {
            auto *LDLo = cast<LoadSDNode>(Lo);
            auto *LDHi = cast<LoadSDNode>(Hi);
            return DAG.areNonVolatileConsecutiveLoads(
                LDHi, LDLo, Lo.getValueType().getStoreSize(), 1);
          }
          return false;
        };
        if (!IsCheapConcat(V1, V2))
          continue;
      }

      // As we're using both sources, we need to concat them together
      // and truncate from the double-sized src.
      MVT ConcatVT = MVT::getVectorVT(VT.getScalarType(), NumElts * 2);
      SDValue Src = DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT, V1, V2);

      MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
      MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
      Src = DAG.getBitcast(SrcVT, Src);

      // Shift the offset'd elements into place for the truncation.
      // TODO: Use getTargetVShiftByConstNode.
      if (Offset)
        Src = DAG.getNode(
            X86ISD::VSRLI, DL, SrcVT, Src,
            DAG.getTargetConstant(Offset * EltSizeInBits, DL, MVT::i8));

      return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
    }
  }
  return SDValue();
}

/// Check whether a compaction lowering can be done by dropping even/odd
/// elements and compute how many times even/odd elements must be dropped.
///
/// This handles shuffles which take every Nth element where N is a power of
/// two. Example shuffle masks:
///
/// (even)
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
///
/// (odd)
///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15,  0,  2,  4,  6,  8, 10, 12, 14
///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
///
/// Any of these lanes can of course be undef.
///
/// This routine only supports N <= 3.
/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
/// for larger N.
///
/// \returns N above, or the number of times even/odd elements must be dropped
/// if there is such a number. Otherwise returns zero.
static int canLowerByDroppingElements(ArrayRef<int> Mask, bool MatchEven,
                                      bool IsSingleInput) {
  // The modulus for the shuffle vector entries is based on whether this is
  // a single input or not.
  int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
  assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
         "We should only be called with masks with a power-of-2 size!");

  uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
  int Offset = MatchEven ? 0 : 1;

  // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
  // and 2^3 simultaneously. This is because we may have ambiguity with
  // partially undef inputs.
  bool ViableForN[3] = {true, true, true};

  for (int i = 0, e = Mask.size(); i < e; ++i) {
    // Ignore undef lanes, we'll optimistically collapse them to the pattern we
    // want.
    if (Mask[i] < 0)
      continue;

    bool IsAnyViable = false;
    for (unsigned j = 0; j != std::size(ViableForN); ++j)
      if (ViableForN[j]) {
        uint64_t N = j + 1;

        // The shuffle mask must be equal to (i * 2^N) % M.
        if ((uint64_t)(Mask[i] - Offset) == (((uint64_t)i << N) & ModMask))
          IsAnyViable = true;
        else
          ViableForN[j] = false;
      }
    // Early exit if we exhaust the possible powers of two.
    if (!IsAnyViable)
      break;
  }

  for (unsigned j = 0; j != std::size(ViableForN); ++j)
    if (ViableForN[j])
      return j + 1;

  // Return 0 as there is no viable power of two.
  return 0;
}

// X86 has dedicated pack instructions that can handle specific truncation
// operations: PACKSS and PACKUS.
// Checks for compaction shuffle masks if MaxStages > 1.
// TODO: Add support for matching multiple PACKSS/PACKUS stages.
static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
                                 unsigned &PackOpcode, ArrayRef<int> TargetMask,
                                 const SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget,
                                 unsigned MaxStages = 1) {
  unsigned NumElts = VT.getVectorNumElements();
  unsigned BitSize = VT.getScalarSizeInBits();
  assert(0 < MaxStages && MaxStages <= 3 && (BitSize << MaxStages) <= 64 &&
         "Illegal maximum compaction");

  auto MatchPACK = [&](SDValue N1, SDValue N2, MVT PackVT) {
    unsigned NumSrcBits = PackVT.getScalarSizeInBits();
    unsigned NumPackedBits = NumSrcBits - BitSize;
    N1 = peekThroughBitcasts(N1);
    N2 = peekThroughBitcasts(N2);
    unsigned NumBits1 = N1.getScalarValueSizeInBits();
    unsigned NumBits2 = N2.getScalarValueSizeInBits();
    bool IsZero1 = llvm::isNullOrNullSplat(N1, /*AllowUndefs*/ false);
    bool IsZero2 = llvm::isNullOrNullSplat(N2, /*AllowUndefs*/ false);
    if ((!N1.isUndef() && !IsZero1 && NumBits1 != NumSrcBits) ||
        (!N2.isUndef() && !IsZero2 && NumBits2 != NumSrcBits))
      return false;
    if (Subtarget.hasSSE41() || BitSize == 8) {
      APInt ZeroMask = APInt::getHighBitsSet(NumSrcBits, NumPackedBits);
      if ((N1.isUndef() || IsZero1 || DAG.MaskedValueIsZero(N1, ZeroMask)) &&
          (N2.isUndef() || IsZero2 || DAG.MaskedValueIsZero(N2, ZeroMask))) {
        V1 = N1;
        V2 = N2;
        SrcVT = PackVT;
        PackOpcode = X86ISD::PACKUS;
        return true;
      }
    }
    bool IsAllOnes1 = llvm::isAllOnesOrAllOnesSplat(N1, /*AllowUndefs*/ false);
    bool IsAllOnes2 = llvm::isAllOnesOrAllOnesSplat(N2, /*AllowUndefs*/ false);
    if ((N1.isUndef() || IsZero1 || IsAllOnes1 ||
         DAG.ComputeNumSignBits(N1) > NumPackedBits) &&
        (N2.isUndef() || IsZero2 || IsAllOnes2 ||
         DAG.ComputeNumSignBits(N2) > NumPackedBits)) {
      V1 = N1;
      V2 = N2;
      SrcVT = PackVT;
      PackOpcode = X86ISD::PACKSS;
      return true;
    }
    return false;
  };

  // Attempt to match against wider and wider compaction patterns.
  for (unsigned NumStages = 1; NumStages <= MaxStages; ++NumStages) {
    MVT PackSVT = MVT::getIntegerVT(BitSize << NumStages);
    MVT PackVT = MVT::getVectorVT(PackSVT, NumElts >> NumStages);

    // Try binary shuffle.
    SmallVector<int, 32> BinaryMask;
    createPackShuffleMask(VT, BinaryMask, false, NumStages);
    if (isTargetShuffleEquivalent(VT, TargetMask, BinaryMask, DAG, V1, V2))
      if (MatchPACK(V1, V2, PackVT))
        return true;

    // Try unary shuffle.
    SmallVector<int, 32> UnaryMask;
    createPackShuffleMask(VT, UnaryMask, true, NumStages);
    if (isTargetShuffleEquivalent(VT, TargetMask, UnaryMask, DAG, V1))
      if (MatchPACK(V1, V1, PackVT))
        return true;
  }

  return false;
}

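// e.g. a v16i8 mask <0,2,4,...,30> over two v16i8 inputs matches the binary
// pack pattern: if the odd bytes of each input are known zero this becomes
// PACKUS of the inputs bitcast to v8i16, and if each word has at least 9 sign
// bits it becomes PACKSS instead.
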
static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
                                    SDValue V1, SDValue V2, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT PackVT;
  unsigned PackOpcode;
  unsigned SizeBits = VT.getSizeInBits();
  unsigned EltBits = VT.getScalarSizeInBits();
  unsigned MaxStages = Log2_32(64 / EltBits);
  if (!matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
                            Subtarget, MaxStages))
    return SDValue();

  unsigned CurrentEltBits = PackVT.getScalarSizeInBits();
  unsigned NumStages = Log2_32(CurrentEltBits / EltBits);

  // Don't lower multi-stage packs on AVX512, truncation is better.
  if (NumStages != 1 && SizeBits == 128 && Subtarget.hasVLX())
    return SDValue();

  // Pack to the largest type possible:
  // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
  unsigned MaxPackBits = 16;
  if (CurrentEltBits > 16 &&
      (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41()))
    MaxPackBits = 32;

  // Repeatedly pack down to the target size.
  SDValue Res;
  for (unsigned i = 0; i != NumStages; ++i) {
    unsigned SrcEltBits = std::min(MaxPackBits, CurrentEltBits);
    unsigned NumSrcElts = SizeBits / SrcEltBits;
    MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
    MVT DstSVT = MVT::getIntegerVT(SrcEltBits / 2);
    MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
    MVT DstVT = MVT::getVectorVT(DstSVT, NumSrcElts * 2);
    Res = DAG.getNode(PackOpcode, DL, DstVT, DAG.getBitcast(SrcVT, V1),
                      DAG.getBitcast(SrcVT, V2));
    V1 = V2 = Res;
    CurrentEltBits /= 2;
  }
  assert(Res && Res.getValueType() == VT &&
         "Failed to lower compaction shuffle");
  return Res;
}

/// Try to emit a bitmask instruction for a shuffle.
///
/// This handles cases where we can model a blend exactly as a bitmask due to
/// one of the inputs being zeroable.
static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT MaskVT = VT;
  MVT EltVT = VT.getVectorElementType();
  SDValue Zero, AllOnes;
  // Use f64 if i64 isn't legal.
  if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
    EltVT = MVT::f64;
    MaskVT = MVT::getVectorVT(EltVT, Mask.size());
  }

  MVT LogicVT = VT;
  if (EltVT == MVT::f32 || EltVT == MVT::f64) {
    Zero = DAG.getConstantFP(0.0, DL, EltVT);
    APFloat AllOnesValue =
        APFloat::getAllOnesValue(SelectionDAG::EVTToAPFloatSemantics(EltVT));
    AllOnes = DAG.getConstantFP(AllOnesValue, DL, EltVT);
    LogicVT =
        MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
  } else {
    Zero = DAG.getConstant(0, DL, EltVT);
    AllOnes = DAG.getAllOnesConstant(DL, EltVT);
  }

  SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
  SDValue V;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Zeroable[i])
      continue;
    if (Mask[i] % Size != i)
      return SDValue(); // Not a blend.
    if (!V)
      V = Mask[i] < Size ? V1 : V2;
    else if (V != (Mask[i] < Size ? V1 : V2))
      return SDValue(); // Can only let one input through the mask.

    VMaskOps[i] = AllOnes;
  }
  if (!V)
    return SDValue(); // No non-zeroable elements!

  SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
  VMask = DAG.getBitcast(LogicVT, VMask);
  V = DAG.getBitcast(LogicVT, V);
  SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
  return DAG.getBitcast(VT, And);
}

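// e.g. a v4i32 shuffle <0,z,2,z> (odd elements zeroable) is lowered to
// V1 & <-1,0,-1,0>, letting only the non-zeroable elements of V1 through.
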
/// Try to emit a blend instruction for a shuffle using bit math.
///
/// This is used as a fallback approach when first class blend instructions are
/// unavailable. Currently it is only suitable for integer vectors, but could
/// be generalized for floating point vectors if desirable.
static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                      SDValue V2, ArrayRef<int> Mask,
                                      SelectionDAG &DAG) {
  assert(VT.isInteger() && "Only supports integer vector types!");
  MVT EltVT = VT.getVectorElementType();
  SDValue Zero = DAG.getConstant(0, DL, EltVT);
  SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
  SmallVector<SDValue, 16> MaskOps;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
      return SDValue(); // Shuffled input!
    MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
  }

  SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
  return getBitSelect(DL, VT, V1, V2, V1Mask, DAG);
}

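// The bit blend computes (V1 & M) | (V2 & ~M) with M[i] == -1 when element i
// comes from V1: e.g. for a v4i32 mask <0,5,2,7>, M == <-1,0,-1,0> selects
// <V1[0],V2[1],V1[2],V2[3]>.
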
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG);

static bool matchShuffleAsBlend(MVT VT, SDValue V1, SDValue V2,
                                MutableArrayRef<int> Mask,
                                const APInt &Zeroable, bool &ForceV1Zero,
                                bool &ForceV2Zero, uint64_t &BlendMask) {
  bool V1IsZeroOrUndef =
      V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsZeroOrUndef =
      V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());

  BlendMask = 0;
  ForceV1Zero = false, ForceV2Zero = false;
  assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");

  int NumElts = Mask.size();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumEltsPerLane = NumElts / NumLanes;
  assert((NumLanes * NumEltsPerLane) == NumElts && "Value type mismatch");

  // For 32/64-bit elements, if we only reference one input (plus any undefs),
  // then ensure the blend mask part for that lane just references that input.
  bool ForceWholeLaneMasks =
      VT.is256BitVector() && VT.getScalarSizeInBits() >= 32;

  // Attempt to generate the binary blend mask. If an input is zero then
  // we can use any lane.
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    // Keep track of the inputs used per lane.
    bool LaneV1InUse = false;
    bool LaneV2InUse = false;
    uint64_t LaneBlendMask = 0;
    for (int LaneElt = 0; LaneElt != NumEltsPerLane; ++LaneElt) {
      int Elt = (Lane * NumEltsPerLane) + LaneElt;
      int M = Mask[Elt];
      if (M == SM_SentinelUndef)
        continue;
      if (M == Elt || (0 <= M && M < NumElts &&
                       IsElementEquivalent(NumElts, V1, V1, M, Elt))) {
        Mask[Elt] = Elt;
        LaneV1InUse = true;
        continue;
      }
      if (M == (Elt + NumElts) ||
          (NumElts <= M &&
           IsElementEquivalent(NumElts, V2, V2, M - NumElts, Elt))) {
        LaneBlendMask |= 1ull << LaneElt;
        Mask[Elt] = Elt + NumElts;
        LaneV2InUse = true;
        continue;
      }
      if (Zeroable[Elt]) {
        if (V1IsZeroOrUndef) {
          ForceV1Zero = true;
          Mask[Elt] = Elt;
          LaneV1InUse = true;
          continue;
        }
        if (V2IsZeroOrUndef) {
          ForceV2Zero = true;
          LaneBlendMask |= 1ull << LaneElt;
          Mask[Elt] = Elt + NumElts;
          LaneV2InUse = true;
          continue;
        }
      }
      return false;
    }

    // If we only used V2 then splat the lane blend mask to avoid any demanded
    // elts from V1 in this lane (the V1 equivalent is implicit with a zero
    // blend mask bit).
    if (ForceWholeLaneMasks && LaneV2InUse && !LaneV1InUse)
      LaneBlendMask = (1ull << NumEltsPerLane) - 1;

    BlendMask |= LaneBlendMask << (Lane * NumEltsPerLane);
  }
  return true;
}

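// e.g. for a v4i32 mask <0,5,2,7> this succeeds with BlendMask == 0b1010
// (elements 1 and 3 taken from V2); zeroable elements may additionally force
// ForceV1Zero/ForceV2Zero when the corresponding input is zero-or-undef.
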
static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
                                            int Scale) {
  uint64_t ScaledMask = 0;
  for (int i = 0; i != Size; ++i)
    if (BlendMask & (1ull << i))
      ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
  return ScaledMask;
}

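// e.g. scaling the v4i32 blend mask 0b1010 by Scale == 2 (to blend as v8i16)
// produces 0b11001100.
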
/// Try to emit a blend instruction for a shuffle.
///
/// This doesn't do any checks for the availability of instructions for blending
/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
/// be matched in the backend with the type given. What it does check for is
/// that the shuffle mask is a blend, or convertible into a blend with zero.
static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                   SDValue V2, ArrayRef<int> Original,
                                   const APInt &Zeroable,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  uint64_t BlendMask = 0;
  bool ForceV1Zero = false, ForceV2Zero = false;
  SmallVector<int, 64> Mask(Original);
  if (!matchShuffleAsBlend(VT, V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
                           BlendMask))
    return SDValue();

  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
  if (ForceV1Zero)
    V1 = getZeroVector(VT, Subtarget, DAG, DL);
  if (ForceV2Zero)
    V2 = getZeroVector(VT, Subtarget, DAG, DL);

  unsigned NumElts = VT.getVectorNumElements();

  switch (VT.SimpleTy) {
  case MVT::v4i64:
  case MVT::v8i32:
    assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
    [[fallthrough]];
  case MVT::v4f64:
  case MVT::v8f32:
    assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
    [[fallthrough]];
  case MVT::v2f64:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
    assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
    return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
                       DAG.getTargetConstant(BlendMask, DL, MVT::i8));
  case MVT::v16i16: {
    assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
      // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
      assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
      BlendMask = 0;
      for (int i = 0; i < 8; ++i)
        if (RepeatedMask[i] >= 8)
          BlendMask |= 1ull << i;
      return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                         DAG.getTargetConstant(BlendMask, DL, MVT::i8));
    }
    // Use PBLENDW for lower/upper lanes and then blend lanes.
    // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
    // merge to VSELECT where useful.
    uint64_t LoMask = BlendMask & 0xFF;
    uint64_t HiMask = (BlendMask >> 8) & 0xFF;
    if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
      SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                               DAG.getTargetConstant(LoMask, DL, MVT::i8));
      SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                               DAG.getTargetConstant(HiMask, DL, MVT::i8));
      return DAG.getVectorShuffle(
          MVT::v16i16, DL, Lo, Hi,
          {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
    }
    [[fallthrough]];
  }
  case MVT::v32i8:
    assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
    [[fallthrough]];
  case MVT::v16i8: {
    assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");

    // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
    if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                               Subtarget, DAG))
      return Masked;

    if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
      MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
      SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
      return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
    }

    // If we have VPTERNLOG, we can use that as a bit blend.
    if (Subtarget.hasVLX())
      if (SDValue BitBlend =
              lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
        return BitBlend;

    // Scale the blend by the number of bytes per element.
    int Scale = VT.getScalarSizeInBits() / 8;

    // This form of blend is always done on bytes. Compute the byte vector
    // type.
    MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);

    // x86 allows load folding with blendvb from the 2nd source operand. But
    // we are still using LLVM select here (see comment below), so that's V1.
    // If V2 can be load-folded and V1 cannot be load-folded, then commute to
    // allow that load-folding possibility.
    if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
      ShuffleVectorSDNode::commuteMask(Mask);
      std::swap(V1, V2);
    }

    // Compute the VSELECT mask. Note that VSELECT is really confusing in the
    // mix of LLVM's code generator and the x86 backend. We tell the code
    // generator that boolean values in the elements of an x86 vector register
    // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
    // mapping a select to operand #1, and 'false' mapping to operand #2. The
    // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
    // of the element (the remaining are ignored) and 0 in that high bit would
    // mean operand #1 while 1 in the high bit would mean operand #2. So while
    // the LLVM model for boolean values in vector elements gets the relevant
    // bit set, it is set backwards and over constrained relative to x86's
    // actual model.
    SmallVector<SDValue, 32> VSELECTMask;
    for (int i = 0, Size = Mask.size(); i < Size; ++i)
      for (int j = 0; j < Scale; ++j)
        VSELECTMask.push_back(
            Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
                        : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
                                          MVT::i8));

    V1 = DAG.getBitcast(BlendVT, V1);
    V2 = DAG.getBitcast(BlendVT, V2);
    return DAG.getBitcast(
        VT,
        DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
                      V1, V2));
  }
  case MVT::v16f32:
  case MVT::v8f64:
  case MVT::v8i64:
  case MVT::v16i32:
  case MVT::v32i16:
  case MVT::v64i8: {
    // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
    bool OptForSize = DAG.shouldOptForSize();
    if (!OptForSize) {
      if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                                 Subtarget, DAG))
        return Masked;
    }

    // Otherwise load an immediate into a GPR, cast to k-register, and use a
    // masked move.
    MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
    SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
    return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
  }
  default:
    llvm_unreachable("Not a supported integer vector type!");
  }
}

/// Try to lower as a blend of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can blend elements from two inputs and
/// then reduce the shuffle to a single-input permutation.
static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             SelectionDAG &DAG,
                                             bool ImmBlends = false) {
  // We build up the blend mask while checking whether a blend is a viable way
  // to reduce the shuffle.
  SmallVector<int, 32> BlendMask(Mask.size(), -1);
  SmallVector<int, 32> PermuteMask(Mask.size(), -1);

  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");

    if (BlendMask[Mask[i] % Size] < 0)
      BlendMask[Mask[i] % Size] = Mask[i];
    else if (BlendMask[Mask[i] % Size] != Mask[i])
      return SDValue(); // Can't blend in the needed input!

    PermuteMask[i] = Mask[i] % Size;
  }

  // If only immediate blends, then bail if the blend mask can't be widened to
  // i16 blends.
  unsigned EltSize = VT.getScalarSizeInBits();
  if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
    return SDValue();

  SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
  return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
}

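// e.g. a v4i32 shuffle <1,4,3,6> decomposes into the blend <4,1,6,3> followed
// by the single-input permute <1,0,3,2>.
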
/// Try to lower as an unpack of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can unpack elements from two inputs and
/// then reduce the shuffle to a single-input (wider) permutation.
static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {
  int NumElts = Mask.size();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumLaneElts = NumElts / NumLanes;
  int NumHalfLaneElts = NumLaneElts / 2;

  bool MatchLo = true, MatchHi = true;
  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};

  // Determine UNPCKL/UNPCKH type and operand order.
  for (int Elt = 0; Elt != NumElts; ++Elt) {
    int M = Mask[Elt];
    if (M < 0)
      continue;

    // Normalize the mask value depending on whether it's V1 or V2.
    int NormM = M;
    SDValue &Op = Ops[Elt & 1];
    if (M < NumElts && (Op.isUndef() || Op == V1))
      Op = V1;
    else if (NumElts <= M && (Op.isUndef() || Op == V2)) {
      Op = V2;
      NormM -= NumElts;
    } else
      return SDValue();

    bool MatchLoAnyLane = false, MatchHiAnyLane = false;
    for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
      int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
      MatchLoAnyLane |= isUndefOrInRange(NormM, Lo, Mid);
      MatchHiAnyLane |= isUndefOrInRange(NormM, Mid, Hi);
      if (MatchLoAnyLane || MatchHiAnyLane) {
        assert((MatchLoAnyLane ^ MatchHiAnyLane) &&
               "Failed to match UNPCKLO/UNPCKHI");
        break;
      }
    }
    MatchLo &= MatchLoAnyLane;
    MatchHi &= MatchHiAnyLane;
    if (!MatchLo && !MatchHi)
      return SDValue();
  }
  assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");

  // Element indices have changed after unpacking. Calculate permute mask
  // so that they will be put back to the position as dictated by the
  // original shuffle mask indices.
  SmallVector<int, 32> PermuteMask(NumElts, -1);
  for (int Elt = 0; Elt != NumElts; ++Elt) {
    int M = Mask[Elt];
    if (M < 0)
      continue;
    int NormM = M;
    if (NumElts <= M)
      NormM -= NumElts;
    bool IsFirstOp = M < NumElts;
    int BaseMaskElt =
        NumLaneElts * (NormM / NumLaneElts) + (2 * (NormM % NumHalfLaneElts));
    if ((IsFirstOp && V1 == Ops[0]) || (!IsFirstOp && V2 == Ops[0]))
      PermuteMask[Elt] = BaseMaskElt;
    else if ((IsFirstOp && V1 == Ops[1]) || (!IsFirstOp && V2 == Ops[1]))
      PermuteMask[Elt] = BaseMaskElt + 1;
    assert(PermuteMask[Elt] != -1 &&
           "Input mask element is defined but failed to assign permute mask");
  }

  unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
  SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
  return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
}

/// Try to lower a shuffle as a permute of the inputs followed by an
/// UNPCK instruction.
///
/// This specifically targets cases where we end up with alternating between
/// the two inputs, and so can permute them into something that feeds a single
/// UNPCK instruction. Note that this routine only targets integer vectors
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
static SDValue lowerShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
                                              SDValue V1, SDValue V2,
                                              ArrayRef<int> Mask,
                                              const X86Subtarget &Subtarget,
                                              SelectionDAG &DAG) {
  int Size = Mask.size();
  assert(Mask.size() >= 2 && "Single element masks are invalid.");

  // This routine only supports 128-bit integer dual input vectors.
  if (VT.isFloatingPoint() || !VT.is128BitVector() || V2.isUndef())
    return SDValue();

  int NumLoInputs =
      count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
  int NumHiInputs =
      count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });

  bool UnpackLo = NumLoInputs >= NumHiInputs;

  auto TryUnpack = [&](int ScalarSize, int Scale) {
    SmallVector<int, 16> V1Mask((unsigned)Size, -1);
    SmallVector<int, 16> V2Mask((unsigned)Size, -1);

    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      // Each element of the unpack contains Scale elements from this mask.
      int UnpackIdx = i / Scale;

      // We only handle the case where V1 feeds the first slots of the unpack.
      // We rely on canonicalization to ensure this is the case.
      if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
        return SDValue();

      // Setup the mask for this input. The indexing is tricky as we have to
      // handle the unpack stride.
      SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
      VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
          Mask[i] % Size;
    }

    // If we will have to shuffle both inputs to use the unpack, check whether
    // we can just unpack first and shuffle the result. If so, skip this unpack.
    if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
        !isNoopShuffleMask(V2Mask))
      return SDValue();

    // Shuffle the inputs into place.
    V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
    V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);

    // Cast the inputs to the type we will use to unpack them.
    MVT UnpackVT =
        MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
    V1 = DAG.getBitcast(UnpackVT, V1);
    V2 = DAG.getBitcast(UnpackVT, V2);

    // Unpack the inputs and cast the result back to the desired type.
    return DAG.getBitcast(
        VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                        UnpackVT, V1, V2));
  };

  // We try each unpack from the largest to the smallest to try and find one
  // that fits this mask.
  int OrigScalarSize = VT.getScalarSizeInBits();
  for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
    if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
      return Unpack;

  // If we're shuffling with a zero vector then we're better off not doing
  // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
  if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
      ISD::isBuildVectorAllZeros(V2.getNode()))
    return SDValue();

  // If none of the unpack-rooted lowerings worked (or were profitable) try an
  // initial unpack.
  if (NumLoInputs == 0 || NumHiInputs == 0) {
    assert((NumLoInputs > 0 || NumHiInputs > 0) &&
           "We have to have *some* inputs!");
    int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;

    // FIXME: We could consider the total complexity of the permute of each
    // possible unpacking. Or at the least we should consider how many
    // half-crossings are created.
    // FIXME: We could consider commuting the unpacks.

    SmallVector<int, 32> PermMask((unsigned)Size, -1);
    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");

      PermMask[i] =
          2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
    }
    return DAG.getVectorShuffle(
        VT, DL,
        DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL, DL, VT,
                    V1, V2),
        DAG.getUNDEF(VT), PermMask);
  }

  return SDValue();
}

/// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
/// permuting the elements of the result in place.
static SDValue lowerShuffleAsByteRotateAndPermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
      (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
      (VT.is512BitVector() && !Subtarget.hasBWI()))
    return SDValue();

  // We don't currently support lane crossing permutes.
  if (is128BitLaneCrossingShuffleMask(VT, Mask))
    return SDValue();

  int Scale = VT.getScalarSizeInBits() / 8;
  int NumLanes = VT.getSizeInBits() / 128;
  int NumElts = VT.getVectorNumElements();
  int NumEltsPerLane = NumElts / NumLanes;

  // Determine range of mask elts.
  bool Blend1 = true;
  bool Blend2 = true;
  std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
  std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
  for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
    for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
      int M = Mask[Lane + Elt];
      if (M < 0)
        continue;
      if (M < NumElts) {
        Blend1 &= (M == (Lane + Elt));
        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
        M = M % NumEltsPerLane;
        Range1.first = std::min(Range1.first, M);
        Range1.second = std::max(Range1.second, M);
      } else {
        M -= NumElts;
        Blend2 &= (M == (Lane + Elt));
        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
        M = M % NumEltsPerLane;
        Range2.first = std::min(Range2.first, M);
        Range2.second = std::max(Range2.second, M);
      }
    }
  }

  // Bail if we don't need both elements.
  // TODO - it might be worth doing this for unary shuffles if the permute
  // can be widened.
  if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
      !(0 <= Range2.first && Range2.second < NumEltsPerLane))
    return SDValue();

  if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
    return SDValue();

  // Rotate the 2 ops so we can access both ranges, then permute the result.
  auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
    MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
    SDValue Rotate = DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
                        DAG.getBitcast(ByteVT, Lo),
                        DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
    SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
    for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
      for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
        int M = Mask[Lane + Elt];
        if (M < 0)
          continue;
        if (M < NumElts)
          PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
        else
          PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
      }
    }
    return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
  };

  // Check if the ranges are small enough to rotate from either direction.
  if (Range2.second < Range1.first)
    return RotateAndPermute(V1, V2, Range1.first, 0);
  if (Range1.second < Range2.first)
    return RotateAndPermute(V2, V1, Range2.first, NumElts);
  return SDValue();
}

static bool isBroadcastShuffleMask(ArrayRef<int> Mask) {
  return isUndefOrEqual(Mask, 0);
}

static bool isNoopOrBroadcastShuffleMask(ArrayRef<int> Mask) {
  return isNoopShuffleMask(Mask) || isBroadcastShuffleMask(Mask);
}

/// Check if the Mask consists of the same element repeated multiple times.
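/// e.g. <5,5,undef,5,5,5,undef,5> qualifies, while
/// <5,undef,undef,undef,undef,undef,undef,undef> is rejected because more
/// than half of the mask is undef.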
static bool isSingleElementRepeatedMask(ArrayRef<int> Mask) {
  size_t NumUndefs = 0;
  std::optional<int> UniqueElt;
  for (int Elt : Mask) {
    if (Elt == SM_SentinelUndef) {
      NumUndefs++;
      continue;
    }
    if (UniqueElt.has_value() && UniqueElt.value() != Elt)
      return false;
    UniqueElt = Elt;
  }
  // Make sure the element is repeated enough times by checking the number of
  // undefs is small.
  return NumUndefs <= Mask.size() / 2 && UniqueElt.has_value();
}

/// Generic routine to decompose a shuffle and blend into independent
/// blends and permutes.
///
/// This matches the extremely common pattern for handling combined
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations. It will try to pick the best arrangement of shuffles and
/// blends. For vXi8/vXi16 shuffles we may use unpack instead of blend.
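///
/// For example (illustrative), the v4i32 mask <2,5,0,7> decomposes into
/// V1Mask == <2,-1,0,-1>, V2Mask == <-1,1,-1,3> and the blend mask
/// FinalMask == <0,5,2,7>.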
static SDValue lowerShuffleAsDecomposedShuffleMerge(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  int NumElts = Mask.size();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumEltsPerLane = NumElts / NumLanes;

  // Shuffle the input elements into the desired positions in V1 and V2 and
  // unpack/blend them together.
  bool IsAlternating = true;
  SmallVector<int, 32> V1Mask(NumElts, -1);
  SmallVector<int, 32> V2Mask(NumElts, -1);
  SmallVector<int, 32> FinalMask(NumElts, -1);
  for (int i = 0; i < NumElts; ++i) {
    int M = Mask[i];
    if (M >= 0 && M < NumElts) {
      V1Mask[i] = M;
      FinalMask[i] = i;
      IsAlternating &= (i & 1) == 0;
    } else if (M >= NumElts) {
      V2Mask[i] = M - NumElts;
      FinalMask[i] = i + NumElts;
      IsAlternating &= (i & 1) == 1;
    }
  }

  // If we effectively only demand the 0'th element of \p Input (though not
  // necessarily only as the 0'th element), then broadcast said input,
  // and change \p InputMask to be a no-op (identity) mask.
  auto canonicalizeBroadcastableInput = [DL, VT, &Subtarget,
                                         &DAG](SDValue &Input,
                                               MutableArrayRef<int> InputMask) {
    unsigned EltSizeInBits = Input.getScalarValueSizeInBits();
    if (!Subtarget.hasAVX2() && (!Subtarget.hasAVX() || EltSizeInBits < 32 ||
                                 !X86::mayFoldLoad(Input, Subtarget)))
      return;
    if (isNoopShuffleMask(InputMask))
      return;
    assert(isBroadcastShuffleMask(InputMask) &&
           "Expected to demand only the 0'th element.");
    Input = DAG.getNode(X86ISD::VBROADCAST, DL, VT, Input);
    for (auto I : enumerate(InputMask)) {
      int &InputMaskElt = I.value();
      if (InputMaskElt >= 0)
        InputMaskElt = I.index();
    }
  };

  // Currently, we may need to produce one shuffle per input, and blend results.
  // It is possible that the shuffle for one of the inputs is already a no-op.
  // See if we can simplify non-no-op shuffles into broadcasts,
  // which we consider to be strictly better than an arbitrary shuffle.
  if (isNoopOrBroadcastShuffleMask(V1Mask) &&
      isNoopOrBroadcastShuffleMask(V2Mask)) {
    canonicalizeBroadcastableInput(V1, V1Mask);
    canonicalizeBroadcastableInput(V2, V2Mask);
  }

  // Try to lower with the simpler initial blend/unpack/rotate strategies unless
  // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
  // the shuffle may be able to fold with a load or other benefit. However, when
  // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
  // pre-shuffle first is a better strategy.
  if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
    // Only prefer immediate blends to unpack/rotate.
    if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
                                                          DAG, true))
      return BlendPerm;
    // If either input vector provides only a single element which is repeated
    // multiple times, unpacking from both input vectors would generate worse
    // code, e.g. for
    // t5: v16i8 = vector_shuffle<16,0,16,1,16,2,16,3,16,4,16,5,16,6,16,7> t2, t4
    // it is better to process t4 first to create a vector of t4[0], then unpack
    // that vector with t2.
    if (!isSingleElementRepeatedMask(V1Mask) &&
        !isSingleElementRepeatedMask(V2Mask))
      if (SDValue UnpackPerm =
              lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask, DAG))
        return UnpackPerm;
    if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
            DL, VT, V1, V2, Mask, Subtarget, DAG))
      return RotatePerm;
    // Unpack/rotate failed - try again with variable blends.
    if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
                                                          DAG))
      return BlendPerm;
    if (VT.getScalarSizeInBits() >= 32)
      if (SDValue PermUnpack = lowerShuffleAsPermuteAndUnpack(
              DL, VT, V1, V2, Mask, Subtarget, DAG))
        return PermUnpack;
  }

  // If the final mask is an alternating blend of vXi8/vXi16, convert to an
  // UNPCKL(SHUFFLE, SHUFFLE) pattern.
  // TODO: It doesn't have to be alternating - but each lane mustn't have more
  // than half the elements coming from each source.
  if (IsAlternating && VT.getScalarSizeInBits() < 32) {
    V1Mask.assign(NumElts, -1);
    V2Mask.assign(NumElts, -1);
    FinalMask.assign(NumElts, -1);
    for (int i = 0; i != NumElts; i += NumEltsPerLane)
      for (int j = 0; j != NumEltsPerLane; ++j) {
        int M = Mask[i + j];
        if (M >= 0 && M < NumElts) {
          V1Mask[i + (j / 2)] = M;
          FinalMask[i + j] = i + (j / 2);
        } else if (M >= NumElts) {
          V2Mask[i + (j / 2)] = M - NumElts;
          FinalMask[i + j] = i + (j / 2) + NumElts;
        }
      }
  }

  V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
  V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
  return DAG.getVectorShuffle(VT, DL, V1, V2, FinalMask);
}

/// Try to lower a vector shuffle as a bit rotation.
///
/// Look for a repeated rotation pattern in each sub group.
/// Returns an ISD::ROTL element rotation amount or -1 if failed.
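///
/// e.g. the v4i32 mask <1,0,3,2> with NumSubElts == 2 rotates every pair of
/// elements by one, so this returns 1.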
static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
  int NumElts = Mask.size();
  assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");

  int RotateAmt = -1;
  for (int i = 0; i != NumElts; i += NumSubElts) {
    for (int j = 0; j != NumSubElts; ++j) {
      int M = Mask[i + j];
      if (M < 0)
        continue;
      if (!isInRange(M, i, i + NumSubElts))
        return -1;
      int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
      if (0 <= RotateAmt && Offset != RotateAmt)
        return -1;
      RotateAmt = Offset;
    }
  }
  return RotateAmt;
}

static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
                                   const X86Subtarget &Subtarget,
                                   ArrayRef<int> Mask) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
  assert(EltSizeInBits < 64 && "Can't rotate 64-bit integers");

  // AVX512 only has vXi32/vXi64 rotates, so limit the rotation sub group size.
  int MinSubElts = Subtarget.hasAVX512() ? std::max(32 / EltSizeInBits, 2) : 2;
  int MaxSubElts = 64 / EltSizeInBits;
  for (int NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
    int RotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
    if (RotateAmt < 0)
      continue;

    int NumElts = Mask.size();
    MVT RotateSVT = MVT::getIntegerVT(EltSizeInBits * NumSubElts);
    RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
    return RotateAmt * EltSizeInBits;
  }

  return -1;
}

/// Lower shuffle using X86ISD::VROTLI rotations.
static SDValue lowerShuffleAsBitRotate(const SDLoc &DL, MVT VT, SDValue V1,
                                       ArrayRef<int> Mask,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  // Only XOP + AVX512 targets have bit rotation instructions.
  // If we at least have SSSE3 (PSHUFB) then we shouldn't attempt to use this.
  bool IsLegal =
      (VT.is128BitVector() && Subtarget.hasXOP()) || Subtarget.hasAVX512();
  if (!IsLegal && Subtarget.hasSSE3())
    return SDValue();

  MVT RotateVT;
  int RotateAmt = matchShuffleAsBitRotate(RotateVT, VT.getScalarSizeInBits(),
                                          Subtarget, Mask);
  if (RotateAmt < 0)
    return SDValue();

  // For pre-SSSE3 targets, if we are shuffling vXi8 elts then ISD::ROTL,
  // expanded to OR(SRL,SHL), will be more efficient, but if they can
  // widen to vXi16 or more then the existing lowering will be better.
  if (!IsLegal) {
    if ((RotateAmt % 16) == 0)
      return SDValue();
    // TODO: Use getTargetVShiftByConstNode.
    unsigned ShlAmt = RotateAmt;
    unsigned SrlAmt = RotateVT.getScalarSizeInBits() - RotateAmt;
    V1 = DAG.getBitcast(RotateVT, V1);
    SDValue SHL = DAG.getNode(X86ISD::VSHLI, DL, RotateVT, V1,
                              DAG.getTargetConstant(ShlAmt, DL, MVT::i8));
    SDValue SRL = DAG.getNode(X86ISD::VSRLI, DL, RotateVT, V1,
                              DAG.getTargetConstant(SrlAmt, DL, MVT::i8));
    SDValue Rot = DAG.getNode(ISD::OR, DL, RotateVT, SHL, SRL);
    return DAG.getBitcast(VT, Rot);
  }

  SDValue Rot =
      DAG.getNode(X86ISD::VROTLI, DL, RotateVT, DAG.getBitcast(RotateVT, V1),
                  DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
  return DAG.getBitcast(VT, Rot);
}

/// Try to match a vector shuffle as an element rotation.
///
/// This is used to support PALIGNR for SSSE3, and VALIGND/Q for AVX512.
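///
/// For example, the v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2] matches with a
/// rotation of 3: the leading elements are the tail of V2 (assigned to Hi)
/// and the trailing elements are the head of V1 (assigned to Lo).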
static int matchShuffleAsElementRotate(SDValue &V1, SDValue &V2,
                                       ArrayRef<int> Mask) {
  int NumElts = Mask.size();

  // We need to detect various ways of spelling a rotation:
  //   [11, 12, 13, 14, 15,  0,  1,  2]
  //   [-1, 12, 13, 14, -1, -1,  1, -1]
  //   [-1, -1, -1, -1, -1, -1,  1,  2]
  //   [ 3,  4,  5,  6,  7,  8,  9, 10]
  //   [-1,  4,  5,  6, -1, -1,  9, -1]
  //   [-1,  4,  5,  6, -1, -1, -1, -1]
  int Rotation = 0;
  SDValue Lo, Hi;
  for (int i = 0; i < NumElts; ++i) {
    int M = Mask[i];
    assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
           "Unexpected mask index.");
    if (M < 0)
      continue;

    // Determine where a rotated vector would have started.
    int StartIdx = i - (M % NumElts);
    if (StartIdx == 0)
      // The identity rotation isn't interesting, stop.
      return -1;

    // If we found the tail of a vector the rotation must be the missing
    // front. If we found the head of a vector, it must be how much of the
    // head to skip.
    int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;

    if (Rotation == 0)
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
      // The rotations don't match, so we can't match this mask.
      return -1;

    // Compute which value this mask is pointing at.
    SDValue MaskV = M < NumElts ? V1 : V2;

    // Compute which of the two target values this index should be assigned
    // to. This reflects whether the high elements are remaining or the low
    // elements are remaining.
    SDValue &TargetV = StartIdx < 0 ? Hi : Lo;

    // Either set up this value if we've not encountered it before, or check
    // that it remains consistent.
    if (!TargetV)
      TargetV = MaskV;
    else if (TargetV != MaskV)
      // This may be a rotation, but it pulls from the inputs in some
      // unsupported interleaving.
      return -1;
  }

  // Check that we successfully analyzed the mask, and normalize the results.
  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((Lo || Hi) && "Failed to find a rotated input vector!");

  if (!Lo)
    Lo = Hi;
  else if (!Hi)
    Hi = Lo;

  V1 = Lo;
  V2 = Hi;

  return Rotation;
}

/// Try to lower a vector shuffle as a byte rotation.
///
/// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
/// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
/// try to generically lower a vector shuffle through such a pattern. It
/// does not check for the profitability of lowering either as PALIGNR or
/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
/// This matches shuffle vectors that look like:
///
///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
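///
/// PALIGNR counts in bytes, so the element rotation is scaled by the lane
/// element size; e.g. an element rotation of 3 on v8i16 becomes a byte
/// rotation of 6 (Scale == 16/8 == 2).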
static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
                                    ArrayRef<int> Mask) {
  // Don't accept any shuffles with zero elements.
  if (isAnyZero(Mask))
    return -1;

  // PALIGNR works on 128-bit lanes.
  SmallVector<int, 16> RepeatedMask;
  if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
    return -1;

  int Rotation = matchShuffleAsElementRotate(V1, V2, RepeatedMask);
  if (Rotation <= 0)
    return -1;

  // PALIGNR rotates bytes, so we need to scale the
  // rotation based on how many bytes are in the vector lane.
  int NumElts = RepeatedMask.size();
  int Scale = 16 / NumElts;
  return Rotation * Scale;
}

static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
                                        SDValue V2, ArrayRef<int> Mask,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");

  SDValue Lo = V1, Hi = V2;
  int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
  if (ByteRotation <= 0)
    return SDValue();

  // Cast the inputs to i8 vector of correct length to match PALIGNR or
  // PSLLDQ/PSRLDQ.
  MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
  Lo = DAG.getBitcast(ByteVT, Lo);
  Hi = DAG.getBitcast(ByteVT, Hi);

  // SSSE3 targets can use the palignr instruction.
  if (Subtarget.hasSSSE3()) {
    assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
           "512-bit PALIGNR requires BWI instructions");
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
                        DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
  }

  assert(VT.is128BitVector() &&
         "Rotate-based lowering only supports 128-bit lowering!");
  assert(Mask.size() <= 16 &&
         "Can shuffle at most 16 bytes in a 128-bit vector!");
  assert(ByteVT == MVT::v16i8 &&
         "SSE2 rotate lowering only needed for v16i8!");

  // Default SSE2 implementation.
  int LoByteShift = 16 - ByteRotation;
  int HiByteShift = ByteRotation;

  SDValue LoShift =
      DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
                  DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
  SDValue HiShift =
      DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
                  DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
  return DAG.getBitcast(VT,
                        DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
}

/// Try to lower a vector shuffle as a dword/qword rotation.
///
/// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
/// rotation of the concatenation of two vectors; this routine will
/// try to generically lower a vector shuffle through such a pattern.
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
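///
/// e.g. the v8i32 mask <3,4,5,6,7,8,9,10> matches with an element rotation
/// of 3 and can lower to a single VALIGND with immediate 3.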
static SDValue lowerShuffleAsVALIGN(const SDLoc &DL, MVT VT, SDValue V1,
                                    SDValue V2, ArrayRef<int> Mask,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
         "Only 32-bit and 64-bit elements are supported!");

  // 128/256-bit vectors are only supported with VLX.
  assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
         && "VLX required for 128/256-bit vectors");

  SDValue Lo = V1, Hi = V2;
  int Rotation = matchShuffleAsElementRotate(Lo, Hi, Mask);
  if (Rotation <= 0)
    return SDValue();

  return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
                     DAG.getTargetConstant(Rotation, DL, MVT::i8));
}

/// Try to lower a vector shuffle as a byte shift sequence.
static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
                                           SDValue V2, ArrayRef<int> Mask,
                                           const APInt &Zeroable,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
  assert(VT.is128BitVector() && "Only 128-bit vectors supported");

  // We need a shuffle that has zeros at one/both ends and a sequential
  // shuffle from one source within.
  unsigned ZeroLo = Zeroable.countr_one();
  unsigned ZeroHi = Zeroable.countl_one();
  if (!ZeroLo && !ZeroHi)
    return SDValue();

  unsigned NumElts = Mask.size();
  unsigned Len = NumElts - (ZeroLo + ZeroHi);
  if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
    return SDValue();

  unsigned Scale = VT.getScalarSizeInBits() / 8;
  ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
  if (!isUndefOrInRange(StubMask, 0, NumElts) &&
      !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
    return SDValue();

  SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
  Res = DAG.getBitcast(MVT::v16i8, Res);

  // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
  // inner sequential set of elements, possibly offset:
  // 01234567 --> zzzzzz01 --> 1zzzzzzz
  // 01234567 --> 4567zzzz --> zzzzz456
  // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
  if (ZeroLo == 0) {
    unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
  } else if (ZeroHi == 0) {
    unsigned Shift = Mask[ZeroLo] % NumElts;
    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
  } else if (!Subtarget.hasSSSE3()) {
    // If we don't have PSHUFB then it's worth avoiding an AND constant mask
    // by performing 3 byte shifts. Shuffle combining can kick in above that.
    // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
    unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Shift += Mask[ZeroLo] % NumElts;
    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
                      DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
  } else
    return SDValue();

  return DAG.getBitcast(VT, Res);
}

/// Try to lower a vector shuffle as a bit shift (shifts in zeros).
///
/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
/// matches elements from one of the input vectors shuffled to the left or
/// right with zeroable elements 'shifted in'. It handles both the strictly
/// bit-wise element shifts and the byte shift across an entire 128-bit double
/// quad word lane.
///
/// PSHL : (little-endian) left bit shift.
/// [ zz, 0, zz,  2 ]
/// [ -1, 4, zz, -1 ]
/// PSRL : (little-endian) right bit shift.
/// [  1, zz,  3, zz]
/// [ -1, -1,  7, zz]
/// PSLLDQ : (little-endian) left byte shift
/// [ zz,  0,  1,  2,  3,  4,  5,  6]
/// [ zz, zz, -1, -1,  2,  3,  4, -1]
/// [ zz, zz, zz, zz, zz, zz, -1,  1]
/// PSRLDQ : (little-endian) right byte shift
/// [  5, 6,  7, zz, zz, zz, zz, zz]
/// [ -1, 5,  6,  7, zz, zz, zz, zz]
/// [  1, 2, -1, -1, -1, -1, zz, zz]
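///
/// As a worked example, the v4i32 mask [ zz, 0, zz, 2 ] is matched by
/// grouping pairs of i32 elements into i64s (Scale == 2, Shift == 1, Left)
/// and returns a 32-bit VSHLI on v2i64.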
static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
                               unsigned ScalarSizeInBits, ArrayRef<int> Mask,
                               int MaskOffset, const APInt &Zeroable,
                               const X86Subtarget &Subtarget) {
  int Size = Mask.size();
  unsigned SizeInBits = Size * ScalarSizeInBits;

  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i < Size; i += Scale)
      for (int j = 0; j < Shift; ++j)
        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
          return false;

    return true;
  };

  auto MatchShift = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i != Size; i += Scale) {
      unsigned Pos = Left ? i + Shift : i;
      unsigned Low = Left ? i : i + Shift;
      unsigned Len = Scale - Shift;
      if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
        return -1;
    }

    int ShiftEltBits = ScalarSizeInBits * Scale;
    bool ByteShift = ShiftEltBits > 64;
    Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
                  : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
    int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);

    // Normalize the scale for byte shifts to still produce an i64 element
    // type.
    Scale = ByteShift ? Scale / 2 : Scale;

    // We need to round trip through the appropriate type for the shift.
    MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
    ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
                        : MVT::getVectorVT(ShiftSVT, Size / Scale);
    return (int)ShiftAmt;
  };

  // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
  // keep doubling the size of the integer elements up to that. We can
  // then shift the elements of the integer vector by whole multiples of
  // their width within the elements of the larger integer vector. Test each
  // multiple to see if we can find a match with the moved element indices
  // and that the shifted in elements are all zeroable.
  unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
  for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
    for (int Shift = 1; Shift != Scale; ++Shift)
      for (bool Left : {true, false})
        if (CheckZeros(Shift, Scale, Left)) {
          int ShiftAmt = MatchShift(Shift, Scale, Left);
          if (0 < ShiftAmt)
            return ShiftAmt;
        }

  // No match.
  return -1;
}

static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
                                   SDValue V2, ArrayRef<int> Mask,
                                   const APInt &Zeroable,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG, bool BitwiseOnly) {
  int Size = Mask.size();
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  MVT ShiftVT;
  unsigned Opcode;
  SDValue V = V1;

  // Try to match shuffle against V1 shift.
  int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
                                     Mask, 0, Zeroable, Subtarget);

  // If V1 failed, try to match shuffle against V2 shift.
  if (ShiftAmt < 0) {
    ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
                                   Mask, Size, Zeroable, Subtarget);
    V = V2;
  }

  if (ShiftAmt < 0)
    return SDValue();

  if (BitwiseOnly && (Opcode == X86ISD::VSHLDQ || Opcode == X86ISD::VSRLDQ))
    return SDValue();

  assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
         "Illegal integer vector type");
  V = DAG.getBitcast(ShiftVT, V);
  V = DAG.getNode(Opcode, DL, ShiftVT, V,
                  DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
  return DAG.getBitcast(VT, V);
}

// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
// Remainder of lower half result is zero and upper half is all undef.
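// e.g. for v8i16 [ 1, 2, zz, zz, u, u, u, u ] we match Len == 2 elements
// starting at Idx == 1, giving BitLen == 32 and BitIdx == 16.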
static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
                                ArrayRef<int> Mask, uint64_t &BitLen,
                                uint64_t &BitIdx, const APInt &Zeroable) {
  int Size = Mask.size();
  int HalfSize = Size / 2;
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
  assert(!Zeroable.isAllOnes() && "Fully zeroable shuffle mask");

  // Upper half must be undefined.
  if (!isUndefUpperHalf(Mask))
    return false;

  // Determine the extraction length from the part of the
  // lower half that isn't zeroable.
  int Len = HalfSize;
  for (; Len > 0; --Len)
    if (!Zeroable[Len - 1])
      break;
  assert(Len > 0 && "Zeroable shuffle mask");

  // Attempt to match first Len sequential elements from the lower half.
  SDValue Src;
  int Idx = -1;
  for (int i = 0; i != Len; ++i) {
    int M = Mask[i];
    if (M == SM_SentinelUndef)
      continue;
    SDValue &V = (M < Size ? V1 : V2);
    M = M % Size;

    // The extracted elements must start at a valid index and all mask
    // elements must be in the lower half.
    if (i > M || M >= HalfSize)
      return false;

    if (Idx < 0 || (Src == V && Idx == (M - i))) {
      Src = V;
      Idx = M - i;
      continue;
    }
    return false;
  }

  if (!Src || Idx < 0)
    return false;

  assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
  BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
  BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
  V1 = Src;
  return true;
}

// INSERTQ: Extract lowest Len elements from lower half of second source and
// insert over first source, starting at Idx.
// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
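// e.g. for v8i16 [ 0, 8, 9, 3, u, u, u, u ] we insert Len == 2 elements of
// V2 at Idx == 1, giving BitLen == 32 and BitIdx == 16.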
static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
                                  ArrayRef<int> Mask, uint64_t &BitLen,
                                  uint64_t &BitIdx) {
  int Size = Mask.size();
  int HalfSize = Size / 2;
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  // Upper half must be undefined.
  if (!isUndefUpperHalf(Mask))
    return false;

  for (int Idx = 0; Idx != HalfSize; ++Idx) {
    SDValue Base;

    // Attempt to match first source from mask before insertion point.
    if (isUndefInRange(Mask, 0, Idx)) {
      // Undefined prefix - any base will do.
    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
      Base = V1;
    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
      Base = V2;
    } else {
      continue;
    }

    // Extend the extraction length looking to match both the insertion of
    // the second source and the remaining elements of the first.
    for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
      SDValue Insert;
      int Len = Hi - Idx;

      // Match insertion.
      if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
        Insert = V1;
      } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
        Insert = V2;
      } else {
        continue;
      }

      // Match the remaining elements of the lower half.
      if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
        // Undefined suffix - keep the current base.
      } else if ((!Base || (Base == V1)) &&
                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
        Base = V1;
      } else if ((!Base || (Base == V2)) &&
                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
                                            Size + Hi)) {
        Base = V2;
      } else {
        continue;
      }

      BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
      BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
      V1 = Base;
      V2 = Insert;
      return true;
    }
  }

  return false;
}

/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable, SelectionDAG &DAG) {
  uint64_t BitLen, BitIdx;
  if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
    return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));

  if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
    return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
                       V2 ? V2 : DAG.getUNDEF(VT),
                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));

  return SDValue();
}

/// Lower a vector shuffle as a zero or any extension.
///
/// Given a specific number of elements, element bit width, and extension
/// stride, produce either a zero or any extension based on the available
/// features of the subtarget. The extended elements are consecutive and
/// can begin at an offset element index in the input; to avoid excess
/// shuffling the offset must either be in the bottom lane or at the start
/// of a higher lane. All extended elements must be from the same lane.
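///
/// e.g. zero-extending the low four i8 elements of a v16i8 input to i32
/// corresponds to Scale == 4 with Offset == 0.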
static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
    ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(Scale > 1 && "Need a scale to extend.");
  int EltBits = VT.getScalarSizeInBits();
  int NumElements = VT.getVectorNumElements();
  int NumEltsPerLane = 128 / EltBits;
  int OffsetLane = Offset / NumEltsPerLane;
  assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
         "Only 8, 16, and 32 bit elements can be extended.");
  assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
  assert(0 <= Offset && "Extension offset must be positive.");
  assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
         "Extension offset must be in the first lane or start an upper lane.");

  // Check that an index is in same lane as the base offset.
  auto SafeOffset = [&](int Idx) {
    return OffsetLane == (Idx / NumEltsPerLane);
  };

  // Shift along an input so that the offset base moves to the first element.
  auto ShuffleOffset = [&](SDValue V) {
    if (!Offset)
      return V;

    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
    for (int i = 0; i * Scale < NumElements; ++i) {
      int SrcIdx = i + Offset;
      ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
    }
    return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
  };

  // Found a valid a/zext mask! Try various lowering strategies based on the
  // input type and available ISA extensions.
  if (Subtarget.hasSSE41()) {
    // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
    // PUNPCK will catch this in a later shuffle match.
    if (Offset && Scale == 2 && VT.is128BitVector())
      return SDValue();
    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
                                 NumElements / Scale);
    InputV = DAG.getBitcast(VT, InputV);
    InputV = ShuffleOffset(InputV);
    InputV = getEXTEND_VECTOR_INREG(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND,
                                    DL, ExtVT, InputV, DAG);
    return DAG.getBitcast(VT, InputV);
  }

  assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
  InputV = DAG.getBitcast(VT, InputV);

  // For any extends we can cheat for larger element sizes and use shuffle
  // instructions that can fold with a load and/or copy.
  if (AnyExt && EltBits == 32) {
    int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
                         -1};
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                        DAG.getBitcast(MVT::v4i32, InputV),
                        getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
  }
  if (AnyExt && EltBits == 16 && Scale > 2) {
    int PSHUFDMask[4] = {Offset / 2, -1,
                         SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
    InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                         DAG.getBitcast(MVT::v4i32, InputV),
                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
    int PSHUFWMask[4] = {1, -1, -1, -1};
    unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
    return DAG.getBitcast(
        VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
                        DAG.getBitcast(MVT::v8i16, InputV),
                        getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
  }

  // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
  // to 64-bits.
  if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
    assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
    assert(VT.is128BitVector() && "Unexpected vector width!");

    int LoIdx = Offset * EltBits;
    SDValue Lo = DAG.getBitcast(
        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
                                DAG.getTargetConstant(LoIdx, DL, MVT::i8)));

    if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
      return DAG.getBitcast(VT, Lo);

    int HiIdx = (Offset + 1) * EltBits;
    SDValue Hi = DAG.getBitcast(
        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
                                DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
    return DAG.getBitcast(VT,
                          DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
  }

  // If this would require more than 2 unpack instructions to expand, use
  // pshufb when available. We can only use more than 2 unpack instructions
  // when zero extending i8 elements which also makes it easier to use pshufb.
  if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
    assert(NumElements == 16 && "Unexpected byte vector width!");
    SDValue PSHUFBMask[16];
    for (int i = 0; i < 16; ++i) {
      int Idx = Offset + (i / Scale);
      if ((i % Scale == 0 && SafeOffset(Idx))) {
        PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
        continue;
      }
      PSHUFBMask[i] =
          AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
    }
    InputV = DAG.getBitcast(MVT::v16i8, InputV);
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
                        DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
  }

  // If we are extending from an offset, ensure we start on a boundary that
  // we can unpack from.
  int AlignToUnpack = Offset % (NumElements / Scale);
  if (AlignToUnpack) {
    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
    for (int i = AlignToUnpack; i < NumElements; ++i)
      ShMask[i - AlignToUnpack] = i;
    InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
    Offset -= AlignToUnpack;
  }

  // Otherwise emit a sequence of unpacks.
  do {
    unsigned UnpackLoHi = X86ISD::UNPCKL;
    if (Offset >= (NumElements / 2)) {
      UnpackLoHi = X86ISD::UNPCKH;
      Offset -= (NumElements / 2);
    }

    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
    SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
                         : getZeroVector(InputVT, Subtarget, DAG, DL);
    InputV = DAG.getBitcast(InputVT, InputV);
    InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
  } while (Scale > 1);
  return DAG.getBitcast(VT, InputV);
}

/// Try to lower a vector shuffle as a zero extension on any microarch.
///
/// This routine will try to do everything in its power to cleverly lower
/// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering, it tries to aggressively
/// match this pattern. It will use all of the micro-architectural details it
/// can to emit an efficient lowering. It handles both blends with all-zero
/// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
/// masking out later).
///
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
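///
/// e.g. the v4i32 mask <0,zz,1,zz> is handled here as a zero extension of
/// the low two i32 elements of V1 to i64.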
static SDValue lowerShuffleAsZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
  int Bits = VT.getSizeInBits();
  int NumLanes = Bits / 128;
  int NumElements = VT.getVectorNumElements();
  int NumEltsPerLane = NumElements / NumLanes;
  assert(VT.getScalarSizeInBits() <= 32 &&
         "Exceeds 32-bit integer zero extension limit");
  assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");

  // Define a helper function to check a particular ext-scale and lower to it if
  // valid.
  auto Lower = [&](int Scale) -> SDValue {
    SDValue InputV;
    bool AnyExt = true;
    int Offset = 0;
    int Matches = 0;
    for (int i = 0; i < NumElements; ++i) {
      int M = Mask[i];
      if (M < 0)
        continue; // Valid anywhere but doesn't tell us anything.
      if (i % Scale != 0) {
        // Each of the extended elements need to be zeroable.
        if (!Zeroable[i])
          return SDValue();

        // We no longer are in the anyext case.
        AnyExt = false;
        continue;
      }

      // Each of the base elements needs to be consecutive indices into the
      // same input vector.
      SDValue V = M < NumElements ? V1 : V2;
      M = M % NumElements;
      if (!InputV) {
        InputV = V;
        Offset = M - (i / Scale);
      } else if (InputV != V)
        return SDValue(); // Flip-flopping inputs.

      // Offset must start in the lowest 128-bit lane or at the start of an
      // upper lane.
      // FIXME: Is it ever worth allowing a negative base offset?
      if (!((0 <= Offset && Offset < NumEltsPerLane) ||
            (Offset % NumEltsPerLane) == 0))
        return SDValue();

      // If we are offsetting, all referenced entries must come from the same
      // lane.
      if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
        return SDValue();

      if ((M % NumElements) != (Offset + (i / Scale)))
        return SDValue(); // Non-consecutive strided elements.
      Matches++;
    }

    // If we fail to find an input, we have a zero-shuffle which should always
    // have already been handled.
    // FIXME: Maybe handle this here in case during blending we end up with one?
    if (!InputV)
      return SDValue();

    // If we are offsetting, don't extend if we only match a single input, we
    // can always do better by using a basic PSHUF or PUNPCK.
    if (Offset != 0 && Matches < 2)
      return SDValue();

    return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
                                                 InputV, Mask, Subtarget, DAG);
  };

  // The widest scale possible for extending is to a 64-bit integer.
  assert(Bits % 64 == 0 &&
         "The number of bits in a vector must be divisible by 64 on x86!");
  int NumExtElements = Bits / 64;

  // Each iteration, try extending the elements half as much, but into twice as
  // many elements.
  for (; NumExtElements < NumElements; NumExtElements *= 2) {
    assert(NumElements % NumExtElements == 0 &&
           "The input vector size must be divisible by the extended size.");
    if (SDValue V = Lower(NumElements / NumExtElements))
      return V;
  }

  // General extends failed, but 128-bit vectors may be able to use MOVQ.
  if (Bits != 128)
    return SDValue();

  // Returns one of the source operands if the shuffle can be reduced to a
  // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
  auto CanZExtLowHalf = [&]() {
    for (int i = NumElements / 2; i != NumElements; ++i)
      if (!Zeroable[i])
        return SDValue();
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
      return V1;
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
      return V2;
    return SDValue();
  };

  if (SDValue V = CanZExtLowHalf()) {
    V = DAG.getBitcast(MVT::v2i64, V);
    V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
    return DAG.getBitcast(VT, V);
  }

  // No viable ext lowering found.
  return SDValue();
}

/// Try to get a scalar value for a specific element of a vector.
///
/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
                                              SelectionDAG &DAG) {
  MVT VT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  V = peekThroughBitcasts(V);

  // If the bitcasts shift the element size, we can't extract an equivalent
  // element from it.
  MVT NewVT = V.getSimpleValueType();
  if (!NewVT.isVector() ||
      NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
    // Ensure the scalar operand is the same size as the destination.
    // FIXME: Add support for scalar truncation where possible.
    SDValue S = V.getOperand(Idx);
    if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
      return DAG.getBitcast(EltVT, S);
  }

  return SDValue();
}

/// Helper to test for a load that can be folded with x86 shuffles.
///
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
  return V->hasOneUse() &&
         ISD::isNON_EXTLoad(peekThroughOneUseBitcasts(V).getNode());
}

template <typename T>
static bool isSoftFP16(T VT, const X86Subtarget &Subtarget) {
  return VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16();
}

template <typename T>
bool X86TargetLowering::isSoftFP16(T VT) const {
  return ::isSoftFP16(VT, Subtarget);
}

/// Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern for which we have especially efficient lowering
/// patterns across all subtarget feature sets.
static SDValue lowerShuffleAsElementInsertion(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
  MVT ExtVT = VT;
  MVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned EltBits = VT.getScalarSizeInBits();

  if (isSoftFP16(EltVT, Subtarget))
    return SDValue();

  int V2Index =
      find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
      Mask.begin();
  bool IsV1Constant = getTargetConstantFromNode(V1) != nullptr;
  bool IsV1Zeroable = true;
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (i != V2Index && !Zeroable[i]) {
      IsV1Zeroable = false;
      break;
    }

  // Bail if a non-zero V1 isn't used in place.
  if (!IsV1Zeroable) {
    SmallVector<int, 8> V1Mask(Mask);
    V1Mask[V2Index] = -1;
    if (!isNoopShuffleMask(V1Mask))
      return SDValue();
  }

  // Check for a single input from a SCALAR_TO_VECTOR node.
  // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
  // all the smarts here sunk into that routine. However, the current
  // lowering of BUILD_VECTOR makes that nearly impossible until the old
  // vector shuffle lowering is dead.
  SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
                                               DAG);
  if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
    // We need to zext the scalar if it is smaller than an i32.
    V2S = DAG.getBitcast(EltVT, V2S);
    if (EltVT == MVT::i8 || (EltVT == MVT::i16 && !Subtarget.hasFP16())) {
      // Using zext to expand a narrow element won't work for non-zero
      // insertions. But we can use a masked constant vector if we're
      // inserting V2 into the bottom of V1.
      if (!IsV1Zeroable && !(IsV1Constant && V2Index == 0))
        return SDValue();

      // Zero-extend directly to i32.
      ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);

      // If we're inserting into a constant, mask off the inserted index
      // and OR with the zero-extended scalar.
      if (!IsV1Zeroable) {
        SmallVector<APInt> Bits(NumElts, APInt::getAllOnes(EltBits));
        Bits[V2Index] = APInt::getZero(EltBits);
        SDValue BitMask = getConstVector(Bits, VT, DAG, DL);
        V1 = DAG.getNode(ISD::AND, DL, VT, V1, BitMask);
        V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
        V2 = DAG.getBitcast(VT, DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2));
        return DAG.getNode(ISD::OR, DL, VT, V1, V2);
      }
    }
    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
  } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
             EltVT == MVT::i16) {
    // Either not inserting from the low element of the input or the input
    // element size is too small to use VZEXT_MOVL to clear the high bits.
    return SDValue();
  }

  if (!IsV1Zeroable) {
    // If V1 can't be treated as a zero vector we have fewer options to lower
    // this. We can't support integer vectors or non-zero targets cheaply.
    assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
    if (!VT.isFloatingPoint() || V2Index != 0)
      return SDValue();
    if (!VT.is128BitVector())
      return SDValue();

    // Otherwise, use MOVSD, MOVSS or MOVSH.
    unsigned MovOpc = 0;
    if (EltVT == MVT::f16)
      MovOpc = X86ISD::MOVSH;
    else if (EltVT == MVT::f32)
      MovOpc = X86ISD::MOVSS;
    else if (EltVT == MVT::f64)
      MovOpc = X86ISD::MOVSD;
    else
      llvm_unreachable("Unsupported floating point element type to handle!");
    return DAG.getNode(MovOpc, DL, ExtVT, V1, V2);
  }

  // This lowering only works for the low element with floating point vectors.
  if (VT.isFloatingPoint() && V2Index != 0)
    return SDValue();

  V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
  if (ExtVT != VT)
    V2 = DAG.getBitcast(VT, V2);

  if (V2Index != 0) {
    // If we have 4 or fewer lanes we can cheaply shuffle the element into
    // the desired position. Otherwise it is more efficient to do a vector
    // shift left. We know that we can do a vector shift left because all
    // the inputs are zero.
    if (VT.isFloatingPoint() || NumElts <= 4) {
      SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
      V2Shuffle[V2Index] = 0;
      V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
    } else {
      V2 = DAG.getBitcast(MVT::v16i8, V2);
      V2 = DAG.getNode(
          X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
          DAG.getTargetConstant(V2Index * EltBits / 8, DL, MVT::i8));
      V2 = DAG.getBitcast(VT, V2);
    }
  }
  return V2;
}

/// Try to lower broadcast of a single - truncated - integer element,
/// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
///
/// This assumes we have AVX2.
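///
/// e.g. broadcasting i16 element 3 out of a node with i32 elements uses
/// Scale == 2: we take operand 1, shift it right by 16 bits and truncate.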
static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
                                            int BroadcastIdx,
                                            const X86Subtarget &Subtarget,
                                            SelectionDAG &DAG) {
  assert(Subtarget.hasAVX2() &&
         "We can only lower integer broadcasts with AVX2!");

  MVT EltVT = VT.getVectorElementType();
  MVT V0VT = V0.getSimpleValueType();

  assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
  assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");

  MVT V0EltVT = V0VT.getVectorElementType();
  if (!V0EltVT.isInteger())
    return SDValue();

  const unsigned EltSize = EltVT.getSizeInBits();
  const unsigned V0EltSize = V0EltVT.getSizeInBits();

  // This is only a truncation if the original element type is larger.
  if (V0EltSize <= EltSize)
    return SDValue();

  assert(((V0EltSize % EltSize) == 0) &&
         "Scalar type sizes must all be powers of 2 on x86!");

  const unsigned V0Opc = V0.getOpcode();
  const unsigned Scale = V0EltSize / EltSize;
  const unsigned V0BroadcastIdx = BroadcastIdx / Scale;

  if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
      V0Opc != ISD::BUILD_VECTOR)
    return SDValue();

  SDValue Scalar = V0.getOperand(V0BroadcastIdx);

  // If we're extracting non-least-significant bits, shift so we can truncate.
  // Hopefully, we can fold away the trunc/srl/load into the broadcast.
  // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
  // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
  if (const int OffsetIdx = BroadcastIdx % Scale)
    Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
                         DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));

  return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                     DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
}

/// Test whether this can be lowered with a single SHUFPS instruction.
///
/// This is used to disable more specialized lowerings when the shufps lowering
/// will happen to be efficient.
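///
/// e.g. <0,2,4,6> qualifies (the low half reads only V1 and the high half
/// only V2), while <0,4,1,5> does not.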
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
  // This routine only handles 128-bit shufps.
  assert(Mask.size() == 4 && "Unsupported mask size!");
  assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
  assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
  assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
  assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");

  // To lower with a single SHUFPS we need to have the low half and high half
  // each requiring a single input.
  if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
    return false;
  if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
    return false;

  return true;
}

/// Test whether the specified input (0 or 1) is in-place blended by the
/// given mask.
///
/// This returns true if the elements from a particular input are already in the
/// slot required by the given mask and require no permutation.
static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
  assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
      return false;

  return true;
}

/// If we are extracting two 128-bit halves of a vector and shuffling the
/// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
/// multi-shuffle lowering.
static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
                                             SDValue N1, ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {
  MVT VT = N0.getSimpleValueType();
  assert((VT.is128BitVector() &&
          (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
         "VPERM* family of shuffles requires 32-bit or 64-bit elements");

  // Check that both sources are extracts of the same source vector.
  if (N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
      N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
      N0.getOperand(0) != N1.getOperand(0) ||
      !N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  SDValue WideVec = N0.getOperand(0);
  MVT WideVT = WideVec.getSimpleValueType();
  if (!WideVT.is256BitVector())
    return SDValue();

  // Match extracts of each half of the wide source vector. Commute the shuffle
  // if the extract of the low half is N1.
  unsigned NumElts = VT.getVectorNumElements();
  SmallVector<int, 4> NewMask(Mask);
  const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
  const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
  if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
    ShuffleVectorSDNode::commuteMask(NewMask);
  else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
    return SDValue();

  // Final bailout: if the mask is simple, we are better off using an extract
  // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
  // because that avoids a constant load from memory.
  if (NumElts == 4 &&
      (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask, DAG)))
    return SDValue();

  // Extend the shuffle mask with undef elements.
  NewMask.append(NumElts, -1);

  // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
  SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
                                      NewMask);
  // This is free: ymm -> xmm.
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
                     DAG.getIntPtrConstant(0, DL));
}

/// Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
                                       SDValue V2, ArrayRef<int> Mask,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  MVT EltVT = VT.getVectorElementType();
  if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
        (Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
        (Subtarget.hasAVX2() && (VT.isInteger() || EltVT == MVT::f16))))
    return SDValue();

  // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
  // we can only broadcast from a register with AVX2.
  unsigned NumEltBits = VT.getScalarSizeInBits();
  unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
                        ? X86ISD::MOVDDUP
                        : X86ISD::VBROADCAST;
  bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();

  // Check that the mask is a broadcast.
  int BroadcastIdx = getSplatIndex(Mask);
  if (BroadcastIdx < 0)
    return SDValue();
  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
                                            "a sorted mask where the broadcast "
                                            "comes from V1.");

  // Go up the chain of (vector) values to find a scalar load that we can
  // combine with the broadcast.
  // TODO: Combine this logic with findEltLoadSrc() used by
  //       EltsFromConsecutiveLoads().
  int BitOffset = BroadcastIdx * NumEltBits;
  SDValue V = V1;
  for (;;) {
    switch (V.getOpcode()) {
    case ISD::BITCAST: {
      V = V.getOperand(0);
      continue;
    }
    case ISD::CONCAT_VECTORS: {
      int OpBitWidth = V.getOperand(0).getValueSizeInBits();
      int OpIdx = BitOffset / OpBitWidth;
      V = V.getOperand(OpIdx);
      BitOffset %= OpBitWidth;
      continue;
    }
    case ISD::EXTRACT_SUBVECTOR: {
      // The extraction index adds to the existing offset.
      unsigned EltBitWidth = V.getScalarValueSizeInBits();
      unsigned Idx = V.getConstantOperandVal(1);
      unsigned BeginOffset = Idx * EltBitWidth;
      BitOffset += BeginOffset;
      V = V.getOperand(0);
      continue;
    }
    case ISD::INSERT_SUBVECTOR: {
      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
      int EltBitWidth = VOuter.getScalarValueSizeInBits();
      int Idx = (int)V.getConstantOperandVal(2);
      int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
      int BeginOffset = Idx * EltBitWidth;
      int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
      if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
        BitOffset -= BeginOffset;
        V = VInner;
      } else {
        V = VOuter;
      }
      continue;
    }
    }
    break;
  }
  assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
  BroadcastIdx = BitOffset / NumEltBits;

  // Do we need to bitcast the source to retrieve the original broadcast index?
  bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;

  // Check if this is a broadcast of a scalar. We special case lowering
  // for scalars so that we can more effectively fold with loads.
  // If the original value has a larger element type than the shuffle, the
  // broadcast element is in essence truncated. Make that explicit to ease
  // folding.
  if (BitCastSrc && VT.isInteger())
    if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
            DL, VT, V, BroadcastIdx, Subtarget, DAG))
      return TruncBroadcast;

  // Also check the simpler case, where we can directly reuse the scalar.
  if (!BitCastSrc &&
      ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
       (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
    V = V.getOperand(BroadcastIdx);

    // If we can't broadcast from a register, check that the input is a load.
    if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
      return SDValue();
  } else if (ISD::isNormalLoad(V.getNode()) &&
             cast<LoadSDNode>(V)->isSimple()) {
    // We do not check for one-use of the vector load because a broadcast load
    // is expected to be a win for code size, register pressure, and possibly
    // uops even if the original vector load is not eliminated.

    // Reduce the vector load and shuffle to a broadcasted scalar load.
    LoadSDNode *Ld = cast<LoadSDNode>(V);
    SDValue BaseAddr = Ld->getOperand(1);
    MVT SVT = VT.getScalarType();
    unsigned Offset = BroadcastIdx * SVT.getStoreSize();
    assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
    SDValue NewAddr =
        DAG.getMemBasePlusOffset(BaseAddr, TypeSize::Fixed(Offset), DL);

    // Directly form VBROADCAST_LOAD if we're using VBROADCAST opcode rather
    // than MOVDDUP.
    // FIXME: Should we add VBROADCAST_LOAD isel patterns for pre-AVX?
    if (Opcode == X86ISD::VBROADCAST) {
      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[] = {Ld->getChain(), NewAddr};
      V = DAG.getMemIntrinsicNode(
          X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SVT,
          DAG.getMachineFunction().getMachineMemOperand(
              Ld->getMemOperand(), Offset, SVT.getStoreSize()));
      DAG.makeEquivalentMemoryOrdering(Ld, V);
      return DAG.getBitcast(VT, V);
    }
    assert(SVT == MVT::f64 && "Unexpected VT!");
    V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                    DAG.getMachineFunction().getMachineMemOperand(
                        Ld->getMemOperand(), Offset, SVT.getStoreSize()));
    DAG.makeEquivalentMemoryOrdering(Ld, V);
  } else if (!BroadcastFromReg) {
    // We can't broadcast from a vector register.
    return SDValue();
  } else if (BitOffset != 0) {
    // We can only broadcast from the zero-element of a vector register,
    // but it can be advantageous to broadcast from the zero-element of a
    // subvector.
    if (!VT.is256BitVector() && !VT.is512BitVector())
      return SDValue();

    // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
    if (VT == MVT::v4f64 || VT == MVT::v4i64)
      return SDValue();

    // Only broadcast the zero-element of a 128-bit subvector.
    if ((BitOffset % 128) != 0)
      return SDValue();

    assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
           "Unexpected bit-offset");
    assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
           "Unexpected vector size");
    unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
    V = extract128BitVector(V, ExtractIdx, DAG, DL);
  }

  // On AVX we can use VBROADCAST directly for scalar sources.
  if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector()) {
    V = DAG.getBitcast(MVT::f64, V);
    if (Subtarget.hasAVX()) {
      V = DAG.getNode(X86ISD::VBROADCAST, DL, MVT::v2f64, V);
      return DAG.getBitcast(VT, V);
    }
    V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V);
  }

  // If this is a scalar, do the broadcast on this type and bitcast.
  if (!V.getValueType().isVector()) {
    assert(V.getScalarValueSizeInBits() == NumEltBits &&
           "Unexpected scalar size");
    MVT BroadcastVT = MVT::getVectorVT(V.getSimpleValueType(),
                                       VT.getVectorNumElements());
    return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
  }

  // We only support broadcasting from 128-bit vectors to minimize the
  // number of patterns we need to deal with in isel. So extract down to
  // 128-bits, removing as many bitcasts as possible.
  if (V.getValueSizeInBits() > 128)
    V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);

  // Otherwise cast V to a vector with the same element type as VT, but
  // possibly narrower than VT. Then perform the broadcast.
  unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
  MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(), NumSrcElts);
  return DAG.getNode(Opcode, DL, VT, DAG.getBitcast(CastVT, V));
}

// Check for whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
                                   unsigned &InsertPSMask,
                                   const APInt &Zeroable,
                                   ArrayRef<int> Mask, SelectionDAG &DAG) {
  assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Attempt to match INSERTPS with one element from VA or VB being
  // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
  // are updated.
  auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
                             ArrayRef<int> CandidateMask) {
    unsigned ZMask = 0;
    int VADstIndex = -1;
    int VBDstIndex = -1;
    bool VAUsedInPlace = false;

    for (int i = 0; i < 4; ++i) {
      // Synthesize a zero mask from the zeroable elements (includes undefs).
      if (Zeroable[i]) {
        ZMask |= 1 << i;
        continue;
      }

      // Flag if we use any VA inputs in place.
      if (i == CandidateMask[i]) {
        VAUsedInPlace = true;
        continue;
      }

      // We can only insert a single non-zeroable element.
      if (VADstIndex >= 0 || VBDstIndex >= 0)
        return false;

      if (CandidateMask[i] < 4) {
        // VA input out of place for insertion.
        VADstIndex = i;
      } else {
        // VB input for insertion.
        VBDstIndex = i;
      }
    }

    // Don't bother if we have no (non-zeroable) element for insertion.
    if (VADstIndex < 0 && VBDstIndex < 0)
      return false;

    // Determine element insertion src/dst indices. The src index is from the
    // start of the inserted vector, not the start of the concatenated vector.
    unsigned VBSrcIndex = 0;
    if (VADstIndex >= 0) {
      // If we have a VA input out of place, we use VA as the V2 element
      // insertion and don't use the original V2 at all.
      VBSrcIndex = CandidateMask[VADstIndex];
      VBDstIndex = VADstIndex;
      VB = VA;
    } else {
      VBSrcIndex = CandidateMask[VBDstIndex] - 4;
    }

    // If no V1 inputs are used in place, then the result is created only from
    // the zero mask and the V2 insertion - so remove V1 dependency.
    if (!VAUsedInPlace)
      VA = DAG.getUNDEF(MVT::v4f32);

    // Update V1, V2 and InsertPSMask accordingly.
    V1 = VA;
    V2 = VB;

    // Insert the V2 element into the desired position.
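    // The INSERTPS immediate encodes COUNT_S (the source element of V2) in
    // bits [7:6], COUNT_D (the destination slot) in bits [5:4], and ZMASK in
    // bits [3:0].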
    InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
    assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
    return true;
  };

  if (matchAsInsertPS(V1, V2, Mask))
    return true;

  // Commute and try again.
  SmallVector<int, 4> CommutedMask(Mask);
  ShuffleVectorSDNode::commuteMask(CommutedMask);
  if (matchAsInsertPS(V2, V1, CommutedMask))
    return true;

  return false;
}

static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
                                      ArrayRef<int> Mask, const APInt &Zeroable,
                                      SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");

  // Attempt to match the insertps pattern.
  unsigned InsertPSMask = 0;
  if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
    return SDValue();

  // Insert the V2 element into the desired position.
  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                     DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
}

/// Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
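    // The SHUFPD immediate for v2f64 uses bit 0 to pick the element of the
    // first source and bit 1 to pick the element of the second source, so a
    // splat of element 1 encodes as 0b11, for example.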
    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);

    if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPD which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
                         DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
    }

    return DAG.getNode(
        X86ISD::SHUFP, DL, MVT::v2f64,
        Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
  }
  assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
                        Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // Try to use one of the special instruction patterns to handle two common
  // blend patterns if a zero-blend above didn't work.
  if (isShuffleEquivalent(Mask, {0, 3}, V1, V2) ||
      isShuffleEquivalent(Mask, {1, 3}, V1, V2))
    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
      // We can either use a special instruction to load over the low double or
      // to move just the low double.
      return DAG.getNode(
          X86ISD::MOVSD, DL, MVT::v2f64, V2,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));

  if (Subtarget.hasSSE41())
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
    return V;

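  // Bit 0 of the immediate selects the V1 element and bit 1 the V2 element;
  // Mask[1] lives in the V2 index space [2,4), hence the -2 rebase below.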
  unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
  return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
                     DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
}

/// Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We have to map the mask as it is actually a v4i32 shuffle instruction.
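    // For example, the v2i64 mask <1,0> widens to the v4i32 mask <2,3,0,1>.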
    V1 = DAG.getBitcast(MVT::v4i32, V1);
    int WidenedMask[4] = {Mask[0] < 0 ? -1 : (Mask[0] * 2),
                          Mask[0] < 0 ? -1 : ((Mask[0] * 2) + 1),
                          Mask[1] < 0 ? -1 : (Mask[1] * 2),
                          Mask[1] < 0 ? -1 : ((Mask[1] * 2) + 1)};
    return DAG.getBitcast(
        MVT::v2i64,
        DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                    getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
  }
  assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget,
                              DAG, /*BitwiseOnly*/ false))
    return Shift;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
    return V;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget.hasSSSE3()) {
    if (Subtarget.hasVLX())
      if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v2i64, V1, V2, Mask,
                                                Zeroable, Subtarget, DAG))
        return Rotate;

    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;
  }

  // If we have direct support for blends, we should lower by decomposing into
  // a permute. That will be faster than the domain cross.
  if (IsBlendSupported)
    return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v2i64, V1, V2, Mask,
                                                Subtarget, DAG);

  // We implement this with SHUFPD which is pretty lame because it will likely
  // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
  // However, all the alternatives are still more cycles and newer chips don't
  // have this problem. It would be really nice if x86 had better shuffles here.
  V1 = DAG.getBitcast(MVT::v2f64, V1);
  V2 = DAG.getBitcast(MVT::v2f64, V2);
  return DAG.getBitcast(MVT::v2i64,
                        DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}

/// Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
                                      ArrayRef<int> Mask, SDValue V1,
                                      SDValue V2, SelectionDAG &DAG) {
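  // SHUFPS fills result elements 0-1 from its first operand and elements 2-3
  // from its second operand, so the work below routes each requested input
  // into an operand that can feed the corresponding half of the result.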
  SDValue LowV = V1, HighV = V2;
  SmallVector<int, 4> NewMask(Mask);
  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 1) {
    int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();

    // Compute the index adjacent to V2Index and in the same half by toggling
    // the low bit.
    int V2AdjIndex = V2Index ^ 1;

    if (Mask[V2AdjIndex] < 0) {
      // Handles all the cases where we have a single V2 element and an undef.
      // This will only ever happen in the high lanes because we commute the
      // vector otherwise.
      if (V2Index < 2)
        std::swap(LowV, HighV);
      NewMask[V2Index] -= 4;
    } else {
      // Handle the case where the V2 element ends up adjacent to a V1 element.
      // To make this work, blend them together as the first step.
      int V1Index = V2AdjIndex;
      int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
      V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));

      // Now proceed to reconstruct the final blend as we have the necessary
      // high or low half formed.
      if (V2Index < 2) {
        LowV = V2;
        HighV = V1;
      } else {
        HighV = V2;
      }
      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
      NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
    }
  } else if (NumV2Elements == 2) {
    if (Mask[0] < 4 && Mask[1] < 4) {
      // Handle the easy case where we have V1 in the low lanes and V2 in the
      // high lanes.
      NewMask[2] -= 4;
      NewMask[3] -= 4;
    } else if (Mask[2] < 4 && Mask[3] < 4) {
      // We also handle the reversed case because this utility may get called
      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
      // arrange things in the right direction.
      NewMask[0] -= 4;
      NewMask[1] -= 4;
      HighV = V1;
      LowV = V2;
    } else {
      // We have a mixture of V1 and V2 in both low and high lanes. Rather than
      // trying to place elements directly, just blend them and set up the final
      // shuffle to place them.

      // The first two blend mask elements are for V1, the second two are for
      // V2.
      int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
                          Mask[2] < 4 ? Mask[2] : Mask[3],
                          (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
                          (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
      V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));

      // Now we do a normal shuffle of V1 by giving V1 as both operands to
      // the shuffle.
      LowV = HighV = V1;
      NewMask[0] = Mask[0] < 4 ? 0 : 2;
      NewMask[1] = Mask[0] < 4 ? 2 : 0;
      NewMask[2] = Mask[2] < 4 ? 1 : 3;
      NewMask[3] = Mask[2] < 4 ? 3 : 1;
    }
  } else if (NumV2Elements == 3) {
    // Ideally canonicalizeShuffleMaskWithCommute should have caught this, but
    // we can get here via other paths (e.g. repeated mask matching) that we
    // don't want to send through another round of lowerVECTOR_SHUFFLE.
    ShuffleVectorSDNode::commuteMask(NewMask);
    return lowerShuffleWithSHUFPS(DL, VT, NewMask, V2, V1, DAG);
  }
  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
                     getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
}

/// Lower 4-lane 32-bit floating point shuffles.
///
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  if (Subtarget.hasSSE41())
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (Subtarget.hasSSE3()) {
      if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
        return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
      if (isShuffleEquivalent(Mask, {1, 1, 3, 3}, V1, V2))
        return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
    }

    if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
    }

    // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
    // in SSE1 because otherwise they are widened to v2f64 and never get here.
    if (!Subtarget.hasSSE2()) {
      if (isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2))
        return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
      if (isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1, V2))
        return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
    }

    // Otherwise, use a straight shuffle of a single input vector. We pass the
    // input vector to both operands to simulate this with a SHUFPS.
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  if (Subtarget.hasSSE2())
    if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
            DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG)) {
      ZExt = DAG.getBitcast(MVT::v4f32, ZExt);
      return ZExt;
    }

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // There are special ways we can lower some single-element blends. However, we
  // have custom ways we can lower more complex single-element blends below that
  // we defer to if both this and BLENDPS fail to match, so restrict this to
  // when the V2 input is targeting element 0 of the mask -- that is the fast
  // case here.
  if (NumV2Elements == 1 && Mask[0] >= 4)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  if (Subtarget.hasSSE41()) {
    // Use INSERTPS if we can complete the shuffle efficiently.
    if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
      return V;

    if (!isSingleSHUFPSMask(Mask))
      if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
                                                            V2, Mask, DAG))
        return BlendPerm;
  }

  // Use low/high mov instructions. These are only valid in SSE1 because
  // otherwise they are widened to v2f64 and never get here.
  if (!Subtarget.hasSSE2()) {
    if (isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2))
      return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
    if (isShuffleEquivalent(Mask, {2, 3, 6, 7}, V1, V2))
      return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
    return V;

  // Otherwise fall back to a SHUFPS lowering strategy.
  return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}

/// Lower 4-lane i32 vector shuffles.
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  // Try to use shift instructions if fast.
  if (Subtarget.preferLowerShuffleAsShift()) {
    if (SDValue Shift =
            lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, Zeroable,
                                Subtarget, DAG, /*BitwiseOnly*/ true))
      return Shift;
    if (NumV2Elements == 0)
      if (SDValue Rotate =
              lowerShuffleAsBitRotate(DL, MVT::v4i32, V1, Mask, Subtarget, DAG))
        return Rotate;
  }

  if (NumV2Elements == 0) {
    // Try to use broadcast unless the mask only has one non-undef element.
    if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
      if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
                                                      Mask, Subtarget, DAG))
        return Broadcast;
    }

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We coerce the shuffle pattern to be compatible with UNPCK instructions
    // but we aren't actually going to use the UNPCK instruction because doing
    // so prevents folding a load into this instruction or making a copy.
    const int UnpackLoMask[] = {0, 0, 1, 1};
    const int UnpackHiMask[] = {2, 2, 3, 3};
    if (isShuffleEquivalent(Mask, {0, 0, 1, 1}, V1, V2))
      Mask = UnpackLoMask;
    else if (isShuffleEquivalent(Mask, {2, 2, 3, 3}, V1, V2))
      Mask = UnpackHiMask;

    return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget,
                              DAG, /*BitwiseOnly*/ false))
    return Shift;

  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
    return V;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget.hasSSSE3()) {
    if (Subtarget.hasVLX())
      if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i32, V1, V2, Mask,
                                                Zeroable, Subtarget, DAG))
        return Rotate;

    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;
  }

  // Assume that a single SHUFPS is faster than an alternative sequence of
  // multiple instructions (even if the CPU has a domain penalty).
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (!isSingleSHUFPSMask(Mask)) {
    // If we have direct support for blends, we should lower by decomposing into
    // a permute. That will be faster than the domain cross.
    if (IsBlendSupported)
      return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG);

    // Try to lower by permuting the inputs into an unpack instruction.
    if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
                                                        Mask, Subtarget, DAG))
      return Unpack;
  }

  // We implement this with SHUFPS because it can blend from two vectors.
  // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // relevant.
  SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
  SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
  SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
  return DAG.getBitcast(MVT::v4i32, ShufPS);
}

/// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
/// shuffle lowering, and the most complex part.
///
/// The lowering strategy is to try to form pairs of input lanes which are
/// targeted at the same half of the final vector, and then use a dword shuffle
/// to place them onto the right half, and finally unpack the paired lanes into
/// their final position.
///
/// The exact breakdown of how to form these dword pairs and align them on the
/// correct sides is really tricky. See the comments within the function for
/// more of the details.
///
/// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
/// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
/// vector, form the analogous 128-bit 8-element Mask.
static SDValue lowerV8I16GeneralSingleInputShuffle(
    const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
  MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);

  assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
  MutableArrayRef<int> LoMask = Mask.slice(0, 4);
  MutableArrayRef<int> HiMask = Mask.slice(4, 4);

  // Attempt to directly match PSHUFLW or PSHUFHW.
  if (isUndefOrInRange(LoMask, 0, 4) &&
      isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
    return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
                       getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
  }
  if (isUndefOrInRange(HiMask, 4, 8) &&
      isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
    for (int i = 0; i != 4; ++i)
      HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
    return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
                       getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
  }

  SmallVector<int, 4> LoInputs;
  copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
  array_pod_sort(LoInputs.begin(), LoInputs.end());
  LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
  SmallVector<int, 4> HiInputs;
  copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
  array_pod_sort(HiInputs.begin(), HiInputs.end());
  HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
  int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
  int NumHToL = LoInputs.size() - NumLToL;
  int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
  int NumHToH = HiInputs.size() - NumLToH;
  MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
  MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
  MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
  MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);

  // If we are shuffling values from one half - check how many different DWORD
  // pairs we need to create. If only 1 or 2 then we can perform this as a
  // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
  auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
                               ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
    V = DAG.getNode(ShufWOp, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
    V = DAG.getBitcast(PSHUFDVT, V);
    V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
    return DAG.getBitcast(VT, V);
  };

  if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
    int PSHUFDMask[4] = { -1, -1, -1, -1 };
    SmallVector<std::pair<int, int>, 4> DWordPairs;
    int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);

    // Collect the different DWORD pairs.
    for (int DWord = 0; DWord != 4; ++DWord) {
      int M0 = Mask[2 * DWord + 0];
      int M1 = Mask[2 * DWord + 1];
      M0 = (M0 >= 0 ? M0 % 4 : M0);
      M1 = (M1 >= 0 ? M1 % 4 : M1);
      if (M0 < 0 && M1 < 0)
        continue;

      bool Match = false;
      for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
        auto &DWordPair = DWordPairs[j];
        if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
            (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
          DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
          DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
          PSHUFDMask[DWord] = DOffset + j;
          Match = true;
          break;
        }
      }
      if (!Match) {
        PSHUFDMask[DWord] = DOffset + DWordPairs.size();
        DWordPairs.push_back(std::make_pair(M0, M1));
      }
    }

    if (DWordPairs.size() <= 2) {
      DWordPairs.resize(2, std::make_pair(-1, -1));
      int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
                              DWordPairs[1].first, DWordPairs[1].second};
      if ((NumHToL + NumHToH) == 0)
        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
      if ((NumLToL + NumLToH) == 0)
        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
    }
  }

  // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
  // such inputs we can swap two of the dwords across the half mark and end up
  // with <=2 inputs to each half in each half. Once there, we can fall through
  // to the generic code below. For example:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
  //
  // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
  // and an existing 2-into-2 on the other half. In this case we may have to
  // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
  // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
  // Fortunately, we don't have to handle anything but a 2-into-2 pattern
  // because any other situation (including a 3-into-1 or 1-into-3 in the other
  // half than the one we target for fixing) will be fixed when we re-enter this
  // path. We will also combine away any sequence of PSHUFD instructions that
  // result into a single instruction. Here is an example of the tricky case:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
  //
  // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
  //
  // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
  //
  // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
  //
  // The result is fine to be handled by the generic logic.
  auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
                          ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
                          int AOffset, int BOffset) {
    assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
           "Must call this with A having 3 or 1 inputs from the A half.");
    assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
           "Must call this with B having 1 or 3 inputs from the B half.");
    assert(AToAInputs.size() + BToAInputs.size() == 4 &&
           "Must call this with either 3:1 or 1:3 inputs (summing to 4).");

    bool ThreeAInputs = AToAInputs.size() == 3;

    // Compute the index of dword with only one word among the three inputs in
    // a half by taking the sum of the half with three inputs and subtracting
    // the sum of the actual three inputs. The difference is the remaining
    // slot.
    int ADWord = 0, BDWord = 0;
    int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
    int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
    int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
    ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
    int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
    int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
    int TripleNonInputIdx =
        TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
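    // e.g. if the three inputs are {0, 1, 3} (AOffset 0), TripleInputSum is 6
    // and the inputs sum to 4, leaving index 2 as the one non-input slot.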
    TripleDWord = TripleNonInputIdx / 2;

    // We use xor with one to compute the adjacent DWord to whichever one the
    // OneInput is in.
    OneInputDWord = (OneInput / 2) ^ 1;

    // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
    // and BToA inputs. If there is also such a problem with the BToB and AToB
    // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
    // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
    // is essential that we don't *create* a 3<-1 as then we might oscillate.
    if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
      // Compute how many inputs will be flipped by swapping these DWords. We
      // need to balance this to ensure we don't form a 3-1 shuffle in the
      // other half.
      int NumFlippedAToBInputs = llvm::count(AToBInputs, 2 * ADWord) +
                                 llvm::count(AToBInputs, 2 * ADWord + 1);
      int NumFlippedBToBInputs = llvm::count(BToBInputs, 2 * BDWord) +
                                 llvm::count(BToBInputs, 2 * BDWord + 1);
      if ((NumFlippedAToBInputs == 1 &&
           (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
          (NumFlippedBToBInputs == 1 &&
           (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
        // We choose whether to fix the A half or B half based on whether that
        // half has zero flipped inputs. At zero, we may not be able to fix it
        // with that half. We also bias towards fixing the B half because that
        // will more commonly be the high half, and we have to bias one way.
        auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
                                                       ArrayRef<int> Inputs) {
          int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
          bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
          // Determine whether the free index is in the flipped dword or the
          // unflipped dword based on where the pinned index is. We use this bit
          // in an xor to conditionally select the adjacent dword.
          int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
          bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
          if (IsFixIdxInput == IsFixFreeIdxInput)
            FixFreeIdx += 1;
          IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
          assert(IsFixIdxInput != IsFixFreeIdxInput &&
                 "We need to be changing the number of flipped inputs!");
          int PSHUFHalfMask[] = {0, 1, 2, 3};
          std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
          V = DAG.getNode(
              FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
              MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
              getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));

          for (int &M : Mask)
            if (M >= 0 && M == FixIdx)
              M = FixFreeIdx;
            else if (M >= 0 && M == FixFreeIdx)
              M = FixIdx;
        };
        if (NumFlippedBToBInputs != 0) {
          int BPinnedIdx =
              BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
        } else {
          assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
          int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
        }
      }
    }

    int PSHUFDMask[] = {0, 1, 2, 3};
    PSHUFDMask[ADWord] = BDWord;
    PSHUFDMask[BDWord] = ADWord;
    V = DAG.getBitcast(
        VT,
        DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));

    // Adjust the mask to match the new locations of A and B.
    for (int &M : Mask)
      if (M >= 0 && M/2 == ADWord)
        M = 2 * BDWord + M % 2;
      else if (M >= 0 && M/2 == BDWord)
        M = 2 * ADWord + M % 2;

    // Recurse back into this routine to re-compute state now that this isn't
    // a 3 and 1 problem.
    return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
  };
  if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
    return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
  if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
    return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);

  // At this point there are at most two inputs to the low and high halves from
  // each half. That means the inputs can always be grouped into dwords and
  // those dwords can then be moved to the correct half with a dword shuffle.
  // We use at most one low and one high word shuffle to collect these paired
  // inputs into dwords, and finally a dword shuffle to place them.
  int PSHUFLMask[4] = {-1, -1, -1, -1};
  int PSHUFHMask[4] = {-1, -1, -1, -1};
  int PSHUFDMask[4] = {-1, -1, -1, -1};

  // First fix the masks for all the inputs that are staying in their
  // original halves. This will then dictate the targets of the cross-half
  // shuffles.
  auto fixInPlaceInputs =
      [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
                    MutableArrayRef<int> SourceHalfMask,
                    MutableArrayRef<int> HalfMask, int HalfOffset) {
    if (InPlaceInputs.empty())
      return;
    if (InPlaceInputs.size() == 1) {
      SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
          InPlaceInputs[0] - HalfOffset;
      PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
      return;
    }
    if (IncomingInputs.empty()) {
      // Just fix all of the in place inputs.
      for (int Input : InPlaceInputs) {
        SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
        PSHUFDMask[Input / 2] = Input / 2;
      }
      return;
    }

    assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
    SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
        InPlaceInputs[0] - HalfOffset;
    // Put the second input next to the first so that they are packed into
    // a dword. We find the adjacent index by toggling the low bit.
    int AdjIndex = InPlaceInputs[0] ^ 1;
    SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
    std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
    PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
  };
  fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
  fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);

  // Now gather the cross-half inputs and place them into a free dword of
  // their target half.
  // FIXME: This operation could almost certainly be simplified dramatically to
  // look more like the 3-1 fixing operation.
  auto moveInputsToRightHalf = [&PSHUFDMask](
      MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
      MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
      MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
      int DestOffset) {
    auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
      return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
    };
    auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
                                               int Word) {
      int LowWord = Word & ~1;
      int HighWord = Word | 1;
      return isWordClobbered(SourceHalfMask, LowWord) ||
             isWordClobbered(SourceHalfMask, HighWord);
    };

    if (IncomingInputs.empty())
      return;

    if (ExistingInputs.empty()) {
      // Map any dwords with inputs from them into the right half.
      for (int Input : IncomingInputs) {
        // If the source half mask maps over the inputs, turn those into
        // swaps and use the swapped lane.
        if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
          if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
            SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
                Input - SourceOffset;
            // We have to swap the uses in our half mask in one sweep.
            for (int &M : HalfMask)
              if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
                M = Input;
              else if (M == Input)
                M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
          } else {
            assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
                       Input - SourceOffset &&
                   "Previous placement doesn't match!");
          }
          // Note that this correctly re-maps both when we do a swap and when
          // we observe the other side of the swap above. We rely on that to
          // avoid swapping the members of the input list directly.
          Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
        }

        // Map the input's dword into the correct half.
        if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
          PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
        else
          assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
                     Input / 2 &&
                 "Previous placement doesn't match!");
      }

      // And just directly shift any other-half mask elements to be same-half
      // as we will have mirrored the dword containing the element into the
      // same position within that half.
      for (int &M : HalfMask)
        if (M >= SourceOffset && M < SourceOffset + 4) {
          M = M - SourceOffset + DestOffset;
          assert(M >= 0 && "This should never wrap below zero!");
        }
      return;
    }

    // Ensure we have the input in a viable dword of its current half. This
    // is particularly tricky because the original position may be clobbered
    // by inputs being moved and *staying* in that half.
    if (IncomingInputs.size() == 1) {
      if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
        int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
                         SourceOffset;
        SourceHalfMask[InputFixed - SourceOffset] =
            IncomingInputs[0] - SourceOffset;
        std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
                     InputFixed);
        IncomingInputs[0] = InputFixed;
      }
    } else if (IncomingInputs.size() == 2) {
      if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
          isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
        // We have two non-adjacent or clobbered inputs we need to extract from
        // the source half. To do this, we need to map them into some adjacent
        // dword slot in the source mask.
        int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
                              IncomingInputs[1] - SourceOffset};

        // If there is a free slot in the source half mask adjacent to one of
        // the inputs, place the other input in it. We use (Index XOR 1) to
        // compute an adjacent index.
        if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
            SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
          SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
          InputsFixed[1] = InputsFixed[0] ^ 1;
        } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
                   SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
          SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
          SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
          InputsFixed[0] = InputsFixed[1] ^ 1;
        } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
                   SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
          // The two inputs are in the same DWord but it is clobbered and the
          // adjacent DWord isn't used at all. Move both inputs to the free
          // slot.
          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
          InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
          InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
        } else {
          // The only way we hit this point is if there is no clobbering
          // (because there are no off-half inputs to this half) and there is no
          // free slot adjacent to one of the inputs. In this case, we have to
          // swap an input with a non-input.
          for (int i = 0; i < 4; ++i)
            assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
                   "We can't handle any clobbers here!");
          assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
                 "Cannot have adjacent inputs here!");

          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
          SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;

          // We also have to update the final source mask in this case because
          // it may need to undo the above swap.
          for (int &M : FinalSourceHalfMask)
            if (M == (InputsFixed[0] ^ 1) + SourceOffset)
              M = InputsFixed[1] + SourceOffset;
            else if (M == InputsFixed[1] + SourceOffset)
              M = (InputsFixed[0] ^ 1) + SourceOffset;

          InputsFixed[1] = InputsFixed[0] ^ 1;
        }

        // Point everything at the fixed inputs.
        for (int &M : HalfMask)
          if (M == IncomingInputs[0])
            M = InputsFixed[0] + SourceOffset;
          else if (M == IncomingInputs[1])
            M = InputsFixed[1] + SourceOffset;

        IncomingInputs[0] = InputsFixed[0] + SourceOffset;
        IncomingInputs[1] = InputsFixed[1] + SourceOffset;
      }
    } else {
      llvm_unreachable("Unhandled input size!");
    }

    // Now hoist the DWord down to the right half.
    int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
    assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
    PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
    for (int &M : HalfMask)
      for (int Input : IncomingInputs)
        if (M == Input)
          M = FreeDWord * 2 + Input % 2;
  };
  moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
                        /*SourceOffset*/ 4, /*DestOffset*/ 0);
  moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
                        /*SourceOffset*/ 0, /*DestOffset*/ 4);

  // Now enact all the shuffles we've computed to move the inputs into their
  // target half.
  if (!isNoopShuffleMask(PSHUFLMask))
    V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
  if (!isNoopShuffleMask(PSHUFHMask))
    V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
  if (!isNoopShuffleMask(PSHUFDMask))
    V = DAG.getBitcast(
        VT,
        DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));

  // At this point, each half should contain all its inputs, and we can then
  // just shuffle them into their final position.
  assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
         "Failed to lift all the high half inputs to the low mask!");
  assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
         "Failed to lift all the low half inputs to the high mask!");

  // Do a half shuffle for the low mask.
  if (!isNoopShuffleMask(LoMask))
    V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));

  // Do a half shuffle with the high mask after shifting its values down.
  for (int &M : HiMask)
    if (M >= 0)
      M -= 4;
  if (!isNoopShuffleMask(HiMask))
    V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));

  return V;
}

/// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
/// blend if only one input is used.
static SDValue lowerShuffleAsBlendOfPSHUFBs(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
  assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
         "Lane crossing shuffle masks not supported");

  int NumBytes = VT.getSizeInBits() / 8;
  int Size = Mask.size();
  int Scale = NumBytes / Size;

  SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
  SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));

  V1InUse = false;
  V2InUse = false;

  for (int i = 0; i < NumBytes; ++i) {
    int M = Mask[i / Scale];
    if (M < 0)
      continue;

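    // PSHUFB zeroes an output byte whenever bit 7 of its control byte is set,
    // so 0x80 routes a zero instead of any input element.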
    const int ZeroMask = 0x80;
    int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
    int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
    if (Zeroable[i / Scale])
      V1Idx = V2Idx = ZeroMask;

    V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
    V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
    V1InUse |= (ZeroMask != V1Idx);
    V2InUse |= (ZeroMask != V2Idx);
  }

  MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
  if (V1InUse)
    V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
                     DAG.getBuildVector(ShufVT, DL, V1Mask));
  if (V2InUse)
    V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
                     DAG.getBuildVector(ShufVT, DL, V2Mask));

  // If we need shuffled inputs from both, blend the two.
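  // Every byte is zero in at least one of the two shuffled inputs, so a plain
  // OR is enough to merge them.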
  SDValue V;
  if (V1InUse && V2InUse)
    V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
  else
    V = V1InUse ? V1 : V2;

  // Cast the result back to the correct type.
  return DAG.getBitcast(VT, V);
}

/// Generic lowering of 8-lane i16 shuffles.
///
/// This handles both single-input shuffles and combined shuffle/blends with
/// two inputs. The single input shuffles are immediately delegated to
/// a dedicated lowering routine.
///
/// The blends are lowered in one of three fundamental ways. If there are few
/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
/// of the input is significantly cheaper when lowered as an interleaving of
/// the two inputs, try to interleave them. Otherwise, blend the low and high
/// halves of the inputs separately (making them have relatively few inputs)
/// and then concatenate them.
static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // Try to lower using a truncation.
  if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
                                        Subtarget, DAG))
    return V;

  int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });

  if (NumV2Inputs == 0) {
    // Try to use shift instructions.
    if (SDValue Shift =
            lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask, Zeroable,
                                Subtarget, DAG, /*BitwiseOnly*/ false))
      return Shift;

    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Try to use bit rotation instructions.
    if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v8i16, V1, Mask,
                                                 Subtarget, DAG))
      return Rotate;

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
      return V;

    // Use dedicated pack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
                                         Subtarget))
      return V;

    // Try to use byte rotation instructions.
    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
                                                  Subtarget, DAG))
      return Rotate;

    // Make a copy of the mask so it can be modified.
    SmallVector<int, 8> MutableMask(Mask);
    return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
                                               Subtarget, DAG);
  }

  assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
         "All single-input shuffles should be canonicalized to be V1-input "
         "shuffles.");

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget,
                              DAG, /*BitwiseOnly*/ false))
    return Shift;

  // See if we can use SSE4A Extraction / Insertion.
  if (Subtarget.hasSSE4A())
    if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
                                          Zeroable, DAG))
      return V;

  // There are special ways we can lower some single-element blends.
  if (NumV2Inputs == 1)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to lower using a truncation.
  if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
                                       Subtarget, DAG))
    return V;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  if (SDValue BitBlend =
          lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
    return BitBlend;

  // Try to use byte shift instructions to mask.
  if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
                                              Zeroable, Subtarget, DAG))
    return V;

  // Attempt to lower using compaction; SSE41 is necessary for PACKUSDW.
  int NumEvenDrops = canLowerByDroppingElements(Mask, true, false);
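  // "Dropping" keeps only the even elements of the concatenated inputs. After
  // the high word of each dword is cleared (the AND or shift pairs below),
  // PACKUS or PACKSS narrows dwords back to words without saturating, which
  // realizes exactly that element drop.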
16585 if ((NumEvenDrops == 1 || (NumEvenDrops == 2 && Subtarget.hasSSE41())) &&
16586 !Subtarget.hasVLX()) {
16587 // Check if this is part of a 256-bit vector truncation.
16588 unsigned PackOpc = 0;
16589 if (NumEvenDrops == 2 && Subtarget.hasAVX2() &&
16590 peekThroughBitcasts(V1).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
16591 peekThroughBitcasts(V2).getOpcode() == ISD::EXTRACT_SUBVECTOR) {
16592 SDValue V1V2 = concatSubVectors(V1, V2, DAG, DL);
16593 V1V2 = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1V2,
16594 getZeroVector(MVT::v16i16, Subtarget, DAG, DL),
16595 DAG.getTargetConstant(0xEE, DL, MVT::i8));
16596 V1V2 = DAG.getBitcast(MVT::v8i32, V1V2);
16597 V1 = extract128BitVector(V1V2, 0, DAG, DL);
16598 V2 = extract128BitVector(V1V2, 4, DAG, DL);
16599 PackOpc = X86ISD::PACKUS;
16600 } else if (Subtarget.hasSSE41()) {
16601 SmallVector<SDValue, 4> DWordClearOps(4,
16602 DAG.getConstant(0, DL, MVT::i32));
16603 for (unsigned i = 0; i != 4; i += 1 << (NumEvenDrops - 1))
16604 DWordClearOps[i] = DAG.getConstant(0xFFFF, DL, MVT::i32);
16605 SDValue DWordClearMask =
16606 DAG.getBuildVector(MVT::v4i32, DL, DWordClearOps);
16607 V1 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V1),
16609 V2 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V2),
16611 PackOpc = X86ISD::PACKUS;
16612 } else if (!Subtarget.hasSSSE3()) {
16613 SDValue ShAmt = DAG.getTargetConstant(16, DL, MVT::i8);
16614 V1 = DAG.getBitcast(MVT::v4i32, V1);
16615 V2 = DAG.getBitcast(MVT::v4i32, V2);
16616 V1 = DAG.getNode(X86ISD::VSHLI, DL, MVT::v4i32, V1, ShAmt);
16617 V2 = DAG.getNode(X86ISD::VSHLI, DL, MVT::v4i32, V2, ShAmt);
16618 V1 = DAG.getNode(X86ISD::VSRAI, DL, MVT::v4i32, V1, ShAmt);
16619 V2 = DAG.getNode(X86ISD::VSRAI, DL, MVT::v4i32, V2, ShAmt);
16620 PackOpc = X86ISD::PACKSS;
16623 // Now pack things back together.
16624 SDValue Result = DAG.getNode(PackOpc, DL, MVT::v8i16, V1, V2);
16625 if (NumEvenDrops == 2) {
16626 Result = DAG.getBitcast(MVT::v4i32, Result);
16627 Result = DAG.getNode(PackOpc, DL, MVT::v8i16, Result, Result);
16633 // When compacting odd (upper) elements, use PACKSS pre-SSE41.
  int NumOddDrops = canLowerByDroppingElements(Mask, false, false);
  if (NumOddDrops == 1) {
    bool HasSSE41 = Subtarget.hasSSE41();
    V1 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
                     DAG.getBitcast(MVT::v4i32, V1),
                     DAG.getTargetConstant(16, DL, MVT::i8));
    V2 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
                     DAG.getBitcast(MVT::v4i32, V2),
                     DAG.getTargetConstant(16, DL, MVT::i8));
    return DAG.getNode(HasSSE41 ? X86ISD::PACKUS : X86ISD::PACKSS, DL,
                       MVT::v8i16, V1, V2);
  }
  // Try to lower by permuting the inputs into an unpack instruction.
  if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
                                                      Mask, Subtarget, DAG))
    return Unpack;

  // If we can't directly blend but can use PSHUFB, that will be better as it
  // can both shuffle and set up the inefficient blend.
  if (!IsBlendSupported && Subtarget.hasSSSE3()) {
    bool V1InUse, V2InUse;
    return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
                                        Zeroable, DAG, V1InUse, V2InUse);
  }

  // We can always bit-blend if we have to so the fallback strategy is to
  // decompose into single-input permutes and blends/unpacks.
  return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i16, V1, V2, Mask,
                                              Subtarget, DAG);
}
/// Lower 8-lane 16-bit floating point shuffles.
static SDValue lowerV8F16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
  int NumV2Elements = count_if(Mask, [](int M) { return M >= 8; });

  if (Subtarget.hasFP16()) {
    if (NumV2Elements == 0) {
      // Check for being able to broadcast a single element.
      if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f16, V1, V2,
                                                      Mask, Subtarget, DAG))
        return Broadcast;
    }
    if (NumV2Elements == 1 && Mask[0] >= 8)
      if (SDValue V = lowerShuffleAsElementInsertion(
              DL, MVT::v8f16, V1, V2, Mask, Zeroable, Subtarget, DAG))
        return V;
  }

  // Otherwise shuffle as v8i16 and bitcast back.
  V1 = DAG.getBitcast(MVT::v8i16, V1);
  V2 = DAG.getBitcast(MVT::v8i16, V2);
  return DAG.getBitcast(MVT::v8f16,
                        DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, Mask));
}
// Lowers a unary/binary shuffle as VPERMV/VPERMV3. For non-VLX targets,
// sub-512-bit shuffles are padded to 512 bits for the shuffle and then
// the active subvector is extracted.
static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
                                     ArrayRef<int> Mask, SDValue V1, SDValue V2,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT MaskVT = VT.changeTypeToInteger();
  SDValue MaskNode;
  MVT ShuffleVT = VT;
  if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
    V1 = widenSubVector(V1, false, Subtarget, DAG, DL, 512);
    V2 = widenSubVector(V2, false, Subtarget, DAG, DL, 512);
    ShuffleVT = V1.getSimpleValueType();

    // Adjust mask to correct indices for the second input.
    int NumElts = VT.getVectorNumElements();
    unsigned Scale = 512 / VT.getSizeInBits();
    SmallVector<int, 32> AdjustedMask(Mask);
    for (int &M : AdjustedMask)
      if (NumElts <= M)
        M += (Scale - 1) * NumElts;
    MaskNode = getConstVector(AdjustedMask, MaskVT, DAG, DL, true);
    MaskNode = widenSubVector(MaskNode, false, Subtarget, DAG, DL, 512);
  } else {
    MaskNode = getConstVector(Mask, MaskVT, DAG, DL, true);
  }

  SDValue Result;
  if (V2.isUndef())
    Result = DAG.getNode(X86ISD::VPERMV, DL, ShuffleVT, MaskNode, V1);
  else
    Result = DAG.getNode(X86ISD::VPERMV3, DL, ShuffleVT, V1, MaskNode, V2);

  if (VT != ShuffleVT)
    Result = extractSubVector(Result, 0, DAG, DL, VT.getSizeInBits());

  return Result;
}
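
// Worked example (a sketch, not from the original source): a binary v4f64
// shuffle on a non-VLX target is widened to v8f64, where V2's elements now
// live at indices 8..15 instead of 4..7. With NumElts == 4 and Scale == 2,
// any mask entry M >= 4 gets (Scale - 1) * NumElts == 4 added, e.g.
//   Mask <0,5,2,7>  ->  AdjustedMask <0,9,2,11>
// which VPERMV3 then applies to the two widened 512-bit inputs.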
/// Generic lowering of v16i8 shuffles.
///
/// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
/// detect any complexity reducing interleaving. If that doesn't help, it uses
/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
  // Try to use shift instructions.
  if (SDValue Shift =
          lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget,
                              DAG, /*BitwiseOnly*/ false))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to use a zext lowering.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // Try to lower using a truncation.
  if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
                                        Subtarget, DAG))
    return V;
  if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
                                       Subtarget, DAG))
    return V;

  // See if we can use SSE4A Extraction / Insertion.
  if (Subtarget.hasSSE4A())
    if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
                                          Zeroable, DAG))
      return V;
  int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });

  // For single-input shuffles, there are some nicer lowering tricks we can use.
  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Try to use bit rotation instructions.
    if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i8, V1, Mask,
                                                 Subtarget, DAG))
      return Rotate;

    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
      return V;

    // Check whether we can widen this to an i16 shuffle by duplicating bytes.
    // Notably, this handles splat and partial-splat shuffles more efficiently.
    // However, it only makes sense if the pre-duplication shuffle simplifies
    // things significantly. Currently, this means we need to be able to
    // express the pre-duplication shuffle as an i16 shuffle.
    //
    // FIXME: We should check for other patterns which can be widened into an
    // i16 shuffle as well.
    auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
      for (int i = 0; i < 16; i += 2)
        if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
          return false;

      return true;
    };
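
    // For example (an illustrative note, not from the original source): a
    // splat-heavy mask such as <3,3, 7,7, 1,1, 0,0, ...> passes this check
    // because each even/odd byte pair requests a single source byte, which an
    // i16 shuffle plus in-element byte duplication can produce; a mask like
    // <0,1, ...> fails because bytes 0 and 1 differ within the first pair.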
    auto tryToWidenViaDuplication = [&]() -> SDValue {
      if (!canWidenViaDuplication(Mask))
        return SDValue();
      SmallVector<int, 4> LoInputs;
      copy_if(Mask, std::back_inserter(LoInputs),
              [](int M) { return M >= 0 && M < 8; });
      array_pod_sort(LoInputs.begin(), LoInputs.end());
      LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
                     LoInputs.end());
      SmallVector<int, 4> HiInputs;
      copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
      array_pod_sort(HiInputs.begin(), HiInputs.end());
      HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
                     HiInputs.end());

      bool TargetLo = LoInputs.size() >= HiInputs.size();
      ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
      ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;

      int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
      SmallDenseMap<int, int, 8> LaneMap;
      for (int I : InPlaceInputs) {
        PreDupI16Shuffle[I / 2] = I / 2;
        LaneMap[I] = I;
      }
      int j = TargetLo ? 0 : 4, je = j + 4;
      for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
        // Check if j is already a shuffle of this input. This happens when
        // there are two adjacent bytes after we move the low one.
        if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
          // If we haven't yet mapped the input, search for a slot into which
          // we can map it.
          while (j < je && PreDupI16Shuffle[j] >= 0)
            ++j;

          // We can't place the inputs into a single half with a simple i16
          // shuffle, so bail.
          if (j == je)
            return SDValue();

          // Map this input with the i16 shuffle.
          PreDupI16Shuffle[j] = MovingInputs[i] / 2;
        }

        // Update the lane map based on the mapping we ended up with.
        LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
      }
      V1 = DAG.getBitcast(
          MVT::v16i8,
          DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
                               DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
      // Unpack the bytes to form the i16s that will be shuffled into place.
      bool EvenInUse = false, OddInUse = false;
      for (int i = 0; i < 16; i += 2) {
        EvenInUse |= (Mask[i + 0] >= 0);
        OddInUse |= (Mask[i + 1] >= 0);
        if (EvenInUse && OddInUse)
          break;
      }
      V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                       MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
                       OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));

      int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
      for (int i = 0; i < 16; ++i)
        if (Mask[i] >= 0) {
          int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
          assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
          if (PostDupI16Shuffle[i / 2] < 0)
            PostDupI16Shuffle[i / 2] = MappedMask;
          else
            assert(PostDupI16Shuffle[i / 2] == MappedMask &&
                   "Conflicting entries in the original shuffle!");
        }
      return DAG.getBitcast(
          MVT::v16i8,
          DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
                               DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
    };
    if (SDValue V = tryToWidenViaDuplication())
      return V;
  }
  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
    return V;

  // Try to use byte shift instructions to mask.
  if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
                                              Zeroable, Subtarget, DAG))
    return V;

  // Check for compaction patterns.
  bool IsSingleInput = V2.isUndef();
  int NumEvenDrops = canLowerByDroppingElements(Mask, true, IsSingleInput);
  // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
  // with PSHUFB. It is important to do this before we attempt to generate any
  // blends but after all of the single-input lowerings. If the single input
  // lowerings can find an instruction sequence that is faster than a PSHUFB,
  // we want to preserve that and we can DAG combine any longer sequences into
  // a PSHUFB in the end. But once we start blending from multiple inputs,
  // the complexity of DAG combining bad patterns back into PSHUFB is too high,
  // and there are *very* few patterns that would actually be faster than the
  // PSHUFB approach because of its ability to zero lanes.
  //
  // If the mask is a binary compaction, we can more efficiently perform this
  // as a PACKUS(AND(),AND()) - which is quicker than UNPACK(PSHUFB(),PSHUFB()).
  //
  // FIXME: The only exceptions to the above are blends which are exact
  // interleavings with direct instructions supporting them. We currently don't
  // handle those well here.
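  //
  // As a rough sketch of the PSHUFB strategy (not from the original source),
  // a two-input byte shuffle becomes:
  //   t1 = pshufb v1, <mask selecting V1 bytes, 0x80 elsewhere>
  //   t2 = pshufb v2, <mask selecting V2 bytes, 0x80 elsewhere>
  //   r  = or t1, t2
  // since a PSHUFB control byte with its high bit set zeroes that lane.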
  if (Subtarget.hasSSSE3() && (IsSingleInput || NumEvenDrops != 1)) {
    bool V1InUse = false;
    bool V2InUse = false;

    SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
        DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);

    // If both V1 and V2 are in use and we can use a direct blend or an unpack,
    // do so. This avoids using them to handle blends-with-zero which is
    // important as a single pshufb is significantly faster for that.
    if (V1InUse && V2InUse) {
      if (Subtarget.hasSSE41())
        if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
                                                Zeroable, Subtarget, DAG))
          return Blend;

      // We can use an unpack to do the blending rather than an or in some
      // cases. Even though the or may be (very minorly) more efficient, we
      // prefer this lowering because there are common cases where part of
      // the complexity of the shuffles goes away when we do the final blend as
      // an unpack.
      // FIXME: It might be worth trying to detect if the unpack-feeding
      // shuffles will both be pshufb, in which case we shouldn't bother with
      // this.
      if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
              DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
        return Unpack;

      // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
      if (Subtarget.hasVBMI())
        return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, Subtarget,
                                     DAG);

      // If we have XOP we can use one VPPERM instead of multiple PSHUFBs.
      if (Subtarget.hasXOP()) {
        SDValue MaskNode = getConstVector(Mask, MVT::v16i8, DAG, DL, true);
        return DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, V1, V2, MaskNode);
      }

      // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
      // PALIGNR will be cheaper than the second PSHUFB+OR.
      if (SDValue V = lowerShuffleAsByteRotateAndPermute(
              DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
        return V;
    }

    return PSHUFB;
  }
  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerShuffleAsElementInsertion(
            DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
    return Blend;
  // Check whether a compaction lowering can be done. This handles shuffles
  // which take every Nth element for some even N. See the helper function for
  // details.
  //
  // We special case these as they can be particularly efficiently handled with
  // the PACKUSWB instruction on x86 and they show up in common patterns of
  // rearranging bytes to truncate wide elements.
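  //
  // For instance (an illustrative sketch, not from the original source), the
  // single-input mask <0,2,4,...,30> (NumEvenDrops == 1) is handled by
  // clearing the high byte of every word and packing the vector with itself:
  //   t1 = and v1, <0x00FF x8>
  //   r  = packuswb t1, t1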
  if (NumEvenDrops) {
    // NumEvenDrops is the power of two stride of the elements. Another way of
    // thinking about it is that we need to drop the even elements this many
    // times to get the original input.
    //
    // First we need to zero all the dropped bytes.
    assert(NumEvenDrops <= 3 &&
           "No support for dropping even elements more than 3 times.");
    SmallVector<SDValue, 8> WordClearOps(8, DAG.getConstant(0, DL, MVT::i16));
    for (unsigned i = 0; i != 8; i += 1 << (NumEvenDrops - 1))
      WordClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i16);
    SDValue WordClearMask = DAG.getBuildVector(MVT::v8i16, DL, WordClearOps);
    V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V1),
                     WordClearMask);
    if (!IsSingleInput)
      V2 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V2),
                       WordClearMask);

    // Now pack things back together.
    SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
                                 IsSingleInput ? V1 : V2);
    for (int i = 1; i < NumEvenDrops; ++i) {
      Result = DAG.getBitcast(MVT::v8i16, Result);
      Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
    }
    return Result;
  }
  int NumOddDrops = canLowerByDroppingElements(Mask, false, IsSingleInput);
  if (NumOddDrops == 1) {
    V1 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
                     DAG.getBitcast(MVT::v8i16, V1),
                     DAG.getTargetConstant(8, DL, MVT::i8));
    if (!IsSingleInput)
      V2 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
                       DAG.getBitcast(MVT::v8i16, V2),
                       DAG.getTargetConstant(8, DL, MVT::i8));
    return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
                       IsSingleInput ? V1 : V2);
  }
  // Handle multi-input cases by blending/unpacking single-input shuffles.
  if (NumV2Elements > 0)
    return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v16i8, V1, V2, Mask,
                                                Subtarget, DAG);

  // The fallback path for single-input shuffles widens this into two v8i16
  // vectors with unpacks, shuffles those, and then pulls them back together
  // with a pack.
  SDValue V = V1;

  std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
  std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
  for (int i = 0; i < 16; ++i)
    if (Mask[i] >= 0)
      (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];

  SDValue VLoHalf, VHiHalf;
  // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
  // them out and avoid using UNPCK{L,H} to extract the elements of V as
  // i16s.
  if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
      none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
    // Use a mask to drop the high bytes.
    VLoHalf = DAG.getBitcast(MVT::v8i16, V);
    VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
                          DAG.getConstant(0x00FF, DL, MVT::v8i16));

    // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
    VHiHalf = DAG.getUNDEF(MVT::v8i16);

    // Squash the masks to point directly into VLoHalf.
    for (int &M : LoBlendMask)
      if (M >= 0)
        M /= 2;
    for (int &M : HiBlendMask)
      if (M >= 0)
        M /= 2;
  } else {
    // Otherwise just unpack the low half of V into VLoHalf and the high half
    // into VHiHalf so that we can blend them as i16s.
    SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);

    VLoHalf = DAG.getBitcast(
        MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
    VHiHalf = DAG.getBitcast(
        MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
  }

  SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf,
                                     LoBlendMask);
  SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf,
                                     HiBlendMask);

  return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
}
/// Dispatching routine to lower various 128-bit x86 vector shuffles.
///
/// This routine breaks down the specific type of 128-bit shuffle and
/// dispatches to the lowering routines accordingly.
static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  MVT VT, SDValue V1, SDValue V2,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  switch (VT.SimpleTy) {
  case MVT::v2i64:
    return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v2f64:
    return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v4i32:
    return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v4f32:
    return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8i16:
    return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8f16:
    return lowerV8F16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16i8:
    return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Unimplemented!");
  }
}
/// Generic routine to split vector shuffle into half-sized shuffles.
///
/// This routine just extracts two subvectors, shuffles them independently, and
/// then concatenates them back together. This should work effectively with all
/// AVX vector shuffle types.
static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
                                    SDValue V2, ArrayRef<int> Mask,
                                    SelectionDAG &DAG, bool SimpleOnly) {
  assert(VT.getSizeInBits() >= 256 &&
         "Only for 256-bit or wider vector shuffles!");
  assert(V1.getSimpleValueType() == VT && "Bad operand type!");
  assert(V2.getSimpleValueType() == VT && "Bad operand type!");

  ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
  ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);

  int NumElements = VT.getVectorNumElements();
  int SplitNumElements = NumElements / 2;
  MVT ScalarVT = VT.getVectorElementType();
  MVT SplitVT = MVT::getVectorVT(ScalarVT, SplitNumElements);

  // Use splitVector/extractSubVector so that split build-vectors just build two
  // narrower build vectors. This helps shuffling with splats and zeros.
  auto SplitVector = [&](SDValue V) {
    SDValue LoV, HiV;
    std::tie(LoV, HiV) = splitVector(peekThroughBitcasts(V), DAG, DL);
    return std::make_pair(DAG.getBitcast(SplitVT, LoV),
                          DAG.getBitcast(SplitVT, HiV));
  };
  SDValue LoV1, HiV1, LoV2, HiV2;
  std::tie(LoV1, HiV1) = SplitVector(V1);
  std::tie(LoV2, HiV2) = SplitVector(V2);

  // Now create two 4-way blends of these half-width vectors.
  auto GetHalfBlendPiecesReq = [&](const ArrayRef<int> &HalfMask, bool &UseLoV1,
                                   bool &UseHiV1, bool &UseLoV2,
                                   bool &UseHiV2) {
    UseLoV1 = UseHiV1 = UseLoV2 = UseHiV2 = false;
    for (int i = 0; i < SplitNumElements; ++i) {
      int M = HalfMask[i];
      if (M >= NumElements) {
        if (M >= NumElements + SplitNumElements)
          UseHiV2 = true;
        else
          UseLoV2 = true;
      } else if (M >= 0) {
        if (M >= SplitNumElements)
          UseHiV1 = true;
        else
          UseLoV1 = true;
      }
    }
  };

  auto CheckHalfBlendUsable = [&](const ArrayRef<int> &HalfMask) -> bool {
    if (!SimpleOnly)
      return true;

    bool UseLoV1, UseHiV1, UseLoV2, UseHiV2;
    GetHalfBlendPiecesReq(HalfMask, UseLoV1, UseHiV1, UseLoV2, UseHiV2);

    return !(UseHiV1 || UseHiV2);
  };
  auto HalfBlend = [&](ArrayRef<int> HalfMask) {
    SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
    SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
    SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
    for (int i = 0; i < SplitNumElements; ++i) {
      int M = HalfMask[i];
      if (M >= NumElements) {
        V2BlendMask[i] = M - NumElements;
        BlendMask[i] = SplitNumElements + i;
      } else if (M >= 0) {
        V1BlendMask[i] = M;
        BlendMask[i] = i;
      }
    }

    bool UseLoV1, UseHiV1, UseLoV2, UseHiV2;
    GetHalfBlendPiecesReq(HalfMask, UseLoV1, UseHiV1, UseLoV2, UseHiV2);

    // Because the lowering happens after all combining takes place, we need to
    // manually combine these blend masks as much as possible so that we create
    // a minimal number of high-level vector shuffle nodes.
    assert((!SimpleOnly || (!UseHiV1 && !UseHiV2)) && "Shuffle isn't simple");

    // First try just blending the halves of V1 or V2.
    if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
      return DAG.getUNDEF(SplitVT);
    if (!UseLoV2 && !UseHiV2)
      return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    if (!UseLoV1 && !UseHiV1)
      return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);

    SDValue V1Blend, V2Blend;
    if (UseLoV1 && UseHiV1) {
      V1Blend = DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    } else {
      // We only use half of V1 so map the usage down into the final blend mask.
      V1Blend = UseLoV1 ? LoV1 : HiV1;
      for (int i = 0; i < SplitNumElements; ++i)
        if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
          BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
    }
    if (UseLoV2 && UseHiV2) {
      V2Blend = DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
    } else {
      // We only use half of V2 so map the usage down into the final blend mask.
      V2Blend = UseLoV2 ? LoV2 : HiV2;
      for (int i = 0; i < SplitNumElements; ++i)
        if (BlendMask[i] >= SplitNumElements)
          BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
    }
    return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
  };

  if (!CheckHalfBlendUsable(LoMask) || !CheckHalfBlendUsable(HiMask))
    return SDValue();

  SDValue Lo = HalfBlend(LoMask);
  SDValue Hi = HalfBlend(HiMask);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
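
// A small worked example of the split strategy (illustrative only, not from
// the original source): for a v8i32 shuffle with
//   Mask = <0, 8, 1, 9, 6, 14, 7, 15>
// LoMask = <0,8,1,9> only needs the low 128-bit halves of V1/V2 and
// HiMask = <6,14,7,15> only the high halves, so each half of the result can
// be built as a single v4i32 shuffle of the corresponding subvectors and the
// two halves concatenated back to v8i32.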
/// Either split a vector in halves or decompose the shuffles and the
/// blend/unpack.
///
/// This is provided as a good fallback for many lowerings of non-single-input
/// shuffles with more than one 128-bit lane. In those cases, we want to select
/// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                          SDValue V2, ArrayRef<int> Mask,
                                          const X86Subtarget &Subtarget,
                                          SelectionDAG &DAG) {
  assert(!V2.isUndef() && "This routine must not be used to lower single-input "
         "shuffles as it could then recurse on itself.");
  int Size = Mask.size();

  // If this can be modeled as a broadcast of two elements followed by a blend,
  // prefer that lowering. This is especially important because broadcasts can
  // often fold with memory operands.
  auto DoBothBroadcast = [&] {
    int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
    for (int M : Mask)
      if (M >= Size) {
        if (V2BroadcastIdx < 0)
          V2BroadcastIdx = M - Size;
        else if (M - Size != V2BroadcastIdx)
          return false;
      } else if (M >= 0) {
        if (V1BroadcastIdx < 0)
          V1BroadcastIdx = M;
        else if (M != V1BroadcastIdx)
          return false;
      }
    return true;
  };
  if (DoBothBroadcast())
    return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
                                                DAG);

  // If the inputs all stem from a single 128-bit lane of each input, then we
  // split them rather than blending because the split will decompose to
  // unusually few instructions.
  int LaneCount = VT.getSizeInBits() / 128;
  int LaneSize = Size / LaneCount;
  SmallBitVector LaneInputs[2];
  LaneInputs[0].resize(LaneCount, false);
  LaneInputs[1].resize(LaneCount, false);
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0)
      LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
  if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
    return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
                                /*SimpleOnly*/ false);

  // Otherwise, just fall back to decomposed shuffles and a blend/unpack. This
  // requires that the decomposed single-input shuffles don't end up here.
  return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
                                              DAG);
}
// Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
// TODO: Extend to support v8f32 (+ 512-bit shuffles).
static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
                                                 SDValue V1, SDValue V2,
                                                 ArrayRef<int> Mask,
                                                 SelectionDAG &DAG) {
  assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");

  int LHSMask[4] = {-1, -1, -1, -1};
  int RHSMask[4] = {-1, -1, -1, -1};
  unsigned SHUFPMask = 0;

  // As SHUFPD uses a single LHS/RHS element per lane, we can always
  // perform the shuffle once the lanes have been shuffled in place.
  for (int i = 0; i != 4; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    int LaneBase = i & ~1;
    auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
    LaneMask[LaneBase + (M & 1)] = M;
    SHUFPMask |= (M & 1) << i;
  }

  SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
  SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
  return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
                     DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
}
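
// Worked example (illustrative, not from the original source): for a v4f64
// shuffle with Mask = <1, 4, 3, 6> the loop builds
//   LHSMask   = <-1, 1, -1, 3>   // sources for the even result elements
//   RHSMask   = <4, -1, 6, -1>   // sources for the odd result elements
//   SHUFPMask = 0b0101
// so SHUFPD picks element 1 of each LHS lane and element 0 of each RHS lane,
// reassembling <1, 4, 3, 6>.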
/// Lower a vector shuffle crossing multiple 128-bit lanes as
/// a lane permutation followed by a per-lane permutation.
///
/// This is mainly for cases where we can have non-repeating permutes
/// in each lane.
///
/// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
/// we should investigate merging them.
static SDValue lowerShuffleAsLanePermuteAndPermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  int NumElts = VT.getVectorNumElements();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumEltsPerLane = NumElts / NumLanes;
  bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndef();

  /// Attempts to find a sublane permute with the given size
  /// that gets all elements into their target lanes.
  ///
  /// If successful, fills CrossLaneMask and InLaneMask and returns true.
  /// If unsuccessful, returns false and may overwrite InLaneMask.
  auto getSublanePermute = [&](int NumSublanes) -> SDValue {
    int NumSublanesPerLane = NumSublanes / NumLanes;
    int NumEltsPerSublane = NumElts / NumSublanes;

    SmallVector<int, 16> CrossLaneMask;
    SmallVector<int, 16> InLaneMask(NumElts, SM_SentinelUndef);
    // CrossLaneMask but one entry == one sublane.
    SmallVector<int, 16> CrossLaneMaskLarge(NumSublanes, SM_SentinelUndef);

    for (int i = 0; i != NumElts; ++i) {
      int M = Mask[i];
      if (M < 0)
        continue;

      int SrcSublane = M / NumEltsPerSublane;
      int DstLane = i / NumEltsPerLane;

      // We only need to get the elements into the right lane, not sublane.
      // So search all sublanes that make up the destination lane.
      bool Found = false;
      int DstSubStart = DstLane * NumSublanesPerLane;
      int DstSubEnd = DstSubStart + NumSublanesPerLane;
      for (int DstSublane = DstSubStart; DstSublane < DstSubEnd; ++DstSublane) {
        if (!isUndefOrEqual(CrossLaneMaskLarge[DstSublane], SrcSublane))
          continue;

        Found = true;
        CrossLaneMaskLarge[DstSublane] = SrcSublane;
        int DstSublaneOffset = DstSublane * NumEltsPerSublane;
        InLaneMask[i] = DstSublaneOffset + M % NumEltsPerSublane;
        break;
      }
      if (!Found)
        return SDValue();
    }

    // Fill CrossLaneMask using CrossLaneMaskLarge.
    narrowShuffleMaskElts(NumEltsPerSublane, CrossLaneMaskLarge, CrossLaneMask);

    if (!CanUseSublanes) {
      // If we're only shuffling a single lowest lane and the rest are identity
      // then don't bother.
      // TODO - isShuffleMaskInputInPlace could be extended to something like
      // this.
      int NumIdentityLanes = 0;
      bool OnlyShuffleLowestLane = true;
      for (int i = 0; i != NumLanes; ++i) {
        int LaneOffset = i * NumEltsPerLane;
        if (isSequentialOrUndefInRange(InLaneMask, LaneOffset, NumEltsPerLane,
                                       i * NumEltsPerLane))
          NumIdentityLanes++;
        else if (CrossLaneMask[LaneOffset] != 0)
          OnlyShuffleLowestLane = false;
      }
      if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
        return SDValue();
    }

    // Avoid returning the same shuffle operation. For example,
    // t7: v16i16 = vector_shuffle<8,9,10,11,4,5,6,7,0,1,2,3,12,13,14,15> t5,
    // undef:v16i16
    if (CrossLaneMask == Mask || InLaneMask == Mask)
      return SDValue();

    SDValue CrossLane = DAG.getVectorShuffle(VT, DL, V1, V2, CrossLaneMask);
    return DAG.getVectorShuffle(VT, DL, CrossLane, DAG.getUNDEF(VT),
                                InLaneMask);
  };

  // First attempt a solution with full lanes.
  if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes))
    return V;

  // The rest of the solutions use sublanes.
  if (!CanUseSublanes)
    return SDValue();

  // Then attempt a solution with 64-bit sublanes (vpermq).
  if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes * 2))
    return V;

  // If that doesn't work and we have fast variable cross-lane shuffle,
  // attempt 32-bit sublanes (vpermd).
  if (!Subtarget.hasFastVariableCrossLaneShuffle())
    return SDValue();

  return getSublanePermute(/*NumSublanes=*/NumLanes * 4);
}
/// Helper to compute the in-lane shuffle mask for a complete shuffle mask.
static void computeInLaneShuffleMask(const ArrayRef<int> &Mask, int LaneSize,
                                     SmallVector<int> &InLaneMask) {
  int Size = Mask.size();
  InLaneMask.assign(Mask.begin(), Mask.end());
  for (int i = 0; i < Size; ++i) {
    int &M = InLaneMask[i];
    if (M < 0)
      continue;
    if (((M % Size) / LaneSize) != (i / LaneSize))
      M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
  }
}
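
// Example (illustrative, not from the original source): for the v8f32
// single-input mask <4,5,6,7,0,1,2,3> with LaneSize == 4, every element
// crosses a lane, so each entry is redirected to the same in-lane slot of the
// lane-flipped second operand: InLaneMask = <8,9,10,11,12,13,14,15>. The
// caller can then shuffle {V1, Flipped} without crossing 128-bit lanes.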
/// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
/// source with a lane permutation.
///
/// This lowering strategy results in four instructions in the worst case for a
/// single-input cross lane shuffle which is lower than any other fully general
/// cross-lane shuffle strategy I'm aware of. Special cases for each particular
/// shuffle pattern should be handled prior to trying this lowering.
static SDValue lowerShuffleAsLanePermuteAndShuffle(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  // FIXME: This should probably be generalized for 512-bit vectors as well.
  assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
  int Size = Mask.size();
  int LaneSize = Size / 2;

  // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
  // Only do this if the elements aren't all from the lower lane,
  // otherwise we're (probably) better off doing a split.
  if (VT == MVT::v4f64 &&
      !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
    return lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG);

  // If there are only inputs from one 128-bit lane, splitting will in fact be
  // less expensive. The flags track whether the given lane contains an element
  // that crosses to another lane.
  bool AllLanes;
  if (!Subtarget.hasAVX2()) {
    bool LaneCrossing[2] = {false, false};
    for (int i = 0; i < Size; ++i)
      if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
        LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
    AllLanes = LaneCrossing[0] && LaneCrossing[1];
  } else {
    bool LaneUsed[2] = {false, false};
    for (int i = 0; i < Size; ++i)
      if (Mask[i] >= 0)
        LaneUsed[(Mask[i] % Size) / LaneSize] = true;
    AllLanes = LaneUsed[0] && LaneUsed[1];
  }

  // TODO - we could support shuffling V2 in the Flipped input.
  assert(V2.isUndef() &&
         "This last part of this routine only works on single input shuffles");

  SmallVector<int> InLaneMask;
  computeInLaneShuffleMask(Mask, Mask.size() / 2, InLaneMask);

  assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
         "In-lane shuffle mask expected");

  // If we're not using both lanes in each lane and the in-lane mask is not
  // repeating, then we're better off splitting.
  if (!AllLanes && !is128BitLaneRepeatedShuffleMask(VT, InLaneMask))
    return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
                                /*SimpleOnly*/ false);

  // Flip the lanes, and shuffle the results which should now be in-lane.
  MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
  SDValue Flipped = DAG.getBitcast(PVT, V1);
  Flipped =
      DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
  Flipped = DAG.getBitcast(VT, Flipped);
  return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
}
/// Handle lowering 2-lane 128-bit shuffles.
static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
                                  SDValue V2, ArrayRef<int> Mask,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  if (V2.isUndef()) {
    // Attempt to match VBROADCAST*128 subvector broadcast load.
    bool SplatLo = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1);
    bool SplatHi = isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1);
    if ((SplatLo || SplatHi) && !Subtarget.hasAVX512() && V1.hasOneUse() &&
        X86::mayFoldLoad(peekThroughOneUseBitcasts(V1), Subtarget)) {
      MVT MemVT = VT.getHalfNumVectorElementsVT();
      unsigned Ofs = SplatLo ? 0 : MemVT.getStoreSize();
      auto *Ld = cast<LoadSDNode>(peekThroughOneUseBitcasts(V1));
      if (SDValue BcstLd = getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, DL,
                                             VT, MemVT, Ld, Ofs, DAG))
        return BcstLd;
    }

    // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
    if (Subtarget.hasAVX2())
      return SDValue();
  }

  bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());

  SmallVector<int, 4> WidenedMask;
  if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
    return SDValue();

  bool IsLowZero = (Zeroable & 0x3) == 0x3;
  bool IsHighZero = (Zeroable & 0xc) == 0xc;

  // Try to use an insert into a zero vector.
  if (WidenedMask[0] == 0 && IsHighZero) {
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
                              DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       getZeroVector(VT, Subtarget, DAG, DL), LoV,
                       DAG.getIntPtrConstant(0, DL));
  }

  // TODO: If minimizing size and one of the inputs is a zero vector and the
  // zero vector has only one use, we could use a VPERM2X128 to save the
  // instruction bytes needed to explicitly generate the zero vector.

  // Blends are faster and handle all the non-lane-crossing cases.
  if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
                                          Subtarget, DAG))
    return Blend;

  // If either input operand is a zero vector, use VPERM2X128 because its mask
  // allows us to replace the zero input with an implicit zero.
  if (!IsLowZero && !IsHighZero) {
    // Check for patterns which can be matched with a single insert of a
    // 128-bit subvector.
    bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2);
    if (OnlyUsesV1 || isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2)) {

      // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
      // this will likely become vinsertf128 which can't fold a 256-bit memop.
      if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
        MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
        SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
                                     OnlyUsesV1 ? V1 : V2,
                                     DAG.getIntPtrConstant(0, DL));
        return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
                           DAG.getIntPtrConstant(2, DL));
      }
    }

    // Try to use SHUF128 if possible.
    if (Subtarget.hasVLX()) {
      if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
        unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
                            ((WidenedMask[1] % 2) << 1);
        return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
                           DAG.getTargetConstant(PermMask, DL, MVT::i8));
      }
    }
  }

  // Otherwise form a 128-bit permutation. After accounting for undefs,
  // convert the 64-bit shuffle mask selection values into 128-bit
  // selection bits by dividing the indexes by 2 and shifting into positions
  // defined by a vperm2*128 instruction's immediate control byte.
  //
  // The immediate permute control byte looks like this:
  //    [1:0] - select 128 bits from sources for low half of destination
  //    [2]   - ignore
  //    [3]   - zero low half of destination
  //    [5:4] - select 128 bits from sources for high half of destination
  //    [6]   - ignore
  //    [7]   - zero high half of destination

  assert((WidenedMask[0] >= 0 || IsLowZero) &&
         (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");

  unsigned PermMask = 0;
  PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
  PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);

  // Check the immediate mask and replace unused sources with undef.
  if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
    V1 = DAG.getUNDEF(VT);
  if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
    V2 = DAG.getUNDEF(VT);

  return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
}
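
// Immediate example (illustrative, not from the original source): a v4f64
// shuffle with WidenedMask = <1, 2> (upper half of V1, lower half of V2)
// yields PermMask = (1 << 0) | (2 << 4) = 0x21, i.e. roughly
//   vperm2f128 $0x21, %ymm1, %ymm0, %ymm0
// while a zeroable half would instead set bit 3 (0x08) or bit 7 (0x80).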
/// Lower a vector shuffle by first fixing the 128-bit lanes and then
/// shuffling each lane.
///
/// This attempts to create a repeated lane shuffle where each lane uses one
/// or two of the lanes of the inputs. The lanes of the input vectors are
/// shuffled in one or two independent shuffles to get the lanes into the
/// position needed by the final shuffle.
static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(!V2.isUndef() && "This is only useful with multiple inputs.");

  if (is128BitLaneRepeatedShuffleMask(VT, Mask))
    return SDValue();

  int NumElts = Mask.size();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumLaneElts = 128 / VT.getScalarSizeInBits();
  SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
  SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});

  // First pass will try to fill in the RepeatMask from lanes that need two
  // sources.
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    int Srcs[2] = {-1, -1};
    SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
    for (int i = 0; i != NumLaneElts; ++i) {
      int M = Mask[(Lane * NumLaneElts) + i];
      if (M < 0)
        continue;
      // Determine which of the possible input lanes (NumLanes from each source)
      // this element comes from. Assign that as one of the sources for this
      // lane. We can assign up to 2 sources for this lane. If we run out of
      // sources we can't do anything.
      int LaneSrc = M / NumLaneElts;
      int Src;
      if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
        Src = 0;
      else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
        Src = 1;
      else
        return SDValue();

      Srcs[Src] = LaneSrc;
      InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
    }

    // If this lane has two sources, see if it fits with the repeat mask so far.
    if (Srcs[1] < 0)
      continue;

    LaneSrcs[Lane][0] = Srcs[0];
    LaneSrcs[Lane][1] = Srcs[1];

    auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
      assert(M1.size() == M2.size() && "Unexpected mask size");
      for (int i = 0, e = M1.size(); i != e; ++i)
        if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
          return false;
      return true;
    };

    auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
      assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
      for (int i = 0, e = MergedMask.size(); i != e; ++i) {
        int M = Mask[i];
        if (M < 0)
          continue;
        assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
               "Unexpected mask element");
        MergedMask[i] = M;
      }
    };

    if (MatchMasks(InLaneMask, RepeatMask)) {
      // Merge this lane mask into the final repeat mask.
      MergeMasks(InLaneMask, RepeatMask);
      continue;
    }

    // Didn't find a match. Swap the operands and try again.
    std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
    ShuffleVectorSDNode::commuteMask(InLaneMask);

    if (MatchMasks(InLaneMask, RepeatMask)) {
      // Merge this lane mask into the final repeat mask.
      MergeMasks(InLaneMask, RepeatMask);
      continue;
    }

    // Couldn't find a match with the operands in either order.
    return SDValue();
  }

  // Now handle any lanes with only one source.
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    // If this lane has already been processed, skip it.
    if (LaneSrcs[Lane][0] >= 0)
      continue;

    for (int i = 0; i != NumLaneElts; ++i) {
      int M = Mask[(Lane * NumLaneElts) + i];
      if (M < 0)
        continue;

      // If RepeatMask isn't defined yet we can define it ourselves.
      if (RepeatMask[i] < 0)
        RepeatMask[i] = M % NumLaneElts;

      if (RepeatMask[i] < NumElts) {
        if (RepeatMask[i] != M % NumLaneElts)
          return SDValue();
        LaneSrcs[Lane][0] = M / NumLaneElts;
      } else {
        if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
          return SDValue();
        LaneSrcs[Lane][1] = M / NumLaneElts;
      }
    }

    if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
      return SDValue();
  }

  SmallVector<int, 16> NewMask(NumElts, -1);
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    int Src = LaneSrcs[Lane][0];
    for (int i = 0; i != NumLaneElts; ++i) {
      int M = -1;
      if (Src >= 0)
        M = Src * NumLaneElts + i;
      NewMask[Lane * NumLaneElts + i] = M;
    }
  }
  SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
  // Ensure we didn't get back the shuffle we started with.
  // FIXME: This is a hack to make up for some splat handling code in
  // getVectorShuffle.
  if (isa<ShuffleVectorSDNode>(NewV1) &&
      cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
    return SDValue();

  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    int Src = LaneSrcs[Lane][1];
    for (int i = 0; i != NumLaneElts; ++i) {
      int M = -1;
      if (Src >= 0)
        M = Src * NumLaneElts + i;
      NewMask[Lane * NumLaneElts + i] = M;
    }
  }
  SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
  // Ensure we didn't get back the shuffle we started with.
  // FIXME: This is a hack to make up for some splat handling code in
  // getVectorShuffle.
  if (isa<ShuffleVectorSDNode>(NewV2) &&
      cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
    return SDValue();

  for (int i = 0; i != NumElts; ++i) {
    if (Mask[i] < 0) {
      NewMask[i] = -1;
      continue;
    }
    NewMask[i] = RepeatMask[i % NumLaneElts];
    if (NewMask[i] < 0)
      continue;

    NewMask[i] += (i / NumLaneElts) * NumLaneElts;
  }
  return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
}
/// If the input shuffle mask results in a vector that is undefined in all upper
/// or lower half elements and that mask accesses only 2 halves of the
/// shuffle's operands, return true. A mask of half the width with mask indexes
/// adjusted to access the extracted halves of the original shuffle operands is
/// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
/// lower half of each input operand is accessed.
static bool
getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
                   int &HalfIdx1, int &HalfIdx2) {
  assert((Mask.size() == HalfMask.size() * 2) &&
         "Expected input mask to be twice as long as output");

  // Exactly one half of the result must be undef to allow narrowing.
  bool UndefLower = isUndefLowerHalf(Mask);
  bool UndefUpper = isUndefUpperHalf(Mask);
  if (UndefLower == UndefUpper)
    return false;

  unsigned HalfNumElts = HalfMask.size();
  unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
  HalfIdx1 = -1;
  HalfIdx2 = -1;
  for (unsigned i = 0; i != HalfNumElts; ++i) {
    int M = Mask[i + MaskIndexOffset];
    if (M < 0) {
      HalfMask[i] = M;
      continue;
    }

    // Determine which of the 4 half vectors this element is from.
    // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
    int HalfIdx = M / HalfNumElts;

    // Determine the element index into its half vector source.
    int HalfElt = M % HalfNumElts;

    // We can shuffle with up to 2 half vectors, set the new 'half'
    // shuffle mask accordingly.
    if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
      HalfMask[i] = HalfElt;
      HalfIdx1 = HalfIdx;
      continue;
    }
    if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
      HalfMask[i] = HalfElt + HalfNumElts;
      HalfIdx2 = HalfIdx;
      continue;
    }

    // Too many half vectors referenced.
    return false;
  }

  return true;
}
/// Given the output values from getHalfShuffleMask(), create a half width
/// shuffle of extracted vectors followed by an insert back to full width.
static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
                                     ArrayRef<int> HalfMask, int HalfIdx1,
                                     int HalfIdx2, bool UndefLower,
                                     SelectionDAG &DAG, bool UseConcat = false) {
  assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
  assert(V1.getValueType().isSimple() && "Expecting only simple types");

  MVT VT = V1.getSimpleValueType();
  MVT HalfVT = VT.getHalfNumVectorElementsVT();
  unsigned HalfNumElts = HalfVT.getVectorNumElements();

  auto getHalfVector = [&](int HalfIdx) {
    if (HalfIdx < 0)
      return DAG.getUNDEF(HalfVT);
    SDValue V = (HalfIdx < 2 ? V1 : V2);
    HalfIdx = (HalfIdx % 2) * HalfNumElts;
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
                       DAG.getIntPtrConstant(HalfIdx, DL));
  };

  // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
  SDValue Half1 = getHalfVector(HalfIdx1);
  SDValue Half2 = getHalfVector(HalfIdx2);
  SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
  if (UseConcat) {
    SDValue Op0 = V;
    SDValue Op1 = DAG.getUNDEF(HalfVT);
    if (UndefLower)
      std::swap(Op0, Op1);
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
  }

  unsigned Offset = UndefLower ? HalfNumElts : 0;
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
                     DAG.getIntPtrConstant(Offset, DL));
}
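
// For instance (illustrative, not from the original source), with a v8f32
// shuffle whose upper half is undef, HalfIdx1 == 0 and HalfIdx2 == 3 extract
// the low 128 bits of V1 and the high 128 bits of V2, shuffle them as v4f32
// using HalfMask, and insert the result into the low half of an undef v8f32.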
/// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
/// This allows for fast cases such as subvector extraction/insertion
/// or shuffling smaller vector types which can lower more efficiently.
static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
                                         SDValue V2, ArrayRef<int> Mask,
                                         const X86Subtarget &Subtarget,
                                         SelectionDAG &DAG) {
  assert((VT.is256BitVector() || VT.is512BitVector()) &&
         "Expected 256-bit or 512-bit vector");

  bool UndefLower = isUndefLowerHalf(Mask);
  if (!UndefLower && !isUndefUpperHalf(Mask))
    return SDValue();

  assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
         "Completely undef shuffle mask should have been simplified already");

  // Upper half is undef and lower half is whole upper subvector.
  // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
  MVT HalfVT = VT.getHalfNumVectorElementsVT();
  unsigned HalfNumElts = HalfVT.getVectorNumElements();
  if (!UndefLower &&
      isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
    SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
                             DAG.getIntPtrConstant(HalfNumElts, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
                       DAG.getIntPtrConstant(0, DL));
  }

  // Lower half is undef and upper half is whole lower subvector.
  // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
  if (UndefLower &&
      isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
    SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
                             DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
                       DAG.getIntPtrConstant(HalfNumElts, DL));
  }

  int HalfIdx1, HalfIdx2;
  SmallVector<int, 8> HalfMask(HalfNumElts);
  if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
    return SDValue();

  assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");

  // Only shuffle the halves of the inputs when useful.
  unsigned NumLowerHalves =
      (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
  unsigned NumUpperHalves =
      (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
  assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");

  // Determine the larger pattern of undef/halves, then decide if it's worth
  // splitting the shuffle based on subtarget capabilities and types.
  unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
  if (!UndefLower) {
    // XXXXuuuu: no insert is needed.
    // Always extract lowers when setting lower - these are all free subreg ops.
    if (NumUpperHalves == 0)
      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                   UndefLower, DAG);

    if (NumUpperHalves == 1) {
      // AVX2 has efficient 32/64-bit element cross-lane shuffles.
      if (Subtarget.hasAVX2()) {
        // extract128 + vunpckhps/vshufps is better than vblend + vpermps.
        if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
            !is128BitUnpackShuffleMask(HalfMask, DAG) &&
            (!isSingleSHUFPSMask(HalfMask) ||
             Subtarget.hasFastVariableCrossLaneShuffle()))
          return SDValue();
        // If this is a unary shuffle (assume that the 2nd operand is
        // canonicalized to undef), then we can use vpermpd. Otherwise, we
        // are better off extracting the upper half of 1 operand and using a
        // narrow shuffle.
        if (EltWidth == 64 && V2.isUndef())
          return SDValue();
      }
      // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
      if (Subtarget.hasAVX512() && VT.is512BitVector())
        return SDValue();
      // Extract + narrow shuffle is better than the wide alternative.
      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                   UndefLower, DAG);
    }

    // Don't extract both uppers, instead shuffle and then extract.
    assert(NumUpperHalves == 2 && "Half vector count went wrong");
    return SDValue();
  }

  // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
  if (NumUpperHalves == 0) {
    // AVX2 has efficient 64-bit element cross-lane shuffles.
    // TODO: Refine to account for unary shuffle, splat, and other masks?
    if (Subtarget.hasAVX2() && EltWidth == 64)
      return SDValue();
    // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
    if (Subtarget.hasAVX512() && VT.is512BitVector())
      return SDValue();
    // Narrow shuffle + insert is better than the wide alternative.
    return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                 UndefLower, DAG, /*UseConcat*/ true);
  }

  // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
  return SDValue();
}
/// Handle case where shuffle sources are coming from the same 128-bit lane and
/// every lane can be represented as the same repeating mask - allowing us to
/// shuffle the sources with the repeating shuffle and then permute the result
/// to the destination lanes.
static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  int NumElts = VT.getVectorNumElements();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumLaneElts = NumElts / NumLanes;

  // On AVX2 we may be able to just shuffle the lowest elements and then
  // broadcast the result.
  if (Subtarget.hasAVX2()) {
    for (unsigned BroadcastSize : {16, 32, 64}) {
      if (BroadcastSize <= VT.getScalarSizeInBits())
        continue;
      int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();

      // Attempt to match a repeating pattern every NumBroadcastElts,
      // accounting for UNDEFs but only referencing the lowest 128-bit
      // lane of the inputs.
      auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
        for (int i = 0; i != NumElts; i += NumBroadcastElts)
          for (int j = 0; j != NumBroadcastElts; ++j) {
            int M = Mask[i + j];
            if (M < 0)
              continue;
            int &R = RepeatMask[j];
            if (0 != ((M % NumElts) / NumLaneElts))
              return false;
            if (0 <= R && R != M)
              return false;
            R = M;
          }
        return true;
      };

      SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
      if (!FindRepeatingBroadcastMask(RepeatMask))
        continue;

      // Shuffle the (lowest) repeated elements in place for broadcast.
      SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);

      // Shuffle the actual broadcast.
      SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
      for (int i = 0; i != NumElts; i += NumBroadcastElts)
        for (int j = 0; j != NumBroadcastElts; ++j)
          BroadcastMask[i + j] = j;
      return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
                                  BroadcastMask);
    }
  }

  // Bail if the shuffle mask doesn't cross 128-bit lanes.
  if (!is128BitLaneCrossingShuffleMask(VT, Mask))
    return SDValue();

  // Bail if we already have a repeated lane shuffle mask.
  if (is128BitLaneRepeatedShuffleMask(VT, Mask))
    return SDValue();

  // Helper to look for repeated mask in each split sublane, and that those
  // sublanes can then be permuted into place.
  auto ShuffleSubLanes = [&](int SubLaneScale) {
    int NumSubLanes = NumLanes * SubLaneScale;
    int NumSubLaneElts = NumLaneElts / SubLaneScale;

    // Check that all the sources are coming from the same lane and see if we
    // can form a repeating shuffle mask (local to each sub-lane). At the same
    // time, determine the source sub-lane for each destination sub-lane.
    int TopSrcSubLane = -1;
    SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
    SmallVector<SmallVector<int, 8>> RepeatedSubLaneMasks(
        SubLaneScale,
        SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef));

    for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
      // Extract the sub-lane mask, check that it all comes from the same lane
      // and normalize the mask entries to come from the first lane.
      int SrcLane = -1;
      SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
      for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
        int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
        if (M < 0)
          continue;
        int Lane = (M % NumElts) / NumLaneElts;
        if ((0 <= SrcLane) && (SrcLane != Lane))
          return SDValue();
        SrcLane = Lane;
        int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
        SubLaneMask[Elt] = LocalM;
      }

      // Whole sub-lane is UNDEF.
      if (SrcLane < 0)
        continue;

      // Attempt to match against the candidate repeated sub-lane masks.
      for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
        auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
          for (int i = 0; i != NumSubLaneElts; ++i) {
            if (M1[i] < 0 || M2[i] < 0)
              continue;
            if (M1[i] != M2[i])
              return false;
          }
          return true;
        };

        auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
        if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
          continue;

        // Merge the sub-lane mask into the matching repeated sub-lane mask.
        for (int i = 0; i != NumSubLaneElts; ++i) {
          int M = SubLaneMask[i];
          if (M < 0)
            continue;
          assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
                 "Unexpected mask element");
          RepeatedSubLaneMask[i] = M;
        }

        // Track the top most source sub-lane - by setting the remaining to
        // UNDEF we can greatly simplify shuffle matching.
        int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
        TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
        Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
        break;
      }

      // Bail if we failed to find a matching repeated sub-lane mask.
      if (Dst2SrcSubLanes[DstSubLane] < 0)
        return SDValue();
    }
    assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
           "Unexpected source lane");

    // Create a repeating shuffle mask for the entire vector.
    SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
    for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
      int Lane = SubLane / SubLaneScale;
      auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
      for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
        int M = RepeatedSubLaneMask[Elt];
        if (M < 0)
          continue;
        int Idx = (SubLane * NumSubLaneElts) + Elt;
        RepeatedMask[Idx] = M + (Lane * NumLaneElts);
      }
    }

    // Shuffle each source sub-lane to its destination.
    SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
    for (int i = 0; i != NumElts; i += NumSubLaneElts) {
      int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
      if (SrcSubLane < 0)
        continue;
      for (int j = 0; j != NumSubLaneElts; ++j)
        SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
    }

    // Avoid returning the same shuffle operation.
    // v8i32 = vector_shuffle<0,1,4,5,2,3,6,7> t5, undef:v8i32
    if (RepeatedMask == Mask || SubLaneMask == Mask)
      return SDValue();

    SDValue RepeatedShuffle =
        DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);

    return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
                                SubLaneMask);
  };

  // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
  // (with PERMQ/PERMPD). On AVX2/AVX512BW targets, permuting 32-bit sub-lanes,
  // even with a variable shuffle, can be worth it for v32i8/v64i8 vectors.
  // Otherwise we can only permute whole 128-bit lanes.
  int MinSubLaneScale = 1, MaxSubLaneScale = 1;
  if (Subtarget.hasAVX2() && VT.is256BitVector()) {
    bool OnlyLowestElts = isUndefOrInRange(Mask, 0, NumLaneElts);
    MinSubLaneScale = 2;
    MaxSubLaneScale =
        (!OnlyLowestElts && V2.isUndef() && VT == MVT::v32i8) ? 4 : 2;
  }
  if (Subtarget.hasBWI() && VT == MVT::v64i8)
    MinSubLaneScale = MaxSubLaneScale = 4;

  for (int Scale = MinSubLaneScale; Scale <= MaxSubLaneScale; Scale *= 2)
    if (SDValue Shuffle = ShuffleSubLanes(Scale))
      return Shuffle;

  return SDValue();
}
static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
                                   bool &ForceV1Zero, bool &ForceV2Zero,
                                   unsigned &ShuffleImm, ArrayRef<int> Mask,
                                   const APInt &Zeroable) {
  int NumElts = VT.getVectorNumElements();
  assert(VT.getScalarSizeInBits() == 64 &&
         (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
         "Unexpected data type for VSHUFPD");
  assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
         "Illegal shuffle mask");

  bool ZeroLane[2] = { true, true };
  for (int i = 0; i < NumElts; ++i)
    ZeroLane[i & 1] &= Zeroable[i];

  // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ..
  // Mask for V4F64: 0/1, 4/5, 2/3, 6/7..
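  // Per 128-bit lane, SHUFPD writes element 0 or 1 of V1 to the even result
  // slot and element 0 or 1 of V2 to the odd slot, selected by one immediate
  // bit per result element. E.g. for v4f64, Mask {0, 5, 2, 7} selects
  // V1[0],V2[1],V1[2],V2[3] and encodes as ShuffleImm 0b1010.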
  ShuffleImm = 0;
  bool ShufpdMask = true;
  bool CommutableMask = true;
  for (int i = 0; i < NumElts; ++i) {
    if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
      continue;
    if (Mask[i] < 0)
      return false;
    int Val = (i & 6) + NumElts * (i & 1);
    int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
    if (Mask[i] < Val || Mask[i] > Val + 1)
      ShufpdMask = false;
    if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
      CommutableMask = false;
    ShuffleImm |= (Mask[i] % 2) << i;
  }

  if (!ShufpdMask && !CommutableMask)
    return false;

  if (!ShufpdMask && CommutableMask)
    std::swap(V1, V2);

  ForceV1Zero = ZeroLane[0];
  ForceV2Zero = ZeroLane[1];
  return true;
}

static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
                                      SDValue V2, ArrayRef<int> Mask,
                                      const APInt &Zeroable,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
         "Unexpected data type for VSHUFPD");

  unsigned Immediate = 0;
  bool ForceV1Zero = false, ForceV2Zero = false;
  if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
                              Mask, Zeroable))
    return SDValue();

  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
  if (ForceV1Zero)
    V1 = getZeroVector(VT, Subtarget, DAG, DL);
  if (ForceV2Zero)
    V2 = getZeroVector(VT, Subtarget, DAG, DL);

  return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
                     DAG.getTargetConstant(Immediate, DL, MVT::i8));
}
// Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
// by zeroable elements in the remaining 24 elements. Turn this into two
// vmovqb instructions shuffled together.
static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             const APInt &Zeroable,
                                             SelectionDAG &DAG) {
  assert(VT == MVT::v32i8 && "Unexpected type!");

  // The first 8 indices should be every 8th element.
  if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
    return SDValue();

  // Remaining elements need to be zeroable.
  if (Zeroable.countl_one() < (Mask.size() - 8))
    return SDValue();

  V1 = DAG.getBitcast(MVT::v4i64, V1);
  V2 = DAG.getBitcast(MVT::v4i64, V2);

  V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
  V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);

  // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
  // the upper bits of the result using an unpckldq.
  SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
                                        { 0,  1,  2,  3, 16, 17, 18, 19,
                                          4,  5,  6,  7, 20, 21, 22, 23 });
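  // Each VTRUNC leaves its 4 result bytes in positions 0-3 and zeros
  // elsewhere, so this unpckldq-style mask interleaves dword 0 of V1 and V2
  // (the data) followed by dword 1 of each (known zeros).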
  // Insert the unpckldq into a zero vector to widen to v32i8.
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
                     DAG.getConstant(0, DL, MVT::v32i8), Unpack,
                     DAG.getIntPtrConstant(0, DL));
}

// a = shuffle v1, v2, mask1    ; interleaving lower lanes of v1 and v2
// b = shuffle v1, v2, mask2    ; interleaving higher lanes of v1 and v2
//  =>
// ul = unpckl v1, v2
// uh = unpckh v1, v2
// a = vperm ul, uh
// b = vperm ul, uh
//
// Pattern-match interleave(256b v1, 256b v2) -> 512b v3 and lower it into unpck
// and permute. We cannot directly match v3 because it is split into two
// 256-bit vectors in earlier isel stages. Therefore, this function matches a
// pair of 256-bit shuffles and makes sure the masks are consecutive.
//
// Once unpck and permute nodes are created, the permute corresponding to this
// shuffle is returned, while the other permute replaces the other half of the
// shuffle in the selection dag.
static SDValue lowerShufflePairAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
                                                 SDValue V1, SDValue V2,
                                                 ArrayRef<int> Mask,
                                                 SelectionDAG &DAG) {
  if (VT != MVT::v8f32 && VT != MVT::v8i32 && VT != MVT::v16i16 &&
      VT != MVT::v32i8)
    return SDValue();

  // <B0, B1, B0+1, B1+1, ..., >
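  // E.g. the lower-half interleave of two v8i32s has the mask
  // <0, 8, 1, 9, 2, 10, 3, 11>, i.e. Begin0 == 0 and Begin1 == 8.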
  auto IsInterleavingPattern = [&](ArrayRef<int> Mask, unsigned Begin0,
                                   unsigned Begin1) {
    size_t Size = Mask.size();
    assert(Size % 2 == 0 && "Expected even mask size");
    for (unsigned I = 0; I < Size; I += 2) {
      if (Mask[I] != (int)(Begin0 + I / 2) ||
          Mask[I + 1] != (int)(Begin1 + I / 2))
        return false;
    }
    return true;
  };

  // Check which half this shuffle node is.
  int NumElts = VT.getVectorNumElements();
  size_t FirstQtr = NumElts / 2;
  size_t ThirdQtr = NumElts + NumElts / 2;
  bool IsFirstHalf = IsInterleavingPattern(Mask, 0, NumElts);
  bool IsSecondHalf = IsInterleavingPattern(Mask, FirstQtr, ThirdQtr);
  if (!IsFirstHalf && !IsSecondHalf)
    return SDValue();

  // Find the intersection between shuffle users of V1 and V2.
  SmallVector<SDNode *, 2> Shuffles;
  for (SDNode *User : V1->uses())
    if (User->getOpcode() == ISD::VECTOR_SHUFFLE && User->getOperand(0) == V1 &&
        User->getOperand(1) == V2)
      Shuffles.push_back(User);
  // Limit user size to two for now.
  if (Shuffles.size() != 2)
    return SDValue();
  // Find out which half of the 512-bit shuffle each smaller shuffle is.
  auto *SVN1 = cast<ShuffleVectorSDNode>(Shuffles[0]);
  auto *SVN2 = cast<ShuffleVectorSDNode>(Shuffles[1]);
  SDNode *FirstHalf;
  SDNode *SecondHalf;
  if (IsInterleavingPattern(SVN1->getMask(), 0, NumElts) &&
      IsInterleavingPattern(SVN2->getMask(), FirstQtr, ThirdQtr)) {
    FirstHalf = Shuffles[0];
    SecondHalf = Shuffles[1];
  } else if (IsInterleavingPattern(SVN1->getMask(), FirstQtr, ThirdQtr) &&
             IsInterleavingPattern(SVN2->getMask(), 0, NumElts)) {
    FirstHalf = Shuffles[1];
    SecondHalf = Shuffles[0];
  } else {
    return SDValue();
  }

  // Lower into unpck and perm. Return the perm of this shuffle and replace
  // the other.
  SDValue Unpckl = DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
  SDValue Unpckh = DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
  SDValue Perm1 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
                              DAG.getTargetConstant(0x20, DL, MVT::i8));
  SDValue Perm2 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
                              DAG.getTargetConstant(0x31, DL, MVT::i8));
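  // VPERM2X128's immediate selects one 128-bit lane per nibble from the
  // concatenation {src1, src2}: 0x20 concatenates the low lanes of the two
  // unpacks, 0x31 the high lanes.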
  if (IsFirstHalf) {
    DAG.ReplaceAllUsesWith(SecondHalf, &Perm2);
    return Perm1;
  }
  DAG.ReplaceAllUsesWith(FirstHalf, &Perm1);
  return Perm2;
}

/// Handle lowering of 4-lane 64-bit floating point shuffles.
///
/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
                                     Subtarget, DAG))
    return V;

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Use low duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);

    if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
      // Non-half-crossing single input shuffles can be lowered with an
      // interleaved permutation.
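      // VPERMILPD's immediate bit i selects which of the two elements of
      // 128-bit lane (i / 2) is written to result element i, so bit i is set
      // iff Mask[i] names the odd element of its lane.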
      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
                         DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
    }

    // With AVX2 we have direct support for this permutation.
    if (Subtarget.hasAVX2())
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));

    // Try to create an in-lane repeating shuffle mask and then shuffle the
    // results into the target lanes.
    if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
      return V;

    // Try to permute the lanes and then use a per-lane permute.
    if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
                                                        Mask, DAG, Subtarget))
      return V;

    // Otherwise, fall back.
    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
                                               DAG, Subtarget);
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check if the blend happens to exactly fit that of SHUFPD.
  if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Op;

  bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
  bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);

  // If we have lane crossing shuffles AND they don't all come from the lower
  // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
  // TODO: Handle BUILD_VECTOR sources which getVectorShuffle currently
  // canonicalizes to a blend of splat which isn't necessary for this combine.
  if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
      !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
      (V1.getOpcode() != ISD::BUILD_VECTOR) &&
      (V2.getOpcode() != ISD::BUILD_VECTOR))
    return lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2, Mask, DAG);

  // If we have one input in place, then we can permute the other input and
  // blend the result.
  if (V1IsInPlace || V2IsInPlace)
    return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
                                                Subtarget, DAG);

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
    return V;

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle. However, if we have AVX2 and either input is already in place,
  // we will be able to shuffle even across lanes the other input in a single
  // instruction so skip this pattern.
  if (!(Subtarget.hasAVX2() && (V1IsInPlace || V2IsInPlace)))
    if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
      return V;

  // If we have VLX support, we can use VEXPAND.
  if (Subtarget.hasVLX())
    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;

  // If we have AVX2 then we always want to lower with a blend because at v4 we
  // can fully permute the elements.
  if (Subtarget.hasAVX2())
    return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
                                                Subtarget, DAG);

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
                                    Subtarget, DAG);
}

/// Handle lowering of 4-lane 64-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");

  if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
                                     Subtarget, DAG))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  // Try to use shift instructions if fast.
  if (Subtarget.preferLowerShuffleAsShift())
    if (SDValue Shift =
            lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
                                Subtarget, DAG, /*BitwiseOnly*/ true))
      return Shift;

  if (V2.isUndef()) {
    // When the shuffle is mirrored between the 128-bit lanes of the unit, we
    // can use lower latency instructions that will operate on both lanes.
    SmallVector<int, 2> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
      SmallVector<int, 4> PSHUFDMask;
      narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
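      // E.g. the repeated v2i64 mask {1, 0} narrows to the v4i32 PSHUFD mask
      // {2, 3, 0, 1}, swapping the two halves of each 128-bit lane.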
      return DAG.getBitcast(
          MVT::v4i64,
          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
                      DAG.getBitcast(MVT::v8i32, V1),
                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
    }

    // AVX2 provides a direct instruction for permuting a single input across
    // lanes.
    return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, Zeroable, Subtarget,
                              DAG, /*BitwiseOnly*/ false))
    return Shift;

  // If we have VLX support, we can use VALIGN or VEXPAND.
  if (Subtarget.hasVLX()) {
    if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i64, V1, V2, Mask,
                                              Subtarget, DAG))
      return Rotate;

    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;
  }

  // Try to use PALIGNR.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
    return V;

  bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
  bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);

  // If we have one input in place, then we can permute the other input and
  // blend the result.
  if (V1IsInPlace || V2IsInPlace)
    return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
                                                Subtarget, DAG);

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
    return V;

  // Try to lower to PERMQ(BLENDD(V1,V2)).
  if (SDValue V =
          lowerShuffleAsBlendAndPermute(DL, MVT::v4i64, V1, V2, Mask, DAG))
    return V;

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle. However, if we have AVX2 and either input is already in place,
  // we will be able to shuffle even across lanes the other input in a single
  // instruction so skip this pattern.
  if (!V1IsInPlace && !V2IsInPlace)
    if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
            DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
      return Result;

  // Otherwise fall back on generic blend lowering.
  return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
                                              Subtarget, DAG);
}
/// Handle lowering of 8-lane 32-bit floating point shuffles.
///
/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if (!Subtarget.hasAVX2()) {
    SmallVector<int> InLaneMask;
    computeInLaneShuffleMask(Mask, Mask.size() / 2, InLaneMask);

    if (!is128BitLaneRepeatedShuffleMask(MVT::v8f32, InLaneMask))
      if (SDValue R = splitAndLowerShuffle(DL, MVT::v8f32, V1, V2, Mask, DAG,
                                           /*SimpleOnly*/ true))
        return R;
  }
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return DAG.getBitcast(MVT::v8f32, ZExt);

  // If the shuffle mask is repeated in each 128-bit lane, we have many more
  // options to efficiently lower the shuffle.
  SmallVector<int, 4> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
    assert(RepeatedMask.size() == 4 &&
           "Repeated masks must be half the mask width!");

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
    if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);

    if (V2.isUndef())
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
      return V;

    // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
    // have already handled any direct blends.
    return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
  }

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
    return V;

  // If we have a single input shuffle with different shuffle patterns in the
  // two 128-bit lanes use the variable mask to VPERMILPS.
  if (V2.isUndef()) {
    if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) {
      SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
      return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
    }
    if (Subtarget.hasAVX2()) {
      SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
      return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
    }
    // Otherwise, fall back.
    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
                                               DAG, Subtarget);
  }

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // If we have VLX support, we can use VEXPAND.
  if (Subtarget.hasVLX())
    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;

  // Try to match an interleave of two v8f32s and lower them as unpck and
  // permutes using ymms. This needs to go before we try to split the vectors.
  //
  // TODO: Expand this to AVX1. Currently v8i32 is casted to v8f32 and hits
  // this path inadvertently.
  if (Subtarget.hasAVX2() && !Subtarget.hasAVX512())
    if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8f32, V1, V2,
                                                      Mask, DAG))
      return V;

  // For non-AVX512, if the mask is of 16-bit elements in lane then try to
  // split, since after the split we get more efficient code using vpunpcklwd
  // and vpunpckhwd instrs than vblend.
  if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32, DAG))
    return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, Subtarget,
                                      DAG);

  // If we have AVX2 then we always want to lower with a blend because at v8 we
  // can fully permute the elements.
  if (Subtarget.hasAVX2())
    return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8f32, V1, V2, Mask,
                                                Subtarget, DAG);

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
                                    Subtarget, DAG);
}
/// Handle lowering of 8-lane 32-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 8; });

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // Try to match an interleave of two v8i32s and lower them as unpck and
  // permutes using ymms. This needs to go before we try to split the vectors.
  if (!Subtarget.hasAVX512())
    if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8i32, V1, V2,
                                                      Mask, DAG))
      return V;

  // For non-AVX512, if the mask is of 16-bit elements in lane then try to
  // split, since after the split we get more efficient code than vblend by
  // using vpunpcklwd and vpunpckhwd instrs.
  if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndef() &&
      !Subtarget.hasAVX512())
    return lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, Subtarget,
                                      DAG);

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  // Try to use shift instructions if fast.
  if (Subtarget.preferLowerShuffleAsShift()) {
    if (SDValue Shift =
            lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, Zeroable,
                                Subtarget, DAG, /*BitwiseOnly*/ true))
      return Shift;
    if (NumV2Elements == 0)
      if (SDValue Rotate =
              lowerShuffleAsBitRotate(DL, MVT::v8i32, V1, Mask, Subtarget, DAG))
        return Rotate;
  }

  // If the shuffle mask is repeated in each 128-bit lane we can use more
  // efficient instructions that mirror the shuffles across the two 128-bit
  // lanes.
  SmallVector<int, 4> RepeatedMask;
  bool Is128BitLaneRepeatedShuffle =
      is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
  if (Is128BitLaneRepeatedShuffle) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
    if (V2.isUndef())
      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
      return V;
  }

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, Zeroable, Subtarget,
                              DAG, /*BitwiseOnly*/ false))
    return Shift;

  if (!Subtarget.preferLowerShuffleAsShift() && NumV2Elements == 0)
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v8i32, V1, Mask, Subtarget, DAG))
      return Rotate;

  // If we have VLX support, we can use VALIGN or EXPAND.
  if (Subtarget.hasVLX()) {
    if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i32, V1, V2, Mask,
                                              Subtarget, DAG))
      return Rotate;

    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;
  }

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
    return V;

  if (V2.isUndef()) {
    // Try to produce a fixed cross-128-bit lane permute followed by unpack
    // because that should be faster than the variable permute alternatives.
    if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v8i32, Mask, V1, V2, DAG))
      return V;

    // If the shuffle patterns aren't repeated but it's a single input, directly
    // generate a cross-lane VPERMD instruction.
    SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
    return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
  }

  // Assume that a single SHUFPS is faster than an alternative sequence of
  // multiple instructions (even if the CPU has a domain penalty).
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
    SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
    SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
    SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
                                            CastV1, CastV2, DAG);
    return DAG.getBitcast(MVT::v8i32, ShufPS);
  }

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Otherwise fall back on generic blend lowering.
  return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i32, V1, V2, Mask,
                                              Subtarget, DAG);
}
/// Handle lowering of 16-lane 16-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to lower using a truncation.
  if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i16, V1, V2, Mask, Zeroable,
                                       Subtarget, DAG))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, Zeroable,
                              Subtarget, DAG, /*BitwiseOnly*/ false))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return V;

  if (V2.isUndef()) {
    // Try to use bit rotation instructions.
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v16i16, V1, Mask, Subtarget, DAG))
      return Rotate;

    // Try to produce a fixed cross-128-bit lane permute followed by unpack
    // because that should be faster than the variable permute alternatives.
    if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v16i16, Mask, V1, V2, DAG))
      return V;

    // There are no generalized cross-lane shuffle operations available on i16
    // element types.
    if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
      if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
              DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
        return V;

      return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
                                                 DAG, Subtarget);
    }

    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
      // As this is a single-input shuffle, the repeated mask should be
      // a strictly valid v8i16 mask that we can pass through to the v8i16
      // lowering to handle even the v16 case.
      return lowerV8I16GeneralSingleInputShuffle(
          DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
    }
  }

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  // AVX512BW can lower to VPERMW (non-VLX will pad to v32i16).
  if (Subtarget.hasBWI())
    return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, Subtarget, DAG);

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Try to permute the lanes and then use a per-lane permute.
  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
          DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
    return V;

  // Try to match an interleave of two v16i16s and lower them as unpck and
  // permutes using ymms.
  if (!Subtarget.hasAVX512())
    if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v16i16, V1, V2,
                                                      Mask, DAG))
      return V;

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
                                    Subtarget, DAG);
}
/// Handle lowering of 32-lane 8-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to lower using a truncation.
  if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v32i8, V1, V2, Mask, Zeroable,
                                       Subtarget, DAG))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, Zeroable, Subtarget,
                              DAG, /*BitwiseOnly*/ false))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to use bit rotation instructions.
  if (V2.isUndef())
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v32i8, V1, Mask, Subtarget, DAG))
      return Rotate;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return V;

  // There are no generalized cross-lane shuffle operations available on i8
  // element types.
  if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
    // Try to produce a fixed cross-128-bit lane permute followed by unpack
    // because that should be faster than the variable permute alternatives.
    if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v32i8, Mask, V1, V2, DAG))
      return V;

    if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
            DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
      return V;

    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
                                               DAG, Subtarget);
  }

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
  if (Subtarget.hasVBMI())
    return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, Subtarget, DAG);

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Try to permute the lanes and then use a per-lane permute.
  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
          DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
    return V;

  // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
  // by zeroable elements in the remaining 24 elements. Turn this into two
  // vmovqb instructions shuffled together.
  if (Subtarget.hasVLX())
    if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
                                                  Mask, Zeroable, DAG))
      return V;

  // Try to match an interleave of two v32i8s and lower them as unpck and
  // permutes using ymms.
  if (!Subtarget.hasAVX512())
    if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v32i8, V1, V2,
                                                      Mask, DAG))
      return V;

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
                                    Subtarget, DAG);
}
/// High-level routine to lower various 256-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 256-bit x86 vector
/// shuffle or splits it into two 128-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
                                  SDValue V1, SDValue V2, const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  // If we have a single input to the zero element, insert that into V1 if we
  // can do so cheaply.
  int NumElts = VT.getVectorNumElements();
  int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });

  if (NumV2Elements == 1 && Mask[0] >= NumElts)
    if (SDValue Insertion = lowerShuffleAsElementInsertion(
            DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return Insertion;

  // Handle special cases where the lower or upper half is UNDEF.
  if (SDValue V =
          lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
    return V;

  // There is a really nice hard cut-over between AVX1 and AVX2 that means we
  // can check for those subtargets here and avoid much of the subtarget
  // querying in the per-vector-type lowering routines. With AVX1 we have
  // essentially *zero* ability to manipulate a 256-bit vector with integer
  // types. Since we'll use floating point types there eventually, just
  // immediately cast everything to a float and operate entirely in that domain.
  if (VT.isInteger() && !Subtarget.hasAVX2()) {
    int ElementBits = VT.getScalarSizeInBits();
    if (ElementBits < 32) {
      // No floating point type available, if we can't use the bit operations
      // for masking/blending then decompose into 128-bit vectors.
      if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                            Subtarget, DAG))
        return V;
      if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
        return V;
      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
                                  /*SimpleOnly*/ false);
    }

    MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
                                VT.getVectorNumElements());
    V1 = DAG.getBitcast(FpVT, V1);
    V2 = DAG.getBitcast(FpVT, V2);
    return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
  }

  if (VT == MVT::v16f16 || VT == MVT::v16bf16) {
    V1 = DAG.getBitcast(MVT::v16i16, V1);
    V2 = DAG.getBitcast(MVT::v16i16, V2);
    return DAG.getBitcast(VT,
                          DAG.getVectorShuffle(MVT::v16i16, DL, V1, V2, Mask));
  }

  switch (VT.SimpleTy) {
  case MVT::v4f64:
    return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v4i64:
    return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8f32:
    return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8i32:
    return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16i16:
    return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v32i8:
    return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Not a valid 256-bit x86 vector type!");
  }
}
/// Try to lower a vector shuffle as a sequence of 128-bit shuffles.
static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(VT.getScalarSizeInBits() == 64 &&
         "Unexpected element type size for 128bit shuffle.");

  // Handling a 256-bit vector requires VLX, and lowerV2X128VectorShuffle() is
  // most probably the better solution there.
  assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");

  // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
  SmallVector<int, 4> Widened128Mask;
  if (!canWidenShuffleElements(Mask, Widened128Mask))
    return SDValue();
  assert(Widened128Mask.size() == 4 && "Shuffle widening mismatch");

  // Try to use an insert into a zero vector.
  if (Widened128Mask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
      (Widened128Mask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
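    // Zeroable has one bit per 64-bit element here: 0xf0 means the upper 256
    // bits of the result are zeroable, 0x0c means elements [2,3] (the second
    // 128-bit lane) are.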
    unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
                              DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       getZeroVector(VT, Subtarget, DAG, DL), LoV,
                       DAG.getIntPtrConstant(0, DL));
  }

  // Check for patterns which can be matched with a single insert of a 256-bit
  // subvector.
  bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3}, V1, V2);
  if (OnlyUsesV1 ||
      isShuffleEquivalent(Mask, {0, 1, 2, 3, 8, 9, 10, 11}, V1, V2)) {
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
    SDValue SubVec =
        DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, OnlyUsesV1 ? V1 : V2,
                    DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
                       DAG.getIntPtrConstant(4, DL));
  }

  // See if this is an insertion of the lower 128-bits of V2 into V1.
  bool IsInsert = true;
  int V2Index = -1;
  for (int i = 0; i < 4; ++i) {
    assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
    if (Widened128Mask[i] < 0)
      continue;

    // Make sure all V1 subvectors are in place.
    if (Widened128Mask[i] < 4) {
      if (Widened128Mask[i] != i) {
        IsInsert = false;
        break;
      }
    } else {
      // Make sure we only have a single V2 index and it's the lowest 128-bits.
      if (V2Index >= 0 || Widened128Mask[i] != 4) {
        IsInsert = false;
        break;
      }
      V2Index = i;
    }
  }
  if (IsInsert && V2Index >= 0) {
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
    SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
                                 DAG.getIntPtrConstant(0, DL));
    return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
  }

  // See if we can widen to a 256-bit lane shuffle, we're going to lose 128-lane
  // UNDEF info by lowering to X86ISD::SHUF128 anyway, so by widening where
  // possible we at least ensure the lanes stay sequential to help later
  // combines.
  SmallVector<int, 2> Widened256Mask;
  if (canWidenShuffleElements(Widened128Mask, Widened256Mask)) {
    Widened128Mask.clear();
    narrowShuffleMaskElts(2, Widened256Mask, Widened128Mask);
  }

  // Try to lower to vshuf64x2/vshuf32x4.
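  // SHUF128's immediate holds one 2-bit lane selector per 128-bit destination
  // lane; the two low fields index into the first source and the two high
  // fields into the second, which is why each source may only feed one half
  // of the result below.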
  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
  unsigned PermMask = 0;
  // Ensure elements came from the same Op.
  for (int i = 0; i < 4; ++i) {
    assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
    if (Widened128Mask[i] < 0)
      continue;

    SDValue Op = Widened128Mask[i] >= 4 ? V2 : V1;
    unsigned OpIndex = i / 2;
    if (Ops[OpIndex].isUndef())
      Ops[OpIndex] = Op;
    else if (Ops[OpIndex] != Op)
      return SDValue();

    // Convert the 128-bit shuffle mask selection values into 128-bit selection
    // bits defined by a vshuf64x2 instruction's immediate control byte.
    PermMask |= (Widened128Mask[i] % 4) << (i * 2);
  }

  return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
}
/// Handle lowering of 8-lane 64-bit floating point shuffles.
static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (V2.isUndef()) {
    // Use low duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6}, V1, V2))
      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);

    if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
      // Non-half-crossing single input shuffles can be lowered with an
      // interleaved permutation.
      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
                              ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
                              ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
                         DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
    }

    SmallVector<int, 4> RepeatedMask;
    if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
  }

  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
                                           V2, Subtarget, DAG))
    return Shuf128;

  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
    return Unpck;

  // Check if the blend happens to exactly fit that of SHUFPD.
  if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Op;

  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, Subtarget, DAG);
}
/// Handle lowering of 16-lane 32-bit floating point shuffles.
static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // If the shuffle mask is repeated in each 128-bit lane, we have many more
  // options to efficiently lower the shuffle.
  SmallVector<int, 4> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
    if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);

    if (V2.isUndef())
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
      return V;

    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

    // Otherwise, fall back to a SHUFPS sequence.
    return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
  }

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return DAG.getBitcast(MVT::v16f32, ZExt);

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v16f32, V1, V2, Mask, Subtarget, DAG))
    return V;

  // If we have a single input shuffle with different shuffle patterns in the
  // 128-bit lanes and don't lane cross, use variable mask VPERMILPS.
  if (V2.isUndef() &&
      !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
    SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
    return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
  }

  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
                                       V1, V2, DAG, Subtarget))
    return V;

  return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, Subtarget, DAG);
}
/// Handle lowering of 8-lane 64-bit integer shuffles.
static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // Try to use shift instructions if fast.
  if (Subtarget.preferLowerShuffleAsShift())
    if (SDValue Shift =
            lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask, Zeroable,
                                Subtarget, DAG, /*BitwiseOnly*/ true))
      return Shift;

  if (V2.isUndef()) {
    // When the shuffle is mirrored between the 128-bit lanes of the unit, we
    // can use lower latency instructions that will operate on all four
    // 128-bit lanes.
    SmallVector<int, 2> Repeated128Mask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
      SmallVector<int, 4> PSHUFDMask;
      narrowShuffleMaskElts(2, Repeated128Mask, PSHUFDMask);
      return DAG.getBitcast(
          MVT::v8i64,
          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
                      DAG.getBitcast(MVT::v16i32, V1),
                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
    }

    SmallVector<int, 4> Repeated256Mask;
    if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
                         getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
  }

  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
                                           V2, Subtarget, DAG))
    return Shuf128;

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask, Zeroable, Subtarget,
                              DAG, /*BitwiseOnly*/ false))
    return Shift;

  // Try to use VALIGN.
  if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i64, V1, V2, Mask,
                                            Subtarget, DAG))
    return Rotate;

  // Try to use PALIGNR.
  if (Subtarget.hasBWI())
    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;

  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
    return Unpck;

  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, Subtarget, DAG);
}
/// Handle lowering of 16-lane 32-bit integer shuffles.
static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // Try to use shift instructions if fast.
  if (Subtarget.preferLowerShuffleAsShift()) {
    if (SDValue Shift =
            lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask, Zeroable,
                                Subtarget, DAG, /*BitwiseOnly*/ true))
      return Shift;
    if (NumV2Elements == 0)
      if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i32, V1, Mask,
                                                   Subtarget, DAG))
        return Rotate;
  }

  // If the shuffle mask is repeated in each 128-bit lane we can use more
  // efficient instructions that mirror the shuffles across the four 128-bit
  // lanes.
  SmallVector<int, 4> RepeatedMask;
  bool Is128BitLaneRepeatedShuffle =
      is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
  if (Is128BitLaneRepeatedShuffle) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
    if (V2.isUndef())
      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
      return V;
  }

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask, Zeroable,
                              Subtarget, DAG, /*BitwiseOnly*/ false))
    return Shift;

  if (!Subtarget.preferLowerShuffleAsShift() && NumV2Elements == 0)
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v16i32, V1, Mask, Subtarget, DAG))
      return Rotate;

  // Try to use VALIGN.
  if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v16i32, V1, V2, Mask,
                                            Subtarget, DAG))
    return Rotate;

  // Try to use byte rotation instructions.
  if (Subtarget.hasBWI())
    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;

  // Assume that a single SHUFPS is faster than using a permv shuffle.
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
    SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
    SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
    SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
                                            CastV1, CastV2, DAG);
    return DAG.getBitcast(MVT::v16i32, ShufPS);
  }

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
    return V;

  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, Subtarget, DAG);
}
/// Handle lowering of 32-lane 16-bit integer shuffles.
static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V =
          lowerShuffleWithPACK(DL, MVT::v32i16, Mask, V1, V2, DAG, Subtarget))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask, Zeroable,
                              Subtarget, DAG, /*BitwiseOnly*/ false))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  if (V2.isUndef()) {
    // Try to use bit rotation instructions.
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v32i16, V1, Mask, Subtarget, DAG))
      return Rotate;

    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
      // As this is a single-input shuffle, the repeated mask should be
      // a strictly valid v8i16 mask that we can pass through to the v8i16
      // lowering to handle even the v32 case.
      return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v32i16, V1,
                                                 RepeatedMask, Subtarget, DAG);
    }
  }

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, Subtarget, DAG);
}
19687 /// Handle lowering of 64-lane 8-bit integer shuffles.
19688 static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
19689 const APInt &Zeroable, SDValue V1, SDValue V2,
19690 const X86Subtarget &Subtarget,
19691 SelectionDAG &DAG) {
19692 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
19693 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
19694 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
19695 assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
19697 // Whenever we can lower this as a zext, that instruction is strictly faster
19698 // than any alternative. It also allows us to fold memory operands into the
19699 // shuffle in many cases.
19700 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
19701 DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
19704 // Use dedicated unpack instructions for masks that match their pattern.
19705 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
19708 // Use dedicated pack instructions for masks that match their pattern.
19709 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
19713 // Try to use shift instructions.
19714 if (SDValue Shift =
19715 lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget,
19716 DAG, /*BitwiseOnly*/ false))
19719 // Try to use byte rotation instructions.
19720 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
19724 // Try to use bit rotation instructions.
19726 if (SDValue Rotate =
19727 lowerShuffleAsBitRotate(DL, MVT::v64i8, V1, Mask, Subtarget, DAG))
19730 // Lower as AND if possible.
19731 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v64i8, V1, V2, Mask,
19732 Zeroable, Subtarget, DAG))
19735 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
19736 Zeroable, Subtarget, DAG))
19739 // Try to create an in-lane repeating shuffle mask and then shuffle the
19740 // results into the target lanes.
19741 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
19742 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
19745 if (SDValue Result = lowerShuffleAsLanePermuteAndPermute(
19746 DL, MVT::v64i8, V1, V2, Mask, DAG, Subtarget))
19749 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
19750 Zeroable, Subtarget, DAG))
19753 if (!is128BitLaneCrossingShuffleMask(MVT::v64i8, Mask)) {
19754 // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
19755 // PALIGNR will be cheaper than the second PSHUFB+OR.
19756 if (SDValue V = lowerShuffleAsByteRotateAndPermute(DL, MVT::v64i8, V1, V2,
19757 Mask, Subtarget, DAG))
19760 // If we can't directly blend but can use PSHUFB, that will be better as it
19761 // can both shuffle and set up the inefficient blend.
19762 bool V1InUse, V2InUse;
19763 return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v64i8, V1, V2, Mask, Zeroable,
19764 DAG, V1InUse, V2InUse);
  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
19770 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
19771 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
19774 // VBMI can use VPERMV/VPERMV3 byte shuffles.
19775 if (Subtarget.hasVBMI())
19776 return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, Subtarget, DAG);
  return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG,
                              /*SimpleOnly*/ false);
19781 /// High-level routine to lower various 512-bit x86 vector shuffles.
19783 /// This routine either breaks down the specific type of a 512-bit x86 vector
19784 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
19785 /// together based on the available instructions.
19786 static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
19787 MVT VT, SDValue V1, SDValue V2,
19788 const APInt &Zeroable,
19789 const X86Subtarget &Subtarget,
19790 SelectionDAG &DAG) {
19791 assert(Subtarget.hasAVX512() &&
         "Cannot lower 512-bit vectors w/o basic ISA!");
19794 // If we have a single input to the zero element, insert that into V1 if we
19795 // can do so cheaply.
19796 int NumElts = Mask.size();
19797 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
19799 if (NumV2Elements == 1 && Mask[0] >= NumElts)
19800 if (SDValue Insertion = lowerShuffleAsElementInsertion(
19801 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
19804 // Handle special cases where the lower or upper half is UNDEF.
19806 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
19809 // Check for being able to broadcast a single element.
19810 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
19814 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI()) {
    // Try using bit ops for masking and blending before falling back to
    // splitting.
19817 if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
19820 if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
19823 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG, /*SimpleOnly*/ false);
19826 if (VT == MVT::v32f16) {
19827 if (!Subtarget.hasBWI())
19828 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
19829 /*SimpleOnly*/ false);
19831 V1 = DAG.getBitcast(MVT::v32i16, V1);
19832 V2 = DAG.getBitcast(MVT::v32i16, V2);
19833 return DAG.getBitcast(MVT::v32f16,
19834 DAG.getVectorShuffle(MVT::v32i16, DL, V1, V2, Mask));
19837 // Dispatch to each element type for lowering. If we don't have support for
19838 // specific element type shuffles at 512 bits, immediately split them and
19839 // lower them. Each lowering routine of a given type is allowed to assume that
19840 // the requisite ISA extensions for that element type are available.
19841 switch (VT.SimpleTy) {
19843 return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19845 return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19847 return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19849 return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19851 return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19853 return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19856 llvm_unreachable("Not a valid 512-bit x86 vector type!");
19860 static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
19861 MVT VT, SDValue V1, SDValue V2,
19862 const X86Subtarget &Subtarget,
19863 SelectionDAG &DAG) {
19864 // Shuffle should be unary.
19869 int NumElts = Mask.size();
19870 for (int i = 0; i != NumElts; ++i) {
19872 assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
19873 "Unexpected mask index.");
19877 // The first non-undef element determines our shift amount.
19878 if (ShiftAmt < 0) {
19880 // Need to be shifting right.
19884 // All non-undef elements must shift by the same amount.
19885 if (ShiftAmt != M - i)
19888 assert(ShiftAmt >= 0 && "All undef?");
  // Great, we found a shift right.
19892 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
19893 WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
19894 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
19895 DAG.getUNDEF(WideVT), V1,
19896 DAG.getIntPtrConstant(0, DL));
19897 Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
19898 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
19899 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19900 DAG.getIntPtrConstant(0, DL));
19903 // Determine if this shuffle can be implemented with a KSHIFT instruction.
19904 // Returns the shift amount if possible or -1 if not. This is a simplified
19905 // version of matchShuffleAsShift.
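// For example, with MaskOffset = 0, Mask = <2, 3, -1, -1> and the top two
// elements zeroable, this matches Opcode = X86ISD::KSHIFTR with a shift
// amount of 2: elements move down and zeroes shift in from the top.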
19906 static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
19907 int MaskOffset, const APInt &Zeroable) {
19908 int Size = Mask.size();
19910 auto CheckZeros = [&](int Shift, bool Left) {
19911 for (int j = 0; j < Shift; ++j)
19912 if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
19918 auto MatchShift = [&](int Shift, bool Left) {
19919 unsigned Pos = Left ? Shift : 0;
19920 unsigned Low = Left ? 0 : Shift;
19921 unsigned Len = Size - Shift;
19922 return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
19925 for (int Shift = 1; Shift != Size; ++Shift)
19926 for (bool Left : {true, false})
19927 if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
19928 Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
19936 // Lower vXi1 vector shuffles.
// There is no dedicated instruction on AVX-512 that shuffles the mask
// registers. The only way to shuffle bits is to sign-extend the mask vector
// to a SIMD vector, shuffle it, and then truncate it back.
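// For example, a v16i1 shuffle becomes, roughly:
//   v16i1 --sign_extend--> vXiY --vector_shuffle--> vXiY --setcc/truncate-->
//   v16i1
// where the extended type vXiY is picked below from the available features.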
19940 static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
19941 MVT VT, SDValue V1, SDValue V2,
19942 const APInt &Zeroable,
19943 const X86Subtarget &Subtarget,
19944 SelectionDAG &DAG) {
19945 assert(Subtarget.hasAVX512() &&
19946 "Cannot lower 512-bit vectors w/o basic ISA!");
19948 int NumElts = Mask.size();
19950 // Try to recognize shuffles that are just padding a subvector with zeros.
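  // For example, a v8i1 shuffle whose mask starts <0, 1, 2, 3, ...> with the
  // upper four elements zeroable is lowered as an extract of the low v4i1
  // subvector inserted into an all-zeros vector.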
19951 int SubvecElts = 0;
19953 for (int i = 0; i != NumElts; ++i) {
19954 if (Mask[i] >= 0) {
      // Grab the source from the first valid mask element. All subsequent
      // elements need to use this same source.
19958 Src = Mask[i] / NumElts;
19959 if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
19965 assert(SubvecElts != NumElts && "Identity shuffle?");
  // Clip to a power of 2.
19968 SubvecElts = llvm::bit_floor<uint32_t>(SubvecElts);
19970 // Make sure the number of zeroable bits in the top at least covers the bits
19971 // not covered by the subvector.
19972 if ((int)Zeroable.countl_one() >= (NumElts - SubvecElts)) {
19973 assert(Src >= 0 && "Expected a source!");
19974 MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
19975 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
19976 Src == 0 ? V1 : V2,
19977 DAG.getIntPtrConstant(0, DL));
19978 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
19979 DAG.getConstant(0, DL, VT),
19980 Extract, DAG.getIntPtrConstant(0, DL));
19983 // Try a simple shift right with undef elements. Later we'll try with zeros.
19984 if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
19988 // Try to match KSHIFTs.
19989 unsigned Offset = 0;
19990 for (SDValue V : { V1, V2 }) {
19992 int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
19993 if (ShiftAmt >= 0) {
19995 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
19996 WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
19997 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
19998 DAG.getUNDEF(WideVT), V,
19999 DAG.getIntPtrConstant(0, DL));
20000 // Widened right shifts need two shifts to ensure we shift in zeroes.
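      // For example, a v4i1 KSHIFTR by 1 widened to v16i1 becomes a KSHIFTL
      // by 12 (moving the 4 valid bits to the MSBs) followed by a KSHIFTR by
      // 13 (12 + 1), so the vacated positions fill with zeroes rather than
      // whatever the widened upper bits held.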
20001 if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
20002 int WideElts = WideVT.getVectorNumElements();
20003 // Shift left to put the original vector in the MSBs of the new size.
20004 Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
20005 DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
20006 // Increase the shift amount to account for the left shift.
20007 ShiftAmt += WideElts - NumElts;
20010 Res = DAG.getNode(Opcode, DL, WideVT, Res,
20011 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
20012 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
20013 DAG.getIntPtrConstant(0, DL));
20015 Offset += NumElts; // Increment for next iteration.
20018 // If we're broadcasting a SETCC result, try to broadcast the ops instead.
20019 // TODO: What other unary shuffles would benefit from this?
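  // For example, splat(setcc(X, Y, CC)) becomes
  //   setcc(splat(X), splat(Y), CC)
  // so the broadcast happens on the (wider) setcc operands and the compare
  // itself produces the final mask.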
20020 if (isBroadcastShuffleMask(Mask) && V1.getOpcode() == ISD::SETCC &&
20022 SDValue Op0 = V1.getOperand(0);
20023 SDValue Op1 = V1.getOperand(1);
20024 ISD::CondCode CC = cast<CondCodeSDNode>(V1.getOperand(2))->get();
20025 EVT OpVT = Op0.getValueType();
20026 return DAG.getSetCC(
20027 DL, VT, DAG.getVectorShuffle(OpVT, DL, Op0, DAG.getUNDEF(OpVT), Mask),
20028 DAG.getVectorShuffle(OpVT, DL, Op1, DAG.getUNDEF(OpVT), Mask), CC);
20032 switch (VT.SimpleTy) {
20034 llvm_unreachable("Expected a vector of i1 elements");
20036 ExtVT = MVT::v2i64;
20039 ExtVT = MVT::v4i32;
    // Take a 512-bit type; there are more shuffles on KNL. If we have VLX,
    // use a 256-bit type.
20044 ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
20047 // Take 512-bit type, unless we are avoiding 512-bit types and have the
20048 // 256-bit operation available.
20049 ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
20052 // Take 512-bit type, unless we are avoiding 512-bit types and have the
20053 // 256-bit operation available.
20054 assert(Subtarget.hasBWI() && "Expected AVX512BW support");
20055 ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
20058 // Fall back to scalarization. FIXME: We can do better if the shuffle
20059 // can be partitioned cleanly.
20060 if (!Subtarget.useBWIRegs())
20062 ExtVT = MVT::v64i8;
20066 V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
20067 V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
20069 SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
  // The i1 elements were sign extended, so we can convert back to a mask by
  // comparing the shuffled result against zero (X86ISD::CVT2MASK).
20071 int NumElems = VT.getVectorNumElements();
20072 if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
20073 (Subtarget.hasDQI() && (NumElems < 32)))
20074 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
20075 Shuffle, ISD::SETGT);
20077 return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
20080 /// Helper function that returns true if the shuffle mask should be
20081 /// commuted to improve canonicalization.
20082 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
20083 int NumElements = Mask.size();
20085 int NumV1Elements = 0, NumV2Elements = 0;
20089 else if (M < NumElements)
20094 // Commute the shuffle as needed such that more elements come from V1 than
20095 // V2. This allows us to match the shuffle pattern strictly on how many
20096 // elements come from V1 without handling the symmetric cases.
20097 if (NumV2Elements > NumV1Elements)
20100 assert(NumV1Elements > 0 && "No V1 indices");
20102 if (NumV2Elements == 0)
  // When the number of V1 and V2 elements is the same, try to minimize the
  // number of uses of V2 in the low half of the vector. When that is tied,
  // ensure that the sum of indices for V1 is equal to or lower than the sum
  // of indices for V2. When those are equal, try to ensure that the number of
  // odd indices for V1 is lower than the number of odd indices for V2.
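  // For example, with Mask = <4, 5, 0, 1> both inputs contribute two
  // elements, but both V2 elements land in the low half, so the mask is
  // commuted to <0, 1, 4, 5>.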
20110 if (NumV1Elements == NumV2Elements) {
20111 int LowV1Elements = 0, LowV2Elements = 0;
20112 for (int M : Mask.slice(0, NumElements / 2))
20113 if (M >= NumElements)
20117 if (LowV2Elements > LowV1Elements)
20119 if (LowV2Elements == LowV1Elements) {
20120 int SumV1Indices = 0, SumV2Indices = 0;
20121 for (int i = 0, Size = Mask.size(); i < Size; ++i)
20122 if (Mask[i] >= NumElements)
20124 else if (Mask[i] >= 0)
20126 if (SumV2Indices < SumV1Indices)
20128 if (SumV2Indices == SumV1Indices) {
20129 int NumV1OddIndices = 0, NumV2OddIndices = 0;
20130 for (int i = 0, Size = Mask.size(); i < Size; ++i)
20131 if (Mask[i] >= NumElements)
20132 NumV2OddIndices += i % 2;
20133 else if (Mask[i] >= 0)
20134 NumV1OddIndices += i % 2;
20135 if (NumV2OddIndices < NumV1OddIndices)
20144 static bool canCombineAsMaskOperation(SDValue V,
20145 const X86Subtarget &Subtarget) {
20146 if (!Subtarget.hasAVX512())
20149 if (!V.getValueType().isSimple())
20152 MVT VT = V.getSimpleValueType().getScalarType();
20153 if ((VT == MVT::i16 || VT == MVT::i8) && !Subtarget.hasBWI())
20156 // If vec width < 512, widen i8/i16 even with BWI as blendd/blendps/blendpd
20157 // are preferable to blendw/blendvb/masked-mov.
20158 if ((VT == MVT::i16 || VT == MVT::i8) &&
20159 V.getSimpleValueType().getSizeInBits() < 512)
20162 auto HasMaskOperation = [&](SDValue V) {
    // TODO: Currently we only check a limited set of opcodes. We could
    // probably extend it to all binary operations by checking TLI.isBinOp().
20165 switch (V->getOpcode()) {
20184 if (!V->hasOneUse())
20190 if (HasMaskOperation(V))
20196 // Forward declaration.
20197 static SDValue canonicalizeShuffleMaskWithHorizOp(
20198 MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
20199 unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
20200 const X86Subtarget &Subtarget);
20202 /// Top-level lowering for x86 vector shuffles.
20204 /// This handles decomposition, canonicalization, and lowering of all x86
20205 /// vector shuffles. Most of the specific lowering strategies are encapsulated
20206 /// above in helper routines. The canonicalization attempts to widen shuffles
20207 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
20208 /// s.t. only one of the two inputs needs to be tested, etc.
20209 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
20210 SelectionDAG &DAG) {
20211 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
20212 ArrayRef<int> OrigMask = SVOp->getMask();
20213 SDValue V1 = Op.getOperand(0);
20214 SDValue V2 = Op.getOperand(1);
20215 MVT VT = Op.getSimpleValueType();
20216 int NumElements = VT.getVectorNumElements();
20218 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
20220 assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
20221 "Can't lower MMX shuffles");
20223 bool V1IsUndef = V1.isUndef();
20224 bool V2IsUndef = V2.isUndef();
20225 if (V1IsUndef && V2IsUndef)
20226 return DAG.getUNDEF(VT);
  // When we create a shuffle node we put the UNDEF node in the second
  // operand, but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
20232 return DAG.getCommutedVectorShuffle(*SVOp);
  // Check for non-undef masks pointing at an undef vector and make the masks
  // undef as well. This makes it easier to match the shuffle based solely on
  // the mask.
20238 any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
20239 SmallVector<int, 8> NewMask(OrigMask);
20240 for (int &M : NewMask)
20241 if (M >= NumElements)
20243 return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
20246 // Check for illegal shuffle mask element index values.
20247 int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
20248 (void)MaskUpperLimit;
20249 assert(llvm::all_of(OrigMask,
20250 [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
20251 "Out of bounds shuffle index");
20253 // We actually see shuffles that are entirely re-arrangements of a set of
20254 // zero inputs. This mostly happens while decomposing complex shuffles into
20255 // simple ones. Directly lower these as a buildvector of zeros.
20256 APInt KnownUndef, KnownZero;
20257 computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);
20259 APInt Zeroable = KnownUndef | KnownZero;
20260 if (Zeroable.isAllOnes())
20261 return getZeroVector(VT, Subtarget, DAG, DL);
20263 bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
20265 // Try to collapse shuffles into using a vector type with fewer elements but
20266 // wider element types. We cap this to not form integers or floating point
20267 // elements wider than 64 bits. It does not seem beneficial to form i128
20268 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
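  // For example, the v4i32 mask <0, 1, 4, 5> widens to the v2i64 mask <0, 2>,
  // halving the number of lanes the lowering routines have to reason about.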
20269 SmallVector<int, 16> WidenedMask;
20270 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
20271 !canCombineAsMaskOperation(V1, Subtarget) &&
20272 !canCombineAsMaskOperation(V2, Subtarget) &&
20273 canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
20274 // Shuffle mask widening should not interfere with a broadcast opportunity
20275 // by obfuscating the operands with bitcasts.
20276 // TODO: Avoid lowering directly from this top-level function: make this
20277 // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
20278 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
20282 MVT NewEltVT = VT.isFloatingPoint()
20283 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
20284 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
20285 int NewNumElts = NumElements / 2;
20286 MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
    // Make sure that the new vector type is legal. For example, v2f64 isn't
    // legal on SSE1.
20289 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
20291 // Modify the new Mask to take all zeros from the all-zero vector.
20292 // Choose indices that are blend-friendly.
20293 bool UsedZeroVector = false;
20294 assert(is_contained(WidenedMask, SM_SentinelZero) &&
20295 "V2's non-undef elements are used?!");
20296 for (int i = 0; i != NewNumElts; ++i)
20297 if (WidenedMask[i] == SM_SentinelZero) {
20298 WidenedMask[i] = i + NewNumElts;
20299 UsedZeroVector = true;
20301 // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
20302 // some elements to be undef.
20303 if (UsedZeroVector)
20304 V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
20306 V1 = DAG.getBitcast(NewVT, V1);
20307 V2 = DAG.getBitcast(NewVT, V2);
20308 return DAG.getBitcast(
20309 VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
20313 SmallVector<SDValue> Ops = {V1, V2};
20314 SmallVector<int> Mask(OrigMask);
20316 // Canonicalize the shuffle with any horizontal ops inputs.
20317 // NOTE: This may update Ops and Mask.
20318 if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
20319 Ops, Mask, VT.getSizeInBits(), DL, DAG, Subtarget))
20320 return DAG.getBitcast(VT, HOp);
20322 V1 = DAG.getBitcast(VT, Ops[0]);
20323 V2 = DAG.getBitcast(VT, Ops[1]);
20324 assert(NumElements == (int)Mask.size() &&
20325 "canonicalizeShuffleMaskWithHorizOp "
20326 "shouldn't alter the shuffle mask size");
20328 // Commute the shuffle if it will improve canonicalization.
20329 if (canonicalizeShuffleMaskWithCommute(Mask)) {
20330 ShuffleVectorSDNode::commuteMask(Mask);
20334 // For each vector width, delegate to a specialized lowering routine.
20335 if (VT.is128BitVector())
20336 return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
20338 if (VT.is256BitVector())
20339 return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
20341 if (VT.is512BitVector())
20342 return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
20345 return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
20347 llvm_unreachable("Unimplemented!");
20350 /// Try to lower a VSELECT instruction to a vector shuffle.
20351 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
20352 const X86Subtarget &Subtarget,
20353 SelectionDAG &DAG) {
20354 SDValue Cond = Op.getOperand(0);
20355 SDValue LHS = Op.getOperand(1);
20356 SDValue RHS = Op.getOperand(2);
20357 MVT VT = Op.getSimpleValueType();
  // Only non-legal VSELECTs reach this lowering; convert those into generic
  // shuffles and reuse the shuffle lowering path for blends.
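  // For example, a v4i32 VSELECT with Cond = <-1, 0, 0, -1> becomes the
  // shuffle mask <0, 5, 6, 3>: all-ones lanes pick from LHS (indices 0..3),
  // zero lanes pick from RHS (indices 4..7).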
20361 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
20362 SmallVector<int, 32> Mask;
20363 if (createShuffleMaskFromVSELECT(Mask, Cond))
20364 return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
20370 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
20371 SDValue Cond = Op.getOperand(0);
20372 SDValue LHS = Op.getOperand(1);
20373 SDValue RHS = Op.getOperand(2);
20376 MVT VT = Op.getSimpleValueType();
20377 if (isSoftFP16(VT)) {
20378 MVT NVT = VT.changeVectorElementTypeToInteger();
20379 return DAG.getBitcast(VT, DAG.getNode(ISD::VSELECT, dl, NVT, Cond,
20380 DAG.getBitcast(NVT, LHS),
20381 DAG.getBitcast(NVT, RHS)));
20384 // A vselect where all conditions and data are constants can be optimized into
20385 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
20386 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
20387 ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
20388 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
20391 // Try to lower this to a blend-style vector shuffle. This can handle all
20392 // constant condition cases.
20393 if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
  // If this VSELECT has a vector of i1 as a mask, it will be directly matched
  // with patterns on the mask registers on AVX-512.
20398 MVT CondVT = Cond.getSimpleValueType();
20399 unsigned CondEltSize = Cond.getScalarValueSizeInBits();
20400 if (CondEltSize == 1)
20403 // Variable blends are only legal from SSE4.1 onward.
20404 if (!Subtarget.hasSSE41())
20407 unsigned EltSize = VT.getScalarSizeInBits();
20408 unsigned NumElts = VT.getVectorNumElements();
20410 // Expand v32i16/v64i8 without BWI.
20411 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
  // If the VSELECT is on a 512-bit type, we have to convert a non-i1
  // condition into an i1 condition so that we can use the mask-based 512-bit
  // blend instructions.
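  // For example, (v16i32 vselect (v16i32 C), A, B) becomes roughly
  //   (v16i32 vselect (v16i1 setcc (C, 0, ne)), A, B)
  // which then matches the AVX-512 masked move/blend patterns.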
20417 if (VT.getSizeInBits() == 512) {
20418 // Build a mask by testing the condition against zero.
20419 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
20420 SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
20421 DAG.getConstant(0, dl, CondVT),
20423 // Now return a new VSELECT using the mask.
20424 return DAG.getSelect(dl, VT, Mask, LHS, RHS);
20427 // SEXT/TRUNC cases where the mask doesn't match the destination size.
20428 if (CondEltSize != EltSize) {
20429 // If we don't have a sign splat, rely on the expansion.
20430 if (CondEltSize != DAG.ComputeNumSignBits(Cond))
20433 MVT NewCondSVT = MVT::getIntegerVT(EltSize);
20434 MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
20435 Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
20436 return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
  // Only some types will be legal on some subtargets. If we can emit a legal
  // VSELECT-matching blend, return Op; but if we need to expand, return a
  // null SDValue.
20442 switch (VT.SimpleTy) {
20444 // Most of the vector types have blends past SSE4.1.
20448 // The byte blends for AVX vectors were introduced only in AVX2.
20449 if (Subtarget.hasAVX2())
20455 case MVT::v16i16: {
20456 // Bitcast everything to the vXi8 type and use a vXi8 vselect.
20457 MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
20458 Cond = DAG.getBitcast(CastVT, Cond);
20459 LHS = DAG.getBitcast(CastVT, LHS);
20460 RHS = DAG.getBitcast(CastVT, RHS);
20461 SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
20462 return DAG.getBitcast(VT, Select);
20467 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
20468 MVT VT = Op.getSimpleValueType();
20469 SDValue Vec = Op.getOperand(0);
20470 SDValue Idx = Op.getOperand(1);
20471 assert(isa<ConstantSDNode>(Idx) && "Constant index expected");
20474 if (!Vec.getSimpleValueType().is128BitVector())
20477 if (VT.getSizeInBits() == 8) {
20478 // If IdxVal is 0, it's cheaper to do a move instead of a pextrb, unless
20479 // we're going to zero extend the register or fold the store.
20480 if (llvm::isNullConstant(Idx) && !X86::mayFoldIntoZeroExtend(Op) &&
20481 !X86::mayFoldIntoStore(Op))
20482 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
20483 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20484 DAG.getBitcast(MVT::v4i32, Vec), Idx));
20486 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
20487 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec,
20488 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20489 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
20492 if (VT == MVT::f32) {
20493 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
    // the result back to an FR32 register. It's only worth matching if the
20495 // result has a single use which is a store or a bitcast to i32. And in
20496 // the case of a store, it's not worth it if the index is a constant 0,
20497 // because a MOVSSmr can be used instead, which is smaller and faster.
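    // For example, (store (f32 (extractelt (v4f32 V), 1))) can be matched as
    // a single EXTRACTPS with a memory operand, avoiding a separate GPR round
    // trip.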
20498 if (!Op.hasOneUse())
20500 SDNode *User = *Op.getNode()->use_begin();
20501 if ((User->getOpcode() != ISD::STORE || isNullConstant(Idx)) &&
20502 (User->getOpcode() != ISD::BITCAST ||
20503 User->getValueType(0) != MVT::i32))
20505 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20506 DAG.getBitcast(MVT::v4i32, Vec), Idx);
20507 return DAG.getBitcast(MVT::f32, Extract);
20510 if (VT == MVT::i32 || VT == MVT::i64)
/// Extract one bit from a mask vector, like v16i1 or v8i1.
/// This is an AVX-512 feature.
20518 static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
20519 const X86Subtarget &Subtarget) {
20520 SDValue Vec = Op.getOperand(0);
20522 MVT VecVT = Vec.getSimpleValueType();
20523 SDValue Idx = Op.getOperand(1);
20524 auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
20525 MVT EltVT = Op.getSimpleValueType();
20527 assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
20528 "Unexpected vector type in ExtractBitFromMaskVector");
    // A variable index can't be handled in mask registers;
    // extend the vector to VR512/128 instead.
20533 unsigned NumElts = VecVT.getVectorNumElements();
    // Extending v8i1/v16i1 to 512-bit gets better performance on KNL
    // than extending to 128/256-bit.
20536 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
20537 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
20538 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
20539 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
20540 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
20543 unsigned IdxVal = IdxC->getZExtValue();
20544 if (IdxVal == 0) // the operation is legal
20547 // Extend to natively supported kshift.
20548 unsigned NumElems = VecVT.getVectorNumElements();
20549 MVT WideVecVT = VecVT;
20550 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
20551 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
20552 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
20553 DAG.getUNDEF(WideVecVT), Vec,
20554 DAG.getIntPtrConstant(0, dl));
20557 // Use kshiftr instruction to move to the lower element.
20558 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
20559 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20561 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
20562 DAG.getIntPtrConstant(0, dl));
20566 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
20567 SelectionDAG &DAG) const {
20569 SDValue Vec = Op.getOperand(0);
20570 MVT VecVT = Vec.getSimpleValueType();
20571 SDValue Idx = Op.getOperand(1);
20572 auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
20574 if (VecVT.getVectorElementType() == MVT::i1)
20575 return ExtractBitFromMaskVector(Op, DAG, Subtarget);
  // It's more profitable to go through memory (1 cycle throughput)
  // than using a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
  // The IACA tool was used to get the performance estimate
  // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
20583 // example : extractelement <16 x i8> %a, i32 %i
20585 // Block Throughput: 3.00 Cycles
20586 // Throughput Bottleneck: Port5
20588 // | Num Of | Ports pressure in cycles | |
20589 // | Uops | 0 - DV | 5 | 6 | 7 | |
20590 // ---------------------------------------------
20591 // | 1 | | 1.0 | | | CP | vmovd xmm1, edi
20592 // | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
20593 // | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
20594 // Total Num Of Uops: 4
20597 // Block Throughput: 1.00 Cycles
20598 // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
20600 // | | Ports pressure in cycles | |
20601 // |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
20602 // ---------------------------------------------------------
20603 // |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
20604 // |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
20605 // |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
20606 // Total Num Of Uops: 4
20611 unsigned IdxVal = IdxC->getZExtValue();
20613 // If this is a 256-bit vector result, first extract the 128-bit vector and
20614 // then extract the element from the 128-bit vector.
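  // For example, extracting element 9 of a v16i16 first extracts the upper
  // 128-bit half (elements 8..15) and then extracts element 9 & 7 == 1 from
  // the resulting v8i16.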
20615 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
20616 // Get the 128-bit vector.
20617 Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
20618 MVT EltVT = VecVT.getVectorElementType();
20620 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
20621 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
20623 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
20624 // this can be done with a mask.
20625 IdxVal &= ElemsPerChunk - 1;
20626 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
20627 DAG.getIntPtrConstant(IdxVal, dl));
20630 assert(VecVT.is128BitVector() && "Unexpected vector length");
20632 MVT VT = Op.getSimpleValueType();
20634 if (VT == MVT::i16) {
20635 // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
20636 // we're going to zero extend the register or fold the store (SSE41 only).
20637 if (IdxVal == 0 && !X86::mayFoldIntoZeroExtend(Op) &&
20638 !(Subtarget.hasSSE41() && X86::mayFoldIntoStore(Op))) {
20639 if (Subtarget.hasFP16())
20642 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
20643 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20644 DAG.getBitcast(MVT::v4i32, Vec), Idx));
20647 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, Vec,
20648 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20649 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
20652 if (Subtarget.hasSSE41())
20653 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
  // TODO: We only extract a single element from v16i8; we can probably afford
  // to be more aggressive here before using the default approach of spilling
  // to stack.
20659 if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
20660 // Extract either the lowest i32 or any i16, and extract the sub-byte.
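    // For example, byte 5 of a v16i8 is extracted as word 2 of the v8i16
    // bitcast, shifted right by (5 % 2) * 8 == 8 bits and then truncated to
    // i8.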
20661 int DWordIdx = IdxVal / 4;
20662 if (DWordIdx == 0) {
20663 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20664 DAG.getBitcast(MVT::v4i32, Vec),
20665 DAG.getIntPtrConstant(DWordIdx, dl));
20666 int ShiftVal = (IdxVal % 4) * 8;
20668 Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
20669 DAG.getConstant(ShiftVal, dl, MVT::i8));
20670 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20673 int WordIdx = IdxVal / 2;
20674 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
20675 DAG.getBitcast(MVT::v8i16, Vec),
20676 DAG.getIntPtrConstant(WordIdx, dl));
20677 int ShiftVal = (IdxVal % 2) * 8;
20679 Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
20680 DAG.getConstant(ShiftVal, dl, MVT::i8));
20681 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20684 if (VT == MVT::f16 || VT.getSizeInBits() == 32) {
20688 // Shuffle the element to the lowest element, then movss or movsh.
20689 SmallVector<int, 8> Mask(VecVT.getVectorNumElements(), -1);
20690 Mask[0] = static_cast<int>(IdxVal);
20691 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
20692 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
20693 DAG.getIntPtrConstant(0, dl));
20696 if (VT.getSizeInBits() == 64) {
20697 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
20698 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
20699 // to match extract_elt for f64.
20703 // UNPCKHPD the element to the lowest double word, then movsd.
20704 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
20705 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
20706 int Mask[2] = { 1, -1 };
20707 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
20708 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
20709 DAG.getIntPtrConstant(0, dl));
/// Insert one bit into a mask vector, like v16i1 or v8i1.
/// This is an AVX-512 feature.
20717 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
20718 const X86Subtarget &Subtarget) {
20720 SDValue Vec = Op.getOperand(0);
20721 SDValue Elt = Op.getOperand(1);
20722 SDValue Idx = Op.getOperand(2);
20723 MVT VecVT = Vec.getSimpleValueType();
20725 if (!isa<ConstantSDNode>(Idx)) {
    // Non-constant index. Extend source and destination,
    // insert the element, and then truncate the result.
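    // For example, a variable-index insert into v8i1 is performed as
    //   trunc(insertelt(sext(Vec to v8i16), sext(Elt to i16), Idx)) to v8i1.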
20728 unsigned NumElts = VecVT.getVectorNumElements();
20729 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
20730 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
20731 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
20732 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
20733 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
20734 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
20737 // Copy into a k-register, extract to v1i1 and insert_subvector.
20738 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
20739 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec, Idx);
20742 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
20743 SelectionDAG &DAG) const {
20744 MVT VT = Op.getSimpleValueType();
20745 MVT EltVT = VT.getVectorElementType();
20746 unsigned NumElts = VT.getVectorNumElements();
20747 unsigned EltSizeInBits = EltVT.getScalarSizeInBits();
20749 if (EltVT == MVT::i1)
20750 return InsertBitToMaskVector(Op, DAG, Subtarget);
20753 SDValue N0 = Op.getOperand(0);
20754 SDValue N1 = Op.getOperand(1);
20755 SDValue N2 = Op.getOperand(2);
20756 auto *N2C = dyn_cast<ConstantSDNode>(N2);
20758 if (EltVT == MVT::bf16) {
20759 MVT IVT = VT.changeVectorElementTypeToInteger();
20760 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVT,
20761 DAG.getBitcast(IVT, N0),
20762 DAG.getBitcast(MVT::i16, N1), N2);
20763 return DAG.getBitcast(VT, Res);
  // For variable insertion indices we're usually better off spilling to
  // stack, but AVX512 can use a variable compare+select by comparing against
  // all possible vector indices, and FP insertion has less gpr->simd traffic.
20770 if (!(Subtarget.hasBWI() ||
20771 (Subtarget.hasAVX512() && EltSizeInBits >= 32) ||
20772 (Subtarget.hasSSE41() && (EltVT == MVT::f32 || EltVT == MVT::f64))))
20775 MVT IdxSVT = MVT::getIntegerVT(EltSizeInBits);
20776 MVT IdxVT = MVT::getVectorVT(IdxSVT, NumElts);
20777 if (!isTypeLegal(IdxSVT) || !isTypeLegal(IdxVT))
20780 SDValue IdxExt = DAG.getZExtOrTrunc(N2, dl, IdxSVT);
20781 SDValue IdxSplat = DAG.getSplatBuildVector(IdxVT, dl, IdxExt);
20782 SDValue EltSplat = DAG.getSplatBuildVector(VT, dl, N1);
20784 SmallVector<SDValue, 16> RawIndices;
20785 for (unsigned I = 0; I != NumElts; ++I)
20786 RawIndices.push_back(DAG.getConstant(I, dl, IdxSVT));
20787 SDValue Indices = DAG.getBuildVector(IdxVT, dl, RawIndices);
20789 // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
20790 return DAG.getSelectCC(dl, IdxSplat, Indices, EltSplat, N0,
20791 ISD::CondCode::SETEQ);
20794 if (N2C->getAPIntValue().uge(NumElts))
20796 uint64_t IdxVal = N2C->getZExtValue();
20798 bool IsZeroElt = X86::isZeroNode(N1);
20799 bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
20801 if (IsZeroElt || IsAllOnesElt) {
    // Lower insertion of v16i8/v32i8/v16i16 -1 elts as an 'OR' blend.
20803 // We don't deal with i8 0 since it appears to be handled elsewhere.
20804 if (IsAllOnesElt &&
20805 ((VT == MVT::v16i8 && !Subtarget.hasSSE41()) ||
20806 ((VT == MVT::v32i8 || VT == MVT::v16i16) && !Subtarget.hasInt256()))) {
20807 SDValue ZeroCst = DAG.getConstant(0, dl, VT.getScalarType());
20808 SDValue OnesCst = DAG.getAllOnesConstant(dl, VT.getScalarType());
20809 SmallVector<SDValue, 8> CstVectorElts(NumElts, ZeroCst);
20810 CstVectorElts[IdxVal] = OnesCst;
20811 SDValue CstVector = DAG.getBuildVector(VT, dl, CstVectorElts);
20812 return DAG.getNode(ISD::OR, dl, VT, N0, CstVector);
20814 // See if we can do this more efficiently with a blend shuffle with a
20815 // rematerializable vector.
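    // For example, inserting 0 into element 2 of a v8i32 becomes a blend of
    // N0 with an all-zeros vector using BlendMask = <0, 1, 10, 3, 4, 5, 6, 7>.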
20816 if (Subtarget.hasSSE41() &&
20817 (EltSizeInBits >= 16 || (IsZeroElt && !VT.is128BitVector()))) {
20818 SmallVector<int, 8> BlendMask;
20819 for (unsigned i = 0; i != NumElts; ++i)
20820 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
20821 SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
20822 : getOnesVector(VT, DAG, dl);
20823 return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
20827 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
20828 // into that, and then insert the subvector back into the result.
20829 if (VT.is256BitVector() || VT.is512BitVector()) {
20830 // With a 256-bit vector, we can insert into the zero element efficiently
20831 // using a blend if we have AVX or AVX2 and the right data type.
20832 if (VT.is256BitVector() && IdxVal == 0) {
20833 // TODO: It is worthwhile to cast integer to floating point and back
20834 // and incur a domain crossing penalty if that's what we'll end up
20835 // doing anyway after extracting to a 128-bit vector.
20836 if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
20837 (Subtarget.hasAVX2() && (EltVT == MVT::i32 || EltVT == MVT::i64))) {
20838 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
20839 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
20840 DAG.getTargetConstant(1, dl, MVT::i8));
20844 unsigned NumEltsIn128 = 128 / EltSizeInBits;
20845 assert(isPowerOf2_32(NumEltsIn128) &&
20846 "Vectors will always have power-of-two number of elements.");
20848 // If we are not inserting into the low 128-bit vector chunk,
20849 // then prefer the broadcast+blend sequence.
20850 // FIXME: relax the profitability check iff all N1 uses are insertions.
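    // For example, inserting into element 11 of a v16i16 on AVX2 splats N1
    // and blends with BlendMask = <0, ..., 10, 27, 12, ..., 15>, where
    // 27 == 11 + 16 picks the splatted element.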
20851 if (IdxVal >= NumEltsIn128 &&
20852 ((Subtarget.hasAVX2() && EltSizeInBits != 8) ||
20853 (Subtarget.hasAVX() && (EltSizeInBits >= 32) &&
20854 X86::mayFoldLoad(N1, Subtarget)))) {
20855 SDValue N1SplatVec = DAG.getSplatBuildVector(VT, dl, N1);
20856 SmallVector<int, 8> BlendMask;
20857 for (unsigned i = 0; i != NumElts; ++i)
20858 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
20859 return DAG.getVectorShuffle(VT, dl, N0, N1SplatVec, BlendMask);
20862 // Get the desired 128-bit vector chunk.
20863 SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
20865 // Insert the element into the desired chunk.
    // Since NumEltsIn128 is a power of 2 we can use a mask instead of modulo.
20867 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
20869 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
20870 DAG.getIntPtrConstant(IdxIn128, dl));
    // Insert the changed part back into the bigger vector.
20873 return insert128BitVector(N0, V, IdxVal, DAG, dl);
20875 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
20877 // This will be just movw/movd/movq/movsh/movss/movsd.
20878 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode())) {
20879 if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
20880 EltVT == MVT::f16 || EltVT == MVT::i64) {
20881 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
20882 return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
20885 // We can't directly insert an i8 or i16 into a vector, so zero extend
20886 // it to i32 first.
20887 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
20888 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, N1);
20889 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
20890 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, N1);
20891 N1 = getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
20892 return DAG.getBitcast(VT, N1);
  // Transform it so it matches pinsr{b,w}, which expects a GR32 as its
  // second argument. SSE41 is required for pinsrb.
20898 if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
20900 if (VT == MVT::v8i16) {
20901 assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
20902 Opc = X86ISD::PINSRW;
20904 assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
20905 assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
20906 Opc = X86ISD::PINSRB;
20909 assert(N1.getValueType() != MVT::i32 && "Unexpected VT");
20910 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
20911 N2 = DAG.getTargetConstant(IdxVal, dl, MVT::i8);
20912 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
20915 if (Subtarget.hasSSE41()) {
20916 if (EltVT == MVT::f32) {
20917 // Bits [7:6] of the constant are the source select. This will always be
20918 // zero here. The DAG Combiner may combine an extract_elt index into
20919 // these bits. For example (insert (extract, 3), 2) could be matched by
20920 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
20921 // Bits [5:4] of the constant are the destination select. This is the
20922 // value of the incoming immediate.
20923 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
20924 // combine either bitwise AND or insert of float 0.0 to set these bits.
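      // For example, an insert into element 2 uses the immediate
      // (2 << 4) == 0x20: source select 0, destination select 2, and an
      // all-zero zero mask.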
20926 bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
20927 if (IdxVal == 0 && (!MinSize || !X86::mayFoldLoad(N1, Subtarget))) {
        // If this is an insertion of 32 bits into the low 32 bits of
        // a vector, we prefer to generate a blend with immediate rather
        // than an insertps. Blends are simpler operations in hardware and so
        // will always have equal or better performance than insertps.
        // But if optimizing for size and there's a load folding opportunity,
        // generate insertps because blendps does not have a 32-bit memory
        // operand form.
20935 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
20936 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
20937 DAG.getTargetConstant(1, dl, MVT::i8));
      // Create this as a scalar to vector.
20940 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
20941 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
20942 DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
20945 // PINSR* works with constant index.
20946 if (EltVT == MVT::i32 || EltVT == MVT::i64)
20953 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
20954 SelectionDAG &DAG) {
20956 MVT OpVT = Op.getSimpleValueType();
  // It's always cheaper to replace a xor+movd with xorps, and it simplifies
  // further lowering.
20960 if (X86::isZeroNode(Op.getOperand(0)))
20961 return getZeroVector(OpVT, Subtarget, DAG, dl);
20963 // If this is a 256-bit vector result, first insert into a 128-bit
20964 // vector and then insert into the 256-bit vector.
20965 if (!OpVT.is128BitVector()) {
20966 // Insert into a 128-bit vector.
20967 unsigned SizeFactor = OpVT.getSizeInBits() / 128;
20968 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
20969 OpVT.getVectorNumElements() / SizeFactor);
20971 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
20973 // Insert the 128-bit vector.
20974 return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
20976 assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
20977 "Expected an SSE type!");
  // Pass through a v4i32 or v8i16 SCALAR_TO_VECTOR as that's what we use in
  // tblgen patterns.
20981 if (OpVT == MVT::v4i32 || (OpVT == MVT::v8i16 && Subtarget.hasFP16()))
20984 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
20985 return DAG.getBitcast(
20986 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
20989 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
20990 // simple superregister reference or explicit instructions to insert
20991 // the upper bits of a vector.
20992 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
20993 SelectionDAG &DAG) {
20994 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
20996 return insert1BitVector(Op, DAG, Subtarget);
20999 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
21000 SelectionDAG &DAG) {
21001 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
21002 "Only vXi1 extract_subvectors need custom lowering");
21005 SDValue Vec = Op.getOperand(0);
21006 uint64_t IdxVal = Op.getConstantOperandVal(1);
21008 if (IdxVal == 0) // the operation is legal
21011 MVT VecVT = Vec.getSimpleValueType();
21012 unsigned NumElems = VecVT.getVectorNumElements();
21014 // Extend to natively supported kshift.
21015 MVT WideVecVT = VecVT;
21016 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
21017 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
21018 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
21019 DAG.getUNDEF(WideVecVT), Vec,
21020 DAG.getIntPtrConstant(0, dl));
21023 // Shift to the LSB.
21024 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
21025 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
21027 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
21028 DAG.getIntPtrConstant(0, dl));
21031 // Returns the appropriate wrapper opcode for a global reference.
21032 unsigned X86TargetLowering::getGlobalWrapperKind(
21033 const GlobalValue *GV, const unsigned char OpFlags) const {
21034 // References to absolute symbols are never PC-relative.
21035 if (GV && GV->isAbsoluteSymbolRef())
21036 return X86ISD::Wrapper;
21038 CodeModel::Model M = getTargetMachine().getCodeModel();
21039 if (Subtarget.isPICStyleRIPRel() &&
21040 (M == CodeModel::Small || M == CodeModel::Kernel))
21041 return X86ISD::WrapperRIP;
21043 // In the medium model, functions can always be referenced RIP-relatively,
21044 // since they must be within 2GiB. This is also possible in non-PIC mode, and
21045 // shorter than the 64-bit absolute immediate that would otherwise be emitted.
21046 if (M == CodeModel::Medium && isa_and_nonnull<Function>(GV))
21047 return X86ISD::WrapperRIP;
21049 // GOTPCREL references must always use RIP.
21050 if (OpFlags == X86II::MO_GOTPCREL || OpFlags == X86II::MO_GOTPCREL_NORELAX)
21051 return X86ISD::WrapperRIP;
21053 return X86ISD::Wrapper;
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
21063 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
21064 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
21066 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
21067 // global base reg.
21068 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
21070 auto PtrVT = getPointerTy(DAG.getDataLayout());
21071 SDValue Result = DAG.getTargetConstantPool(
21072 CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
21074 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
21075 // With PIC, the address is actually $g + Offset.
21078 DAG.getNode(ISD::ADD, DL, PtrVT,
21079 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
21085 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
21086 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
21088 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
21089 // global base reg.
21090 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
21092 auto PtrVT = getPointerTy(DAG.getDataLayout());
21093 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
21095 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
21097 // With PIC, the address is actually $g + Offset.
21100 DAG.getNode(ISD::ADD, DL, PtrVT,
21101 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
21106 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
21107 SelectionDAG &DAG) const {
21108 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
21112 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  // Create the TargetBlockAddress node.
21114 unsigned char OpFlags =
21115 Subtarget.classifyBlockAddressReference();
21116 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
21117 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
21119 auto PtrVT = getPointerTy(DAG.getDataLayout());
21120 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
21121 Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
21123 // With PIC, the address is actually $g + Offset.
21124 if (isGlobalRelativeToPICBase(OpFlags)) {
21125 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
21126 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
/// Creates target global address or external symbol nodes for calls or
/// other uses.
21134 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
21135 bool ForCall) const {
21136 // Unpack the global address or external symbol.
21137 const SDLoc &dl = SDLoc(Op);
21138 const GlobalValue *GV = nullptr;
21139 int64_t Offset = 0;
21140 const char *ExternalSym = nullptr;
21141 if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
21142 GV = G->getGlobal();
21143 Offset = G->getOffset();
21145 const auto *ES = cast<ExternalSymbolSDNode>(Op);
21146 ExternalSym = ES->getSymbol();
21149 // Calculate some flags for address lowering.
21150 const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
21151 unsigned char OpFlags;
21153 OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
21155 OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
21156 bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
21157 bool NeedsLoad = isGlobalStubReference(OpFlags);
21159 CodeModel::Model M = DAG.getTarget().getCodeModel();
21160 auto PtrVT = getPointerTy(DAG.getDataLayout());
21164 // Create a target global address if this is a global. If possible, fold the
21165 // offset into the global address reference. Otherwise, ADD it on later.
21166 // Suppress the folding if Offset is negative: movl foo-1, %eax is not
21167 // allowed because if the address of foo is 0, the ELF R_X86_64_32
21168 // relocation will compute to a negative value, which is invalid.
21169 int64_t GlobalOffset = 0;
21170 if (OpFlags == X86II::MO_NO_FLAG && Offset >= 0 &&
21171 X86::isOffsetSuitableForCodeModel(Offset, M, true)) {
21172 std::swap(GlobalOffset, Offset);
21174 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
21176 // If this is not a global address, this must be an external symbol.
21177 Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
21180 // If this is a direct call, avoid the wrapper if we don't need to do any
21181 // loads or adds. This allows SDAG ISel to match direct calls.
21182 if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
21185 Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
21187 // With PIC, the address is actually $g + Offset.
21189 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
21190 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
  // For globals that require a load from a stub to get the address, emit the
  // load.
21196 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
21197 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
21199 // If there was a non-zero offset that we didn't fold, create an explicit
21200 // addition for it.
21202 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
21203 DAG.getConstant(Offset, dl, PtrVT));
21209 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
21210 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
21214 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
21215 SDValue *InGlue, const EVT PtrVT, unsigned ReturnReg,
21216 unsigned char OperandFlags, bool LocalDynamic = false) {
21217 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
21218 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
21220 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
21221 GA->getValueType(0),
21225 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
21229 SDValue Ops[] = { Chain, TGA, *InGlue };
21230 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
21232 SDValue Ops[] = { Chain, TGA };
21233 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
  // TLSADDR will be codegen'ed as a call. Inform MFI that the function has
  // calls.
21237 MFI.setAdjustsStack(true);
21238 MFI.setHasCalls(true);
21240 SDValue Glue = Chain.getValue(1);
21241 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  SDValue InGlue;
  SDLoc dl(GA); // ? function entry point might be better
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
                                   DAG.getNode(X86ISD::GlobalBaseReg,
                                               SDLoc(), PtrVT), InGlue);
  InGlue = Chain.getValue(1);

  return GetTLSADDR(DAG, Chain, GA, &InGlue, PtrVT, X86::EAX, X86II::MO_TLSGD);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit LP64
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
                    X86::RAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit ILP32
static SDValue
LowerToTLSGeneralDynamicModelX32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                 const EVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
                    X86::EAX, X86II::MO_TLSGD);
}
static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
                                           SelectionDAG &DAG, const EVT PtrVT,
                                           bool Is64Bit, bool Is64BitLP64) {
  SDLoc dl(GA);

  // Get the start address of the TLS block for this module.
  X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
                                    .getInfo<X86MachineFunctionInfo>();
  MFI->incNumLocalDynamicTLSAccesses();

  SDValue Base;
  if (Is64Bit) {
    unsigned ReturnReg = Is64BitLP64 ? X86::RAX : X86::EAX;
    Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, ReturnReg,
                      X86II::MO_TLSLD, /*LocalDynamic=*/true);
  } else {
    SDValue InGlue;
    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
        DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InGlue);
    InGlue = Chain.getValue(1);
    Base = GetTLSADDR(DAG, Chain, GA, &InGlue, PtrVT, X86::EAX,
                      X86II::MO_TLSLDM, /*LocalDynamic=*/true);
  }

  // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
  // of Base.

  // Build x@dtpoff.
  unsigned char OperandFlags = X86II::MO_DTPOFF;
  unsigned WrapperKind = X86ISD::Wrapper;
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  // Add x@dtpoff with the base.
  return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
}
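// Rough shape of local-dynamic output on x86-64 ELF (a sketch, assuming the
// usual __tls_get_addr ABI): the module's TLS base is materialized once, and
// each variable then only costs an add of its @dtpoff offset:
//   leaq x@tlsld(%rip), %rdi
//   call __tls_get_addr@plt    # TLS block base for this module in %rax
//   leaq x@dtpoff(%rax), %rcx  # cheap per-variable step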
// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const EVT PtrVT, TLSModel::Model model,
                                   bool is64Bit, bool isPIC) {
  SDLoc dl(GA);

  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
                                                         is64Bit ? 257 : 256));

  SDValue ThreadPointer =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
                  MachinePointerInfo(Ptr));

  unsigned char OperandFlags = 0;
  // Most TLS accesses are not RIP relative, even on x86-64. One exception is
  // the initial-exec model on 64-bit targets, handled below.
  unsigned WrapperKind = X86ISD::Wrapper;
  if (model == TLSModel::LocalExec) {
    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
  } else if (model == TLSModel::InitialExec) {
    if (is64Bit) {
      OperandFlags = X86II::MO_GOTTPOFF;
      WrapperKind = X86ISD::WrapperRIP;
    } else {
      OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
    }
  } else {
    llvm_unreachable("Unexpected model");
  }

  // emit "addl x@ntpoff,%eax" (local exec)
  // or   "addl x@indntpoff,%eax" (initial exec)
  // or   "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
  SDValue TGA =
      DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
                                 GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  if (model == TLSModel::InitialExec) {
    if (isPIC && !is64Bit) {
      Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                           Offset);
    }

    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}
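// Typical 64-bit materializations (a sketch; exact forms depend on linker
// relaxation):
//   local-exec:   movq %fs:0, %rax
//                 leaq x@tpoff(%rax), %rax
//   initial-exec: movq %fs:0, %rax
//                 addq x@gottpoff(%rip), %rax   # offset loaded from the GOT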
SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {

  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  const GlobalValue *GV = GA->getGlobal();
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  bool PositionIndependent = isPositionIndependent();

  if (Subtarget.isTargetELF()) {
    TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
    switch (model) {
    case TLSModel::GeneralDynamic:
      if (Subtarget.is64Bit()) {
        if (Subtarget.isTarget64BitLP64())
          return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
        return LowerToTLSGeneralDynamicModelX32(GA, DAG, PtrVT);
      }
      return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
    case TLSModel::LocalDynamic:
      return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT, Subtarget.is64Bit(),
                                         Subtarget.isTarget64BitLP64());
    case TLSModel::InitialExec:
    case TLSModel::LocalExec:
      return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
                                 PositionIndependent);
    }
    llvm_unreachable("Unknown TLS model.");
  }

  if (Subtarget.isTargetDarwin()) {
    // Darwin only has one model of TLS. Lower to that.
    unsigned char OpFlag = 0;
    unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
                           X86ISD::WrapperRIP : X86ISD::Wrapper;

    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
    // global base reg.
    bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
    if (PIC32)
      OpFlag = X86II::MO_TLVP_PIC_BASE;
    else
      OpFlag = X86II::MO_TLVP;
    SDLoc DL(Op);
    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                                GA->getValueType(0),
                                                GA->getOffset(), OpFlag);
    SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);

    // With PIC32, the address is actually $g + Offset.
    if (PIC32)
      Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                           Offset);

    // Lowering the machine isd will make sure everything is in the right
    // location.
    SDValue Chain = DAG.getEntryNode();
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
    SDValue Args[] = { Chain, Offset };
    Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
    Chain = DAG.getCALLSEQ_END(Chain, 0, 0, Chain.getValue(1), DL);

    // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setAdjustsStack(true);

    // And our return value (tls address) is in the standard call return value
    // location.
    unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
    return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
  }

  if (Subtarget.isOSWindows()) {
    // Just use the implicit TLS architecture.
    // Need to generate something similar to:
    //   mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
    //                              ; from TEB
    //   mov ecx, dword [rel _tls_index]: Load index (from C runtime)
    //   mov rcx, qword [rdx+rcx*8]
    //   mov eax, .tls$:tlsvar
    //   [rax+rcx] contains the address
    //   Windows 64bit: gs:0x58
    //   Windows 32bit: fs:__tls_array

    SDLoc dl(GA);
    SDValue Chain = DAG.getEntryNode();

    // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
    // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
    // use its literal value of 0x2C.
    Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
                                        ? Type::getInt8PtrTy(*DAG.getContext(),
                                                             256)
                                        : Type::getInt32PtrTy(*DAG.getContext(),
                                                              257));

    SDValue TlsArray = Subtarget.is64Bit()
                           ? DAG.getIntPtrConstant(0x58, dl)
                           : (Subtarget.isTargetWindowsGNU()
                                  ? DAG.getIntPtrConstant(0x2C, dl)
                                  : DAG.getExternalSymbol("_tls_array", PtrVT));

    SDValue ThreadPointer =
        DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));

    SDValue res;
    if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
      res = ThreadPointer;
    } else {
      // Load the _tls_index variable.
      SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
      if (Subtarget.is64Bit())
        IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
                             MachinePointerInfo(), MVT::i32);
      else
        IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());

      const DataLayout &DL = DAG.getDataLayout();
      SDValue Scale =
          DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
      IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);

      res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
    }

    res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());

    // Get the offset of the start of the .tls section.
    SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                             GA->getValueType(0),
                                             GA->getOffset(), X86II::MO_SECREL);
    SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);

    // The address of the thread local variable is the add of the thread
    // pointer with the offset of the variable.
    return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
  }

  llvm_unreachable("TLS not implemented for this target.");
}
/// Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
/// TODO: Can this be moved to general expansion code?
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
  SDValue Lo, Hi;
  DAG.getTargetLoweringInfo().expandShiftParts(Op.getNode(), Lo, Hi, DAG);
  return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
}
// Try to use a packed vector operation to handle i64 on 32-bit targets when
// AVX512DQ is enabled.
static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  assert((Op.getOpcode() == ISD::SINT_TO_FP ||
          Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
          Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
          Op.getOpcode() == ISD::UINT_TO_FP) &&
         "Unexpected opcode!");
  bool IsStrict = Op->isStrictFPOpcode();
  unsigned OpNo = IsStrict ? 1 : 0;
  SDValue Src = Op.getOperand(OpNo);
  MVT SrcVT = Src.getSimpleValueType();
  MVT VT = Op.getSimpleValueType();

  if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
      (VT != MVT::f32 && VT != MVT::f64))
    return SDValue();

  // Pack the i64 into a vector, do the operation and extract.

  // Using 256-bit to ensure result is 128-bits for f32 case.
  unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
  MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
  MVT VecVT = MVT::getVectorVT(VT, NumElts);

  SDLoc dl(Op);
  SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
  if (IsStrict) {
    SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
                                 {Op.getOperand(0), InVec});
    SDValue Chain = CvtVec.getValue(1);
    SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
                                DAG.getIntPtrConstant(0, dl));
    return DAG.getMergeValues({Value, Chain}, dl);
  }

  SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
                     DAG.getIntPtrConstant(0, dl));
}
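// For example (illustrative sketch): on a 32-bit target with AVX512DQ+VLX, a
// scalar i64 sitofp-to-f64 becomes roughly
//   vmovq     (mem), %xmm0       # pack the i64 into element 0
//   vcvtqq2pd %ymm0, %ymm0       # packed conversion, no GPR round-trip
// with the scalar f64 result read back from element 0.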
// Try to use a packed vector operation to handle i64 on 32-bit targets.
static SDValue LowerI64IntToFP16(SDValue Op, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {
  assert((Op.getOpcode() == ISD::SINT_TO_FP ||
          Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
          Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
          Op.getOpcode() == ISD::UINT_TO_FP) &&
         "Unexpected opcode!");
  bool IsStrict = Op->isStrictFPOpcode();
  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
  MVT SrcVT = Src.getSimpleValueType();
  MVT VT = Op.getSimpleValueType();

  if (SrcVT != MVT::i64 || Subtarget.is64Bit() || VT != MVT::f16)
    return SDValue();

  // Pack the i64 into a vector, do the operation and extract.

  assert(Subtarget.hasFP16() && "Expected FP16");

  SDLoc dl(Op);
  SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
  if (IsStrict) {
    SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {MVT::v2f16, MVT::Other},
                                 {Op.getOperand(0), InVec});
    SDValue Chain = CvtVec.getValue(1);
    SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
                                DAG.getIntPtrConstant(0, dl));
    return DAG.getMergeValues({Value, Chain}, dl);
  }

  SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, MVT::v2f16, InVec);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
                     DAG.getIntPtrConstant(0, dl));
}
static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
                          const X86Subtarget &Subtarget) {
  switch (Opcode) {
  case ISD::SINT_TO_FP:
    // TODO: Handle wider types with AVX/AVX512.
    if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
      return false;
    // CVTDQ2PS or (V)CVTDQ2PD
    return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);

  case ISD::UINT_TO_FP:
    // TODO: Handle wider types and i64 elements.
    if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
      return false;
    // VCVTUDQ2PS or VCVTUDQ2PD
    return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;

  default:
    return false;
  }
}
/// Given a scalar cast operation that is extracted from a vector, try to
/// vectorize the cast op followed by extraction. This will avoid an expensive
/// round-trip between XMM and GPR.
static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  // TODO: This could be enhanced to handle smaller integer types by peeking
  // through an extend.
  SDValue Extract = Cast.getOperand(0);
  MVT DestVT = Cast.getSimpleValueType();
  if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isa<ConstantSDNode>(Extract.getOperand(1)))
    return SDValue();

  // See if we have a 128-bit vector cast op for this type of cast.
  SDValue VecOp = Extract.getOperand(0);
  MVT FromVT = VecOp.getSimpleValueType();
  unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
  MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
  MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
  if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
    return SDValue();

  // If we are extracting from a non-zero element, first shuffle the source
  // vector to allow extracting from element zero.
  SDLoc DL(Cast);
  if (!isNullConstant(Extract.getOperand(1))) {
    SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
    Mask[0] = Extract.getConstantOperandVal(1);
    VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
  }
  // If the source vector is wider than 128-bits, extract the low part. Do not
  // create an unnecessarily wide vector cast op.
  if (FromVT != Vec128VT)
    VecOp = extract128BitVector(VecOp, 0, DAG, DL);

  // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
  // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
  SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
                     DAG.getIntPtrConstant(0, DL));
}
/// Given a scalar cast to FP with a cast to integer operand (almost an ftrunc),
/// try to vectorize the cast ops. This will avoid an expensive round-trip
/// between XMM and GPR.
static SDValue lowerFPToIntToFP(SDValue CastToFP, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
  // TODO: Allow FP_TO_UINT.
  SDValue CastToInt = CastToFP.getOperand(0);
  MVT VT = CastToFP.getSimpleValueType();
  if (CastToInt.getOpcode() != ISD::FP_TO_SINT || VT.isVector())
    return SDValue();

  MVT IntVT = CastToInt.getSimpleValueType();
  SDValue X = CastToInt.getOperand(0);
  MVT SrcVT = X.getSimpleValueType();
  if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
    return SDValue();

  // See if we have 128-bit vector cast instructions for this type of cast.
  // We need cvttps2dq/cvttpd2dq and cvtdq2ps/cvtdq2pd.
  if (!Subtarget.hasSSE2() || (VT != MVT::f32 && VT != MVT::f64) ||
      IntVT != MVT::i32)
    return SDValue();

  unsigned SrcSize = SrcVT.getSizeInBits();
  unsigned IntSize = IntVT.getSizeInBits();
  unsigned VTSize = VT.getSizeInBits();
  MVT VecSrcVT = MVT::getVectorVT(SrcVT, 128 / SrcSize);
  MVT VecIntVT = MVT::getVectorVT(IntVT, 128 / IntSize);
  MVT VecVT = MVT::getVectorVT(VT, 128 / VTSize);

  // We need target-specific opcodes if this is v2f64 -> v4i32 -> v2f64.
  unsigned ToIntOpcode =
      SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
  unsigned ToFPOpcode =
      IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;

  // sint_to_fp (fp_to_sint X) --> extelt (sint_to_fp (fp_to_sint (s2v X))), 0
  //
  // We are not defining the high elements (for example, zero them) because
  // that could nullify any performance advantage that we hoped to gain from
  // this vector op hack. We do not expect any adverse effects (like denorm
  // penalties) with cast ops.
  SDLoc DL(CastToFP);
  SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
  SDValue VecX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecSrcVT, X);
  SDValue VCastToInt = DAG.getNode(ToIntOpcode, DL, VecIntVT, VecX);
  SDValue VCastToFP = DAG.getNode(ToFPOpcode, DL, VecVT, VCastToInt);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, VCastToFP, ZeroIdx);
}
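// Concretely (illustrative): for 'double y = (double)(int)x;' with x in xmm0,
// this produces something like
//   cvttpd2dq %xmm0, %xmm0   # fp_to_sint, value stays in XMM
//   cvtdq2pd  %xmm0, %xmm0   # sint_to_fp, value stays in XMM
// instead of bouncing the integer through a GPR via cvttsd2si/cvtsi2sd.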
static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  SDLoc DL(Op);
  bool IsStrict = Op->isStrictFPOpcode();
  MVT VT = Op->getSimpleValueType(0);
  SDValue Src = Op->getOperand(IsStrict ? 1 : 0);

  if (Subtarget.hasDQI()) {
    assert(!Subtarget.hasVLX() && "Unexpected features");

    assert((Src.getSimpleValueType() == MVT::v2i64 ||
            Src.getSimpleValueType() == MVT::v4i64) &&
           "Unsupported custom type");

    // With AVX512DQ, but not VLX we need to widen to get a 512-bit result type.
    assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
           "Unexpected VT!");
    MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;

    // Need to concat with zero vector for strict fp to avoid spurious
    // exceptions.
    SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
                           : DAG.getUNDEF(MVT::v8i64);
    Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
                      DAG.getIntPtrConstant(0, DL));
    SDValue Res, Chain;
    if (IsStrict) {
      Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
                        {Op->getOperand(0), Src});
      Chain = Res.getValue(1);
    } else {
      Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
    }

    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                      DAG.getIntPtrConstant(0, DL));

    if (IsStrict)
      return DAG.getMergeValues({Res, Chain}, DL);
    return Res;
  }

  bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
                  Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
  if (VT != MVT::v4f32 || IsSigned)
    return SDValue();

  SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
  SDValue One = DAG.getConstant(1, DL, MVT::v4i64);
  SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
                             DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
                             DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
  SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
  SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
  SmallVector<SDValue, 4> SignCvts(4);
  SmallVector<SDValue, 4> Chains(4);
  for (int i = 0; i != 4; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
                              DAG.getIntPtrConstant(i, DL));
    if (IsStrict) {
      SignCvts[i] =
          DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
                      {Op.getOperand(0), Elt});
      Chains[i] = SignCvts[i].getValue(1);
    } else {
      SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Elt);
    }
  }
  SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);

  SDValue Slow, Chain;
  if (IsStrict) {
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
    Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
                       {Chain, SignCvt, SignCvt});
    Chain = Slow.getValue(1);
  } else {
    Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
  }

  IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
  SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);

  if (IsStrict)
    return DAG.getMergeValues({Cvt, Chain}, DL);

  return Cvt;
}
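// Why the halving trick above is safe (a sketch): lanes with the sign bit set
// cannot be converted directly as signed, so they are halved first as
// (Src >> 1) | (Src & 1); OR-ing the shifted-out bit back in makes the final
// doubling round the same way the exact value would (round-to-odd). The
// halved value is converted signed and doubled with FADD, e.g. the u64
// 2^63 + 8 becomes 2^62 + 4, converts to 2^62 as f32, and doubles to 2^63,
// which matches the correctly rounded conversion of the original value.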
static SDValue promoteXINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  bool IsStrict = Op->isStrictFPOpcode();
  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
  SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
  MVT VT = Op.getSimpleValueType();
  MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
  SDLoc dl(Op);

  SDValue Rnd = DAG.getIntPtrConstant(0, dl);
  if (IsStrict)
    return DAG.getNode(
        ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
        {Chain,
         DAG.getNode(Op.getOpcode(), dl, {NVT, MVT::Other}, {Chain, Src}),
         Rnd});
  return DAG.getNode(ISD::FP_ROUND, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NVT, Src), Rnd);
}
static bool isLegalConversion(MVT VT, bool IsSigned,
                              const X86Subtarget &Subtarget) {
  if (VT == MVT::v4i32 && Subtarget.hasSSE2() && IsSigned)
    return true;
  if (VT == MVT::v8i32 && Subtarget.hasAVX() && IsSigned)
    return true;
  if (Subtarget.hasVLX() && (VT == MVT::v4i32 || VT == MVT::v8i32))
    return true;
  if (Subtarget.useAVX512Regs()) {
    if (VT == MVT::v16i32)
      return true;
    if (VT == MVT::v8i64 && Subtarget.hasDQI())
      return true;
  }
  if (Subtarget.hasDQI() && Subtarget.hasVLX() &&
      (VT == MVT::v2i64 || VT == MVT::v4i64))
    return true;
  return false;
}
SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();
  unsigned OpNo = IsStrict ? 1 : 0;
  SDValue Src = Op.getOperand(OpNo);
  SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
  MVT SrcVT = Src.getSimpleValueType();
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  if (isSoftFP16(VT))
    return promoteXINT_TO_FP(Op, DAG);
  else if (isLegalConversion(SrcVT, true, Subtarget))
    return Op;

  if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
    return LowerWin64_INT128_TO_FP(Op, DAG);

  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
    return Extract;

  if (SDValue R = lowerFPToIntToFP(Op, DAG, Subtarget))
    return R;

  if (SrcVT.isVector()) {
    if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
      // Note: Since v2f64 is a legal type, we don't need to zero extend the
      // source for strict FP.
      if (IsStrict)
        return DAG.getNode(
            X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
            {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
                                DAG.getUNDEF(SrcVT))});
      return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
                         DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
                                     DAG.getUNDEF(SrcVT)));
    }
    if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
      return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
    return SDValue();
  }

  assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  bool UseSSEReg = isScalarFPTypeInSSEReg(VT);

  // These are really Legal; return the operand so the caller accepts it as
  // Legal.
  if (SrcVT == MVT::i32 && UseSSEReg)
    return Op;
  if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
    return Op;

  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
    return V;
  if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
    return V;

  // SSE doesn't have an i16 conversion so we need to promote.
  if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
                         {Chain, Ext});

    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
  }

  if (VT == MVT::f128 || !Subtarget.hasX87())
    return SDValue();

  SDValue ValueToStore = Src;
  if (SrcVT == MVT::i64 && Subtarget.hasSSE2() && !Subtarget.is64Bit())
    // Bitcasting to f64 here allows us to do a single 64-bit store from
    // an SSE register, avoiding the store forwarding penalty that would come
    // with two 32-bit stores.
    ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);

  unsigned Size = SrcVT.getStoreSize();
  Align Alignment(Size);
  MachineFunction &MF = DAG.getMachineFunction();
  auto PtrVT = getPointerTy(MF.getDataLayout());
  int SSFI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false);
  MachinePointerInfo MPI =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  Chain = DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Alignment);
  std::pair<SDValue, SDValue> Tmp =
      BuildFILD(VT, SrcVT, dl, Chain, StackSlot, MPI, Alignment, DAG);

  if (IsStrict)
    return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);

  return Tmp.first;
}
std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(
    EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer,
    MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const {
  // Build the FILD.
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(DstVT);
  if (useSSE)
    Tys = DAG.getVTList(MVT::f80, MVT::Other);
  else
    Tys = DAG.getVTList(DstVT, MVT::Other);

  SDValue FILDOps[] = {Chain, Pointer};
  SDValue Result =
      DAG.getMemIntrinsicNode(X86ISD::FILD, DL, Tys, FILDOps, SrcVT, PtrInfo,
                              Alignment, MachineMemOperand::MOLoad);
  Chain = Result.getValue(1);

  if (useSSE) {
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned SSFISize = DstVT.getStoreSize();
    int SSFI =
        MF.getFrameInfo().CreateStackObject(SSFISize, Align(SSFISize), false);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
    Tys = DAG.getVTList(MVT::Other);
    SDValue FSTOps[] = {Chain, Result, StackSlot};
    MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
        MachineMemOperand::MOStore, SSFISize, Align(SSFISize));

    Chain =
        DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps, DstVT, StoreMMO);
    Result = DAG.getLoad(
        DstVT, DL, Chain, StackSlot,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
    Chain = Result.getValue(1);
  }

  return { Result, Chain };
}
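// On an SSE target the result takes the x87 detour sketched below
// (illustrative; register/slot names are arbitrary):
//   fildll (i64 slot)       # x87 performs the int->fp conversion
//   fstpl  (f64 slot)       # round and store to the destination FP type
//   movsd  (f64 slot), %xmm0
// On x87-only targets the FILD result is used directly and the store/reload
// pair is skipped.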
/// Horizontal vector math instructions may be slower than normal math with
/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
/// implementation, and likely shuffle complexity of the alternate sequence.
static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  bool IsOptimizingSize = DAG.shouldOptForSize();
  bool HasFastHOps = Subtarget.hasFastHorizontalOps();
  return !IsSingleSource || IsOptimizingSize || HasFastHOps;
}
/// 64-bit unsigned integer to double expansion.
static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  // We can't use this algorithm for strict fp. It produces -0.0 instead of
  // +0.0 when converting 0 while rounding toward negative infinity. The
  // caller will fall back to Expand when i64 is legal, or use FILD in 32-bit
  // mode.
  assert(!Op->isStrictFPOpcode() && "Expected non-strict uint_to_fp!");
  // This algorithm is not obvious. Here is what we're trying to output:
  /*
     movq       %rax,  %xmm0
     punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
     subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
     #ifdef __SSE3__
       haddpd   %xmm0, %xmm0
     #else
       pshufd   $0x4e, %xmm0, %xmm1
       addpd    %xmm1, %xmm0
     #endif
  */
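  // Why the magic constants work (a sketch): after the punpckldq, the two
  // doubles in %xmm0 have the bit patterns 0x43300000'lo(x) and
  // 0x45300000'hi(x), i.e. the exact values 2^52 + lo(x) and 2^84 +
  // 2^32 * hi(x). Subtracting c1 = { 2^52, 2^84 } leaves { lo(x),
  // 2^32 * hi(x) }, and the horizontal add yields lo(x) + 2^32 * hi(x),
  // which is (double)x.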
  SDLoc dl(Op);
  LLVMContext *Context = DAG.getContext();

  // Build some magic constants.
  static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
  Constant *C0 = ConstantDataVector::get(*Context, CV0);
  auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, Align(16));

  SmallVector<Constant*,2> CV1;
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
                                      APInt(64, 0x4330000000000000ULL))));
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
                                      APInt(64, 0x4530000000000000ULL))));
  Constant *C1 = ConstantVector::get(CV1);
  SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, Align(16));

  // Load the 64-bit value into an XMM register.
  SDValue XR1 =
      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(0));
  SDValue CLod0 = DAG.getLoad(
      MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
  SDValue Unpck1 =
      getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);

  SDValue CLod1 = DAG.getLoad(
      MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
  SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
  SDValue Result;

  if (Subtarget.hasSSE3() &&
      shouldUseHorizontalOp(true, DAG, Subtarget)) {
    Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
  } else {
    SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
    Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
  }

  Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
                       DAG.getIntPtrConstant(0, dl));
  return Result;
}
/// 32-bit unsigned integer to float expansion.
static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
  SDLoc dl(Op);
  // FP constant to bias correct the final result.
  SDValue Bias = DAG.getConstantFP(
      llvm::bit_cast<double>(0x4330000000000000ULL), dl, MVT::f64);

  // Load the 32-bit value into an XMM register.
  SDValue Load =
      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));

  // Zero out the upper parts of the register.
  Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);

  // Or the load with the bias.
  SDValue Or = DAG.getNode(
      ISD::OR, dl, MVT::v2i64,
      DAG.getBitcast(MVT::v2i64, Load),
      DAG.getBitcast(MVT::v2i64,
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
  Or =
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                  DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));

  if (Op.getNode()->isStrictFPOpcode()) {
    // Subtract the bias.
    // TODO: Are there any fast-math-flags to propagate here?
    SDValue Chain = Op.getOperand(0);
    SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
                              {Chain, Or, Bias});

    if (Op.getValueType() == Sub.getValueType())
      return Sub;

    // Handle final rounding.
    std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
        Sub, Sub.getValue(1), dl, Op.getSimpleValueType());

    return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
  }

  // Subtract the bias.
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);

  // Handle final rounding.
  return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
}
static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget,
                                     const SDLoc &DL) {
  if (Op.getSimpleValueType() != MVT::v2f64)
    return SDValue();

  bool IsStrict = Op->isStrictFPOpcode();

  SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
  assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");

  if (Subtarget.hasAVX512()) {
    if (!Subtarget.hasVLX()) {
      // Let generic type legalization widen this.
      if (!IsStrict)
        return SDValue();
      // Otherwise pad the integer input with 0s and widen the operation.
      N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
                       DAG.getConstant(0, DL, MVT::v2i32));
      SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
                                {Op.getOperand(0), N0});
      SDValue Chain = Res.getValue(1);
      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
                        DAG.getIntPtrConstant(0, DL));
      return DAG.getMergeValues({Res, Chain}, DL);
    }

    // Legalize to v4i32 type.
    N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
                     DAG.getUNDEF(MVT::v2i32));
    if (IsStrict)
      return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
                         {Op.getOperand(0), N0});
    return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
  }

  // Zero extend to 2i64, OR with the floating point representation of 2^52.
  // This gives us the floating point equivalent of 2^52 + the i32 integer
  // since double has 52-bits of mantissa. Then subtract 2^52 in floating
  // point leaving just our i32 integers in double format.
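  // For example (illustrative): lane value 7 zero-extends to the i64
  // 0x0000000000000007; OR with 0x4330000000000000 yields the double
  // 2^52 + 7 exactly, and (2^52 + 7) - 2^52 == 7.0 with no rounding.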
  SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
  SDValue VBias = DAG.getConstantFP(
      llvm::bit_cast<double>(0x4330000000000000ULL), DL, MVT::v2f64);
  SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
                           DAG.getBitcast(MVT::v2i64, VBias));
  Or = DAG.getBitcast(MVT::v2f64, Or);

  if (IsStrict)
    return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
                       {Op.getOperand(0), Or, VBias});
  return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
}
static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  SDLoc DL(Op);
  bool IsStrict = Op->isStrictFPOpcode();
  SDValue V = Op->getOperand(IsStrict ? 1 : 0);
  MVT VecIntVT = V.getSimpleValueType();
  assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
         "Unsupported custom type");

  if (Subtarget.hasAVX512()) {
    // With AVX512, but not VLX we need to widen to get a 512-bit result type.
    assert(!Subtarget.hasVLX() && "Unexpected features");
    MVT VT = Op->getSimpleValueType(0);

    // v8i32->v8f64 is legal with AVX512 so just return it.
    if (VT == MVT::v8f64)
      return Op;

    assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
           "Unexpected VT!");
    MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
    MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
    // Need to concat with zero vector for strict fp to avoid spurious
    // exceptions.
    SDValue Tmp =
        IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
    V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
                    DAG.getIntPtrConstant(0, DL));
    SDValue Res, Chain;
    if (IsStrict) {
      Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
                        {Op->getOperand(0), V});
      Chain = Res.getValue(1);
    } else {
      Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
    }

    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                      DAG.getIntPtrConstant(0, DL));

    if (IsStrict)
      return DAG.getMergeValues({Res, Chain}, DL);
    return Res;
  }

  if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
      Op->getSimpleValueType(0) == MVT::v4f64) {
    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
    Constant *Bias = ConstantFP::get(
        *DAG.getContext(),
        APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
    auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
    SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, Align(8));
    SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
    SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
    SDValue VBias = DAG.getMemIntrinsicNode(
        X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(8),
        MachineMemOperand::MOLoad);

    SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
                             DAG.getBitcast(MVT::v4i64, VBias));
    Or = DAG.getBitcast(MVT::v4f64, Or);

    if (IsStrict)
      return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
                         {Op.getOperand(0), Or, VBias});
    return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
  }
  // The algorithm is the following:
  // #ifdef __SSE4_1__
  //   uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
  //   uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
  //                               (uint4) 0x53000000, 0xaa);
  // #else
  //   uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
  //   uint4 hi = (v >> 16) | (uint4) 0x53000000;
  // #endif
  //   float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  //   return (float4) lo + fhi;
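  //
  // Worked lane (illustrative): v = 0x12345678. lo has bits 0x4b005678, the
  // float 2^23 + 0x5678; hi has bits 0x53001234, the float 2^39 +
  // 0x1234 * 2^16. Then fhi = hi - (2^39 + 2^23) = 0x1234 * 2^16 - 2^23, so
  // lo + fhi = 0x5678 + 0x1234 * 2^16 = (float)0x12345678, with a single
  // rounding in the final add.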
  bool Is128 = VecIntVT == MVT::v4i32;
  MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
  // If we convert to something other than the supported type, e.g., to v4f64,
  // abort early.
  if (VecFloatVT != Op->getSimpleValueType(0))
    return SDValue();

  // In the #ifdef/#else code, we have in common:
  // - The vector of constants:
  // -- 0x4b000000
  // -- 0x53000000
  // - A shift:
  // -- v >> 16

  // Create the splat vector for 0x4b000000.
  SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
  // Create the splat vector for 0x53000000.
  SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);

  // Create the right shift.
  SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
  SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);

  SDValue Low, High;
  if (Subtarget.hasSSE41()) {
    MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
    //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
    SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
    SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
    // Low will be bitcasted right away, so do not bother bitcasting back to
    // its original type.
    Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
                      VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
    //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
    //                                 (uint4) 0x53000000, 0xaa);
    SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
    SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
    // High will be bitcasted right away, so do not bother bitcasting back to
    // its original type.
    High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
                       VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
  } else {
    SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
    //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
    SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
    Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);

    //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
    High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
  }

  // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
  SDValue VecCstFSub = DAG.getConstantFP(
      APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);

  //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  // NOTE: By using fsub of a positive constant instead of fadd of a negative
  // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
  // enabled. See PR24512.
  SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
  //     return (float4) lo + fhi;
  if (IsStrict) {
    SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
                                {Op.getOperand(0), HighBitcast, VecCstFSub});
    return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
                       {FHigh.getValue(1), LowBitcast, FHigh});
  }

  SDValue FHigh =
      DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
  return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
}
static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
  SDValue N0 = Op.getOperand(OpNo);
  MVT SrcVT = N0.getSimpleValueType();
  SDLoc dl(Op);

  switch (SrcVT.SimpleTy) {
  default:
    llvm_unreachable("Custom UINT_TO_FP is not supported!");
  case MVT::v2i32:
    return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
  case MVT::v4i32:
  case MVT::v8i32:
    return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
  case MVT::v2i64:
  case MVT::v4i64:
    return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
  }
}
SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();
  unsigned OpNo = IsStrict ? 1 : 0;
  SDValue Src = Op.getOperand(OpNo);
  SDLoc dl(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  MVT SrcVT = Src.getSimpleValueType();
  MVT DstVT = Op->getSimpleValueType(0);
  SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();

  // Bail out when we don't have native conversion instructions.
  if (DstVT == MVT::f128)
    return SDValue();

  if (isSoftFP16(DstVT))
    return promoteXINT_TO_FP(Op, DAG);
  else if (isLegalConversion(SrcVT, false, Subtarget))
    return Op;

  if (DstVT.isVector())
    return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);

  if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
    return LowerWin64_INT128_TO_FP(Op, DAG);

  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
    return Extract;

  if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
      (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
    // Conversions from unsigned i32 to f32/f64 are legal,
    // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
    return Op;
  }

  // Promote i32 to i64 and use a signed conversion on 64-bit targets.
  if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
    Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
                         {Chain, Src});
    return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
  }

  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
    return V;
  if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
    return V;

  // The transform for i64->f64 isn't correct for 0 when rounding to negative
  // infinity. It produces -0.0, so disable under strictfp.
  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && Subtarget.hasSSE2() &&
      !IsStrict)
    return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
  // The transform for i32->f64/f32 isn't correct for 0 when rounding to
  // negative infinity, so disable under strictfp; FILD is used instead.
  if (SrcVT == MVT::i32 && Subtarget.hasSSE2() && DstVT != MVT::f80 &&
      !IsStrict)
    return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
  if (Subtarget.is64Bit() && SrcVT == MVT::i64 &&
      (DstVT == MVT::f32 || DstVT == MVT::f64))
    return SDValue();

  // Make a 64-bit buffer, and use it to build an FILD.
  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64, 8);
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  Align SlotAlign(8);
  MachinePointerInfo MPI =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
  if (SrcVT == MVT::i32) {
    SDValue OffsetSlot =
        DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), dl);
    SDValue Store1 = DAG.getStore(Chain, dl, Src, StackSlot, MPI, SlotAlign);
    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
                                  OffsetSlot, MPI.getWithOffset(4), SlotAlign);
    std::pair<SDValue, SDValue> Tmp =
        BuildFILD(DstVT, MVT::i64, dl, Store2, StackSlot, MPI, SlotAlign, DAG);
    if (IsStrict)
      return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);

    return Tmp.first;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue ValueToStore = Src;
  if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
    // Bitcasting to f64 here allows us to do a single 64-bit store from
    // an SSE register, avoiding the store forwarding penalty that would come
    // with two 32-bit stores.
    ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
  }
  SDValue Store =
      DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, SlotAlign);
  // For i64 source, we need to add the appropriate power of 2 if the input
  // was negative. We must be careful to do the computation in x87 extended
  // precision, not in SSE.
  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
  SDValue Ops[] = { Store, StackSlot };
  SDValue Fild =
      DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, MVT::i64, MPI,
                              SlotAlign, MachineMemOperand::MOLoad);
  Chain = Fild.getValue(1);

  // Check whether the sign bit is set.
  SDValue SignSet = DAG.getSetCC(
      dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
      Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);

  // Build a 64 bit pair (FF, 0) in the constant pool, with FF in the hi bits.
  APInt FF(64, 0x5F80000000000000ULL);
  SDValue FudgePtr = DAG.getConstantPool(
      ConstantInt::get(*DAG.getContext(), FF), PtrVT);
  Align CPAlignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlign();
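  // What the fudge factor does (a sketch): FILD interprets the 64-bit slot as
  // a signed integer, so an unsigned input with its top bit set is loaded as
  // x - 2^64. The high word of FF, 0x5F800000, is exactly 2^64 as an IEEE
  // single; adding it back when the sign bit was set recovers the intended
  // unsigned value in x87 extended precision.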
  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
  SDValue Zero = DAG.getIntPtrConstant(0, dl);
  SDValue Four = DAG.getIntPtrConstant(4, dl);
  SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
  FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);

  // Load the value out, extending it from f32 to f80.
  SDValue Fudge = DAG.getExtLoad(
      ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
      CPAlignment);
  Chain = Fudge.getValue(1);
  // Extend everything to 80 bits to force it to be done on x87.
  // TODO: Are there any fast-math-flags to propagate here?
  if (IsStrict) {
    unsigned Opc = ISD::STRICT_FADD;
    // Windows needs the precision control changed to 80bits around this add.
    if (Subtarget.isOSWindows() && DstVT == MVT::f32)
      Opc = X86ISD::STRICT_FP80_ADD;

    SDValue Add =
        DAG.getNode(Opc, dl, {MVT::f80, MVT::Other}, {Chain, Fild, Fudge});
    // STRICT_FP_ROUND can't handle equal types.
    if (DstVT == MVT::f80)
      return Add;
    return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
                       {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
  }
  unsigned Opc = ISD::FADD;
  // Windows needs the precision control changed to 80bits around this add.
  if (Subtarget.isOSWindows() && DstVT == MVT::f32)
    Opc = X86ISD::FP80_ADD;

  SDValue Add = DAG.getNode(Opc, dl, MVT::f80, Fild, Fudge);
  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
                     DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
}
// If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
// is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
// just return an SDValue().
// Otherwise it is assumed to be a conversion from one of f32, f64 or f80
// to i16, i32 or i64, and we lower it to a legal sequence and return the
// result.
SDValue
X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                   bool IsSigned, SDValue &Chain) const {
  bool IsStrict = Op->isStrictFPOpcode();
  SDLoc DL(Op);

  EVT DstTy = Op.getValueType();
  SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
  EVT TheVT = Value.getValueType();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
    // f16 must be promoted before using the lowering in this routine.
    // fp128 does not use this lowering.
    return SDValue();
  }

  // If using FIST to compute an unsigned i64, we'll need some fixup
  // to handle values above the maximum signed i64. A FIST is always
  // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
  bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;

  // FIXME: This does not generate an invalid exception if the input does not
  // fit in i32. PR44019
  if (!IsSigned && DstTy != MVT::i64) {
    // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
    // The low 32 bits of the fist result will have the correct uint32 result.
    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
    DstTy = MVT::i64;
  }

  assert(DstTy.getSimpleVT() <= MVT::i64 &&
         DstTy.getSimpleVT() >= MVT::i16 &&
         "Unknown FP_TO_INT to lower!");

  // We lower FP->int64 into FISTP64 followed by a load from a temporary
  // stack slot.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = DstTy.getStoreSize();
  int SSFI =
      MF.getFrameInfo().CreateStackObject(MemSize, Align(MemSize), false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);

  Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();

  SDValue Adjust; // 0x0 or 0x8000000000000000, for result sign bit adjustment.

  if (UnsignedFixup) {
    //
    // Conversion to unsigned i64 is implemented with a select,
    // depending on whether the source value fits in the range
    // of a signed i64. Let Thresh be the FP equivalent of
    // 0x8000000000000000ULL.
    //
    //  Adjust = (Value >= Thresh) ? 0x8000000000000000 : 0;
    //  FltOfs = (Value >= Thresh) ? Thresh : 0.0;
    //  FistSrc = (Value - FltOfs);
    //  Fist-to-mem64 FistSrc
    //  Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
    //  to XOR'ing the high 32 bits with Adjust.
    //
    // Being a power of 2, Thresh is exactly representable in all FP formats.
    // For X87 we'd like to use the smallest FP type for this constant, but
    // for DAG type consistency we have to match the FP operand type.

    APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
    LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
    bool LosesInfo = false;
    if (TheVT == MVT::f64)
      // The rounding mode is irrelevant as the conversion should be exact.
      Status = Thresh.convert(APFloat::IEEEdouble(),
                              APFloat::rmNearestTiesToEven, &LosesInfo);
    else if (TheVT == MVT::f80)
      Status = Thresh.convert(APFloat::x87DoubleExtended(),
                              APFloat::rmNearestTiesToEven, &LosesInfo);

    assert(Status == APFloat::opOK && !LosesInfo &&
           "FP conversion should have been exact");

    SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);

    EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
                                   *DAG.getContext(), TheVT);
    SDValue Cmp;
    if (IsStrict) {
      Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE, Chain,
                         /*IsSignaling*/ true);
      Chain = Cmp.getValue(1);
    } else {
      Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE);
    }

    // Our preferred lowering of
    //
    // (Value >= Thresh) ? 0x8000000000000000ULL : 0
    //
    // is
    //
    // (Value >= Thresh) << 63
    //
    // but since we can get here after LegalOperations, DAGCombine might do the
    // wrong thing if we create a select. So, directly create the preferred
    // version.
    SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Cmp);
    SDValue Const63 = DAG.getConstant(63, DL, MVT::i8);
    Adjust = DAG.getNode(ISD::SHL, DL, MVT::i64, Zext, Const63);

    SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp, ThreshVal,
                                   DAG.getConstantFP(0.0, DL, TheVT));

    if (IsStrict) {
      Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
                          { Chain, Value, FltOfs });
      Chain = Value.getValue(1);
    } else
      Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
  }

  MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);

  // FIXME This causes a redundant load/store if the SSE-class value is already
  // in memory, such as if it is on the callstack.
  if (isScalarFPTypeInSSEReg(TheVT)) {
    assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
    SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
    SDValue Ops[] = { Chain, StackSlot };

    unsigned FLDSize = TheVT.getStoreSize();
    assert(FLDSize <= MemSize && "Stack slot not big enough");
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MPI, MachineMemOperand::MOLoad, FLDSize, Align(FLDSize));
    Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
    Chain = Value.getValue(1);
  }

  // Build the FP_TO_INT*_IN_MEM.
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MPI, MachineMemOperand::MOStore, MemSize, Align(MemSize));
  SDValue Ops[] = { Chain, Value, StackSlot };
  SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
                                         DAG.getVTList(MVT::Other),
                                         Ops, DstTy, MMO);

  SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
  Chain = Res.getValue(1);

  // If we need an unsigned fixup, XOR the result with adjust.
  if (UnsignedFixup)
    Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);

  return Res;
}
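// Worked example of the unsigned fixup (illustrative): converting the f64
// value 2^63 + 2048 (exactly representable) to u64. Value >= Thresh, so
// FltOfs = 2^63 and Adjust = 0x8000000000000000. The FIST converts
// Value - 2^63 = 2048 to the i64 2048, and the final XOR with Adjust
// restores the u64 result 0x8000000000000800.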
static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();

  assert(VT.isVector() && InVT.isVector() && "Expected vector type");
  assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
         "Unexpected extension opcode");
  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Expected same number of elements");
  assert((VT.getVectorElementType() == MVT::i16 ||
          VT.getVectorElementType() == MVT::i32 ||
          VT.getVectorElementType() == MVT::i64) &&
         "Unexpected element type");
  assert((InVT.getVectorElementType() == MVT::i8 ||
          InVT.getVectorElementType() == MVT::i16 ||
          InVT.getVectorElementType() == MVT::i32) &&
         "Unexpected element type");

  unsigned ExtendInVecOpc = DAG.getOpcode_EXTEND_VECTOR_INREG(Opc);

  if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
    assert(InVT == MVT::v32i8 && "Unexpected VT!");
    return splitVectorIntUnary(Op, DAG);
  }

  if (Subtarget.hasInt256())
    return Op;

  // Optimize vectors in AVX mode:
  //
  //   v8i16 -> v8i32
  //   Use vpmovzwd for 4 lower elements  v8i16 -> v4i32.
  //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
  //   Concat upper and lower parts.
  //
  //   v4i32 -> v4i64
  //   Use vpmovzdq for 4 lower elements  v4i32 -> v2i64.
  //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
  //   Concat upper and lower parts.
  //
  MVT HalfVT = VT.getHalfNumVectorElementsVT();
  SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);

  // Short-circuit if we can determine that each 128-bit half is the same value.
  // Otherwise, this is difficult to match and optimize.
  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
    if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);

  SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
  SDValue Undef = DAG.getUNDEF(InVT);
  bool NeedZero = Opc == ISD::ZERO_EXTEND;
  SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
  OpHi = DAG.getBitcast(HalfVT, OpHi);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
// Helper to split and extend a v16i1 mask to v16i8 or v16i16.
static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
                                   const SDLoc &dl, SelectionDAG &DAG) {
  assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
                           DAG.getIntPtrConstant(0, dl));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
                           DAG.getIntPtrConstant(8, dl));
  Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
  Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
}
static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
  SDLoc DL(Op);
  unsigned NumElts = VT.getVectorNumElements();

  // For all vectors but vXi8, we can just emit a sign_extend and a shift. This
  // avoids a constant pool load.
  if (VT.getVectorElementType() != MVT::i8) {
    SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
    return DAG.getNode(ISD::SRL, DL, VT, Extend,
                       DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
  }
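  // This works because sign_extend turns each true lane into all-ones (e.g.
  // 0xFFFFFFFF for i32); a logical shift right by width-1 then leaves exactly
  // 1, while false lanes stay 0 -- precisely the zero-extended mask value.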
  // Extend VT if BWI is not supported.
  MVT ExtVT = VT;
  if (!Subtarget.hasBWI()) {
    // If v16i32 is to be avoided, we'll need to split and concatenate.
    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
      return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);

    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
  }

  // Widen to 512-bits if VLX is not supported.
  MVT WideVT = ExtVT;
  if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
    NumElts *= 512 / ExtVT.getSizeInBits();
    InVT = MVT::getVectorVT(MVT::i1, NumElts);
    In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
                     In, DAG.getIntPtrConstant(0, DL));
    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
                              NumElts);
  }

  SDValue One = DAG.getConstant(1, DL, WideVT);
  SDValue Zero = DAG.getConstant(0, DL, WideVT);

  SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);

  // Truncate if we had to extend above.
  if (VT != ExtVT) {
    WideVT = MVT::getVectorVT(MVT::i8, NumElts);
    SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
  }

  // Extract back to 128/256-bit if we widened.
  if (VT != WideVT)
    SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
                              DAG.getIntPtrConstant(0, DL));

  return SelectedVal;
}
static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  SDValue In = Op.getOperand(0);
  MVT SVT = In.getSimpleValueType();

  if (SVT.getVectorElementType() == MVT::i1)
    return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);

  assert(Subtarget.hasAVX() && "Expected AVX support");
  return LowerAVXExtend(Op, DAG, Subtarget);
}

/// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
/// It makes use of the fact that vectors with enough leading sign/zero bits
/// prevent the PACKSS/PACKUS from saturating the results.
/// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
/// within each 128-bit lane.
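/// As a rough illustration: truncating v8i32 -> v8i16 when every element
/// already fits in 16 bits takes the two 128-bit halves of the source and
/// emits a single PACKSSDW/PACKUSDW; wider sources recurse, halving the
/// element size at each step.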
static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
                                      const SDLoc &DL, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
         "Unexpected PACK opcode");
  assert(DstVT.isVector() && "VT not a vector?");

  // Requires SSE2 for PACKSS (SSE41 PACKUSDW is handled below).
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT SrcVT = In.getValueType();

  // No truncation required, we might get here due to recursive calls.
  if (SrcVT == DstVT)
    return In;

  // We only support vector truncation to 64bits or greater from a
  // 128bits or greater source.
  unsigned DstSizeInBits = DstVT.getSizeInBits();
  unsigned SrcSizeInBits = SrcVT.getSizeInBits();
  if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
    return SDValue();

  unsigned NumElems = SrcVT.getVectorNumElements();
  if (!isPowerOf2_32(NumElems))
    return SDValue();

  LLVMContext &Ctx = *DAG.getContext();
  assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
  assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");

  EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);

  // Pack to the largest type possible:
  // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
  EVT InVT = MVT::i16, OutVT = MVT::i8;
  if (SrcVT.getScalarSizeInBits() > 16 &&
      (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
    InVT = MVT::i32;
    OutVT = MVT::i16;
  }

  // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
  if (SrcVT.is128BitVector()) {
    InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
    OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
    In = DAG.getBitcast(InVT, In);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, DAG.getUNDEF(InVT));
    Res = extractSubVector(Res, 0, DAG, DL, 64);
    return DAG.getBitcast(DstVT, Res);
  }

  // Split lower/upper subvectors.
  SDValue Lo, Hi;
  std::tie(Lo, Hi) = splitVector(In, DAG, DL);

  unsigned SubSizeInBits = SrcSizeInBits / 2;
  InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
  OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());

  // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
  if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
    Lo = DAG.getBitcast(InVT, Lo);
    Hi = DAG.getBitcast(InVT, Hi);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
    return DAG.getBitcast(DstVT, Res);
  }

  // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
  // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
  if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
    Lo = DAG.getBitcast(InVT, Lo);
    Hi = DAG.getBitcast(InVT, Hi);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);

    // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
    // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
    // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
    SmallVector<int, 64> Mask;
    int Scale = 64 / OutVT.getScalarSizeInBits();
    narrowShuffleMaskElts(Scale, { 0, 2, 1, 3 }, Mask);
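    // For example, with OutVT == v16i16 the scale is 4 and the scaled mask is
    // { 0,1,2,3, 8,9,10,11, 4,5,6,7, 12,13,14,15 }.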
    Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);

    if (DstVT.is256BitVector())
      return DAG.getBitcast(DstVT, Res);

    // If 512bit -> 128bit, truncate another stage.
    EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
    Res = DAG.getBitcast(PackedVT, Res);
    return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
  }

  // Recursively pack lower/upper subvectors, concat result and pack again.
  assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");

  EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
  if (PackedVT.is128BitVector()) {
    // Avoid CONCAT_VECTORS on sub-128bit nodes as these can fail after
    // type legalization.
    SDValue Res =
        truncateVectorWithPACK(Opcode, PackedVT, In, DL, DAG, Subtarget);
    return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
  }

  EVT HalfPackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
  Lo = truncateVectorWithPACK(Opcode, HalfPackedVT, Lo, DL, DAG, Subtarget);
  Hi = truncateVectorWithPACK(Opcode, HalfPackedVT, Hi, DL, DAG, Subtarget);
  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
  return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
}

/// Truncate using ISD::AND mask and X86ISD::PACKUS.
/// e.g. trunc <8 x i32> X to <8 x i16> -->
/// MaskX = X & 0xffff (clear high bits to prevent saturation)
/// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
static SDValue truncateVectorWithPACKUS(EVT DstVT, SDValue In, const SDLoc &DL,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  EVT SrcVT = In.getValueType();
  APInt Mask = APInt::getLowBitsSet(SrcVT.getScalarSizeInBits(),
                                    DstVT.getScalarSizeInBits());
  In = DAG.getNode(ISD::AND, DL, SrcVT, In, DAG.getConstant(Mask, DL, SrcVT));
  return truncateVectorWithPACK(X86ISD::PACKUS, DstVT, In, DL, DAG, Subtarget);
}

/// Truncate using inreg sign extension and X86ISD::PACKSS.
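/// e.g. trunc <8 x i32> X to <8 x i16> -->
/// SExtX = sign_extend_inreg X, i16 (refill the upper bits with the sign bit)
/// packss (extract_subv SExtX, 0), (extract_subv SExtX, 1)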
static SDValue truncateVectorWithPACKSS(EVT DstVT, SDValue In, const SDLoc &DL,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  EVT SrcVT = In.getValueType();
  In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, SrcVT, In,
                   DAG.getValueType(DstVT));
  return truncateVectorWithPACK(X86ISD::PACKSS, DstVT, In, DL, DAG, Subtarget);
}

/// This function lowers a vector truncation of 'extended sign-bits' or
/// 'extended zero-bits' values (vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32) into
/// X86ISD::PACKSS/PACKUS operations.
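/// That is, if the source elements are already known to be in range of the
/// destination type (all sign bits, or all zero bits, extend past the
/// destination width), the truncation can be done with PACK nodes alone and
/// no explicit masking or sign extension is needed.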
static SDValue LowerTruncateVecPackWithSignBits(MVT DstVT, SDValue In,
                                                const SDLoc &DL,
                                                const X86Subtarget &Subtarget,
                                                SelectionDAG &DAG) {
  MVT SrcVT = In.getSimpleValueType();
  MVT DstSVT = DstVT.getVectorElementType();
  MVT SrcSVT = SrcVT.getVectorElementType();
  if (!((SrcSVT == MVT::i16 || SrcSVT == MVT::i32 || SrcSVT == MVT::i64) &&
        (DstSVT == MVT::i8 || DstSVT == MVT::i16 || DstSVT == MVT::i32)))
    return SDValue();

  // Don't lower with PACK nodes on AVX512 targets if we'd need more than one.
  if (Subtarget.hasAVX512() &&
      SrcSVT.getSizeInBits() > (DstSVT.getSizeInBits() * 2))
    return SDValue();

  // Prefer to lower v4i64 -> v4i32 as a shuffle unless we can cheaply
  // split this for packing.
  if (SrcVT == MVT::v4i64 && DstVT == MVT::v4i32 &&
      !isFreeToSplitVector(In.getNode(), DAG) &&
      (!Subtarget.hasInt256() || DAG.ComputeNumSignBits(In) != 64))
    return SDValue();

  // If the upper half of the source is undef, then attempt to split and
  // only truncate the lower half.
  if (DstVT.getSizeInBits() >= 128) {
    SmallVector<SDValue> LowerOps;
    if (isUpperSubvectorUndef(In, LowerOps, DAG)) {
      MVT DstHalfVT = DstVT.getHalfNumVectorElementsVT();
      MVT SrcHalfVT = SrcVT.getHalfNumVectorElementsVT();
      SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcHalfVT, LowerOps);
      if (SDValue Res = LowerTruncateVecPackWithSignBits(DstHalfVT, Lo, DL,
                                                         Subtarget, DAG))
        return widenSubVector(Res, false, Subtarget, DAG, DL,
                              DstVT.getSizeInBits());
    }
  }

  unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
  unsigned NumPackedSignBits = std::min<unsigned>(DstSVT.getSizeInBits(), 16);
  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;

  // Truncate with PACKUS if we are truncating a vector with leading zero
  // bits that extend all the way to the packed/truncated value. Pre-SSE41
  // we can only use PACKUSWB.
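  // For example, a v8i32 whose elements are known to be less than 256 has at
  // least 24 leading zero bits per element, so it can be packed all the way
  // down to v8i8 without any of the PACKUS stages saturating.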
  KnownBits Known = DAG.computeKnownBits(In);
  if ((NumSrcEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
    if (SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, DstVT, In, DL, DAG,
                                           Subtarget))
      return V;

  // Truncate with PACKSS if we are truncating a vector with sign-bits
  // that extend all the way to the packed/truncated value.
  if ((NumSrcEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
    if (SDValue V = truncateVectorWithPACK(X86ISD::PACKSS, DstVT, In, DL, DAG,
                                           Subtarget))
      return V;

  return SDValue();
}

/// This function lowers a vector truncation from vXi32/vXi64 to vXi8/vXi16
/// into X86ISD::PACKUS/X86ISD::PACKSS operations.
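/// Unlike the sign-bits variant above, this path first forces the source into
/// a packable form (masking for PACKUS, sign-extending in-register for
/// PACKSS), so it works for arbitrary source values.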
static SDValue LowerTruncateVecPack(MVT DstVT, SDValue In, const SDLoc &DL,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  MVT SrcVT = In.getSimpleValueType();
  MVT DstSVT = DstVT.getVectorElementType();
  MVT SrcSVT = SrcVT.getVectorElementType();
  unsigned NumElems = DstVT.getVectorNumElements();
  if (!((SrcSVT == MVT::i16 || SrcSVT == MVT::i32 || SrcSVT == MVT::i64) &&
        (DstSVT == MVT::i8 || DstSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
        NumElems >= 8))
    return SDValue();

  // SSSE3's pshufb results in fewer instructions in the cases below.
  if (Subtarget.hasSSSE3() && NumElems == 8) {
    if (SrcSVT == MVT::i16)
      return SDValue();
    if (SrcSVT == MVT::i32 && (DstSVT == MVT::i8 || !Subtarget.hasSSE41()))
      return SDValue();
  }

  // If the upper half of the source is undef, then attempt to split and
  // only truncate the lower half.
  if (DstVT.getSizeInBits() >= 128) {
    SmallVector<SDValue> LowerOps;
    if (isUpperSubvectorUndef(In, LowerOps, DAG)) {
      MVT DstHalfVT = DstVT.getHalfNumVectorElementsVT();
      MVT SrcHalfVT = SrcVT.getHalfNumVectorElementsVT();
      SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcHalfVT, LowerOps);
      if (SDValue Res = LowerTruncateVecPack(DstHalfVT, Lo, DL, Subtarget, DAG))
        return widenSubVector(Res, false, Subtarget, DAG, DL,
                              DstVT.getSizeInBits());
    }
  }

  // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides
  // PACKUS for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use
  // PACKSS to truncate 2 x v4i32 to v8i16.
  if (Subtarget.hasSSE41() || DstSVT == MVT::i8)
    return truncateVectorWithPACKUS(DstVT, In, DL, Subtarget, DAG);

  if (SrcSVT == MVT::i16 || SrcSVT == MVT::i32)
    return truncateVectorWithPACKSS(DstVT, In, DL, Subtarget, DAG);

  // Special case vXi64 -> vXi16, shuffle to vXi32 and then use PACKSS.
  if (DstSVT == MVT::i16 && SrcSVT == MVT::i64) {
    MVT TruncVT = MVT::getVectorVT(MVT::i32, NumElems);
    SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, In);
    return truncateVectorWithPACKSS(DstVT, Trunc, DL, Subtarget, DAG);
  }
  return SDValue();
}

static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();

  assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");

  // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
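  // For example, v16i8 -> v16i1 shifts each byte left by 7 so that bit 0
  // becomes the sign bit, which VPMOVB2M (or a signed compare against zero)
  // then extracts into a mask register.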
  unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
  if (InVT.getScalarSizeInBits() <= 16) {
    if (Subtarget.hasBWI()) {
      // legal, will go to VPMOVB2M, VPMOVW2M
      if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
        // We need to shift to get the lsb into sign position.
        // Shifting packed bytes is not supported natively, so bitcast to words.
        MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
        In = DAG.getNode(ISD::SHL, DL, ExtVT,
                         DAG.getBitcast(ExtVT, In),
                         DAG.getConstant(ShiftInx, DL, ExtVT));
        In = DAG.getBitcast(InVT, In);
      }
      return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
                          In, ISD::SETGT);
    }
    // Use TESTD/Q, extended vector to packed dword/qword.
    assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
           "Unexpected vector type.");
    unsigned NumElts = InVT.getVectorNumElements();
    assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
    // We need to change to a wider element type that we have support for.
    // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
    // For 16 element vectors we extend to v16i32 unless we are explicitly
    // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
    // we need to split into two 8 element vectors which we can extend to v8i32,
    // truncate and concat the results. There's an additional complication if
    // the original type is v16i8. In that case we can't split the v16i8
    // directly, so we need to shuffle high elements to low and use
    // sign_extend_vector_inreg.
    if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
      SDValue Lo, Hi;
      if (InVT == MVT::v16i8) {
        Lo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, In);
        Hi = DAG.getVectorShuffle(
            MVT::v16i8, DL, In, In,
            {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
        Hi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, Hi);
      } else {
        assert(InVT == MVT::v16i16 && "Unexpected VT!");
        Lo = extract128BitVector(In, 0, DAG, DL);
        Hi = extract128BitVector(In, 8, DAG, DL);
      }
      // We're split now, just emit two truncates and a concat. The two
      // truncates will trigger legalization to come back to this function.
      Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
      Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
    }
    // We either have 8 elements or we're allowed to use 512-bit vectors.
    // If we have VLX, we want to use the narrowest vector that can get the
    // job done so we use vXi32.
    MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
    MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
    In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
    InVT = ExtVT;
    ShiftInx = InVT.getScalarSizeInBits() - 1;
  }

  if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
    // We need to shift to get the lsb into sign position.
    In = DAG.getNode(ISD::SHL, DL, InVT, In,
                     DAG.getConstant(ShiftInx, DL, InVT));
  }
  // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
  if (Subtarget.hasDQI())
    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
  return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
}

SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();
  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Invalid TRUNCATE operation");

  // If we're called by the type legalizer, handle a few cases.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(InVT)) {
    if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
        VT.is128BitVector() && Subtarget.hasAVX512()) {
      assert((InVT == MVT::v16i64 || Subtarget.hasVLX()) &&
             "Unexpected subtarget!");
      // The default behavior is to truncate one step, concatenate, and then
      // truncate the remainder. We'd rather produce two 64-bit results and
      // concatenate those.
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(In, DL);

      EVT LoVT, HiVT;
      std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);

      Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
      Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
    }

    // Pre-AVX512 (or prefer-256bit) see if we can make use of PACKSS/PACKUS.
    if (!Subtarget.hasAVX512() ||
        (InVT.is512BitVector() && VT.is256BitVector()))
      if (SDValue SignPack =
              LowerTruncateVecPackWithSignBits(VT, In, DL, Subtarget, DAG))
        return SignPack;

    // Pre-AVX512 see if we can make use of PACKSS/PACKUS.
    if (!Subtarget.hasAVX512())
      return LowerTruncateVecPack(VT, In, DL, Subtarget, DAG);

    // Otherwise let default legalization handle it.
    return SDValue();
  }

  if (VT.getVectorElementType() == MVT::i1)
    return LowerTruncateVecI1(Op, DAG, Subtarget);

  // Attempt to truncate with PACKUS/PACKSS even on AVX512 if we'd have to
  // concat from subvectors to use VPTRUNC etc.
  if (!Subtarget.hasAVX512() || isFreeToSplitVector(In.getNode(), DAG))
    if (SDValue SignPack =
            LowerTruncateVecPackWithSignBits(VT, In, DL, Subtarget, DAG))
      return SignPack;

  // vpmovqb/w/d, vpmovdb/w, vpmovwb
  if (Subtarget.hasAVX512()) {
    if (InVT == MVT::v32i16 && !Subtarget.hasBWI()) {
      assert(VT == MVT::v32i8 && "Unexpected VT!");
      return splitVectorIntUnary(Op, DAG);
    }

    // word to byte only under BWI. Otherwise we have to promote to v16i32
    // and then truncate that. But we should only do that if we haven't been
    // asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
    // handled by isel patterns.
    if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
        Subtarget.canExtendTo512DQ())
      return Op;
  }

  // Handle truncation of V256 to V128 using shuffles.
  assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");

  if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
    // On AVX2, v4i64 -> v4i32 becomes VPERMD.
    if (Subtarget.hasInt256()) {
      static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
      In = DAG.getBitcast(MVT::v8i32, In);
      In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
                         DAG.getIntPtrConstant(0, DL));
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(0, DL));
    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(2, DL));
    static const int ShufMask[] = {0, 2, 4, 6};
    return DAG.getVectorShuffle(VT, DL, DAG.getBitcast(MVT::v4i32, OpLo),
                                DAG.getBitcast(MVT::v4i32, OpHi), ShufMask);
  }

  if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
    if (Subtarget.hasInt256()) {
      // The PSHUFB mask:
      static const int ShufMask1[] = { 0,  1,  4,  5,  8,  9, 12, 13,
                                      -1, -1, -1, -1, -1, -1, -1, -1,
                                      16, 17, 20, 21, 24, 25, 28, 29,
                                      -1, -1, -1, -1, -1, -1, -1, -1 };
      In = DAG.getBitcast(MVT::v32i8, In);
      In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
      In = DAG.getBitcast(MVT::v4i64, In);

      static const int ShufMask2[] = {0, 2, -1, -1};
      In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
      In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                       DAG.getIntPtrConstant(0, DL));
      return DAG.getBitcast(MVT::v8i16, In);
    }

    return Subtarget.hasSSE41()
               ? truncateVectorWithPACKUS(VT, In, DL, Subtarget, DAG)
               : truncateVectorWithPACKSS(VT, In, DL, Subtarget, DAG);
  }

  if (VT == MVT::v16i8 && InVT == MVT::v16i16)
    return truncateVectorWithPACKUS(VT, In, DL, Subtarget, DAG);

  llvm_unreachable("All 256->128 cases should have been handled above!");
}

// We can leverage the specific way the "cvttps2dq/cvttpd2dq" instruction
// behaves on out of range inputs to generate optimized conversions.
static SDValue expandFP_TO_UINT_SSE(MVT VT, SDValue Src, const SDLoc &dl,
                                    SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT SrcVT = Src.getSimpleValueType();
  unsigned DstBits = VT.getScalarSizeInBits();
  assert(DstBits == 32 && "expandFP_TO_UINT_SSE - only vXi32 supported");

  // Calculate the converted result for values in the range 0 to
  // 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
  SDValue Small = DAG.getNode(X86ISD::CVTTP2SI, dl, VT, Src);
  SDValue Big =
      DAG.getNode(X86ISD::CVTTP2SI, dl, VT,
                  DAG.getNode(ISD::FSUB, dl, SrcVT, Src,
                              DAG.getConstantFP(2147483648.0f, dl, SrcVT)));

  // The "CVTTP2SI" instruction conveniently sets the sign bit if
  // and only if the value was out of range. So we can use that
  // as our indicator that we should use "Big" instead of "Small".
  //
  // Use "Small" if "IsOverflown" has all bits cleared
  // and "0x80000000 | Big" if all bits in "IsOverflown" are set.

  // AVX1 can't use the signsplat masking for 256-bit vectors - we have to
  // use the slightly slower blendv select instead.
  if (VT == MVT::v8i32 && !Subtarget.hasAVX2()) {
    SDValue Overflow = DAG.getNode(ISD::OR, dl, VT, Small, Big);
    return DAG.getNode(X86ISD::BLENDV, dl, VT, Small, Overflow, Small);
  }

  SDValue IsOverflown =
      DAG.getNode(X86ISD::VSRAI, dl, VT, Small,
                  DAG.getTargetConstant(DstBits - 1, dl, MVT::i8));
  return DAG.getNode(ISD::OR, dl, VT, Small,
                     DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
}

SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();
  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
                  Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
  MVT VT = Op->getSimpleValueType(0);
  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
  SDValue Chain = IsStrict ? Op->getOperand(0) : SDValue();
  MVT SrcVT = Src.getSimpleValueType();
  SDLoc dl(Op);

  SDValue Res;
  if (isSoftFP16(SrcVT)) {
    MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
    if (IsStrict)
      return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
                         {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
                                             {NVT, MVT::Other}, {Chain, Src})});
    return DAG.getNode(Op.getOpcode(), dl, VT,
                       DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
  } else if (isTypeLegal(SrcVT) && isLegalConversion(VT, IsSigned, Subtarget)) {
    return Op;
  }

  if (VT.isVector()) {
    if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
      MVT ResVT = MVT::v4i32;
      MVT TruncVT = MVT::v4i1;
      unsigned Opc;
      if (IsStrict)
        Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
      else
        Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;

      if (!IsSigned && !Subtarget.hasVLX()) {
        assert(Subtarget.useAVX512Regs() && "Unexpected features!");
        // Widen to 512-bits.
        ResVT = MVT::v8i32;
        TruncVT = MVT::v8i1;
        Opc = Op.getOpcode();
        // Need to concat with zero vector for strict fp to avoid spurious
        // exceptions.
        // TODO: Should we just do this for non-strict as well?
        SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
                               : DAG.getUNDEF(MVT::v8f64);
        Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
                          DAG.getIntPtrConstant(0, dl));
      }
      if (IsStrict) {
        Res = DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Chain, Src});
        Chain = Res.getValue(1);
      } else {
        Res = DAG.getNode(Opc, dl, ResVT, Src);
      }

      Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
                        DAG.getIntPtrConstant(0, dl));
      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    if (Subtarget.hasFP16() && SrcVT.getVectorElementType() == MVT::f16) {
      if (VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16)
        return Op;

      MVT ResVT = VT;
      MVT EleVT = VT.getVectorElementType();
      if (EleVT != MVT::i64)
        ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;

      if (SrcVT != MVT::v8f16) {
        SDValue Tmp =
            IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
        SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
        Ops[0] = Src;
        Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
      }

      if (IsStrict) {
        Res = DAG.getNode(IsSigned ? X86ISD::STRICT_CVTTP2SI
                                   : X86ISD::STRICT_CVTTP2UI,
                          dl, {ResVT, MVT::Other}, {Chain, Src});
        Chain = Res.getValue(1);
      } else {
        Res = DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl,
                          ResVT, Src);
      }

      // TODO: Need to add exception check code for strict FP.
      if (EleVT.getSizeInBits() < 16) {
        ResVT = MVT::getVectorVT(EleVT, 8);
        Res = DAG.getNode(ISD::TRUNCATE, dl, ResVT, Res);
      }

      if (ResVT != VT)
        Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
                          DAG.getIntPtrConstant(0, dl));

      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    // v8f32/v16f32/v8f64->v8i16/v16i16 need to widen first.
    if (VT.getVectorElementType() == MVT::i16) {
      assert((SrcVT.getVectorElementType() == MVT::f32 ||
              SrcVT.getVectorElementType() == MVT::f64) &&
             "Expected f32/f64 vector!");
      MVT NVT = VT.changeVectorElementType(MVT::i32);
      if (IsStrict) {
        Res = DAG.getNode(IsSigned ? ISD::STRICT_FP_TO_SINT
                                   : ISD::STRICT_FP_TO_UINT,
                          dl, {NVT, MVT::Other}, {Chain, Src});
        Chain = Res.getValue(1);
      } else {
        Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl,
                          NVT, Src);
      }

      // TODO: Need to add exception check code for strict FP.
      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);

      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
    if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
      assert(!IsSigned && "Expected unsigned conversion!");
      assert(Subtarget.useAVX512Regs() && "Requires avx512f");
      return Op;
    }

    // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
    if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
        (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32) &&
        Subtarget.useAVX512Regs()) {
      assert(!IsSigned && "Expected unsigned conversion!");
      assert(!Subtarget.hasVLX() && "Unexpected features!");
      MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
      MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
      // Need to concat with zero vector for strict fp to avoid spurious
      // exceptions.
      // TODO: Should we just do this for non-strict as well?
      SDValue Tmp =
          IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
                        DAG.getIntPtrConstant(0, dl));

      if (IsStrict) {
        Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
                          {Chain, Src});
        Chain = Res.getValue(1);
      } else {
        Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
      }

      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
                        DAG.getIntPtrConstant(0, dl));

      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
    if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
        (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32) &&
        Subtarget.useAVX512Regs() && Subtarget.hasDQI()) {
      assert(!Subtarget.hasVLX() && "Unexpected features!");
      MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
      // Need to concat with zero vector for strict fp to avoid spurious
      // exceptions.
      // TODO: Should we just do this for non-strict as well?
      SDValue Tmp =
          IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
                        DAG.getIntPtrConstant(0, dl));

      if (IsStrict) {
        Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
                          {Chain, Src});
        Chain = Res.getValue(1);
      } else {
        Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
      }

      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
                        DAG.getIntPtrConstant(0, dl));

      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
      if (!Subtarget.hasVLX()) {
        // Non-strict nodes without VLX can be widened to v4f32->v4i64 by the
        // type legalizer and then widened again by vector op legalization.
        if (!IsStrict)
          return SDValue();

        SDValue Zero = DAG.getConstantFP(0.0, dl, MVT::v2f32);
        SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f32,
                                  {Src, Zero, Zero, Zero});
        Tmp = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
                          {Chain, Tmp});
        SDValue Chain = Tmp.getValue(1);
        Tmp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Tmp,
                          DAG.getIntPtrConstant(0, dl));
        return DAG.getMergeValues({Tmp, Chain}, dl);
      }

      assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
      SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
                                DAG.getUNDEF(MVT::v2f32));
      if (IsStrict) {
        unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
                                : X86ISD::STRICT_CVTTP2UI;
        return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
      }
      unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
      return DAG.getNode(Opc, dl, VT, Tmp);
    }

    // Generate optimized instructions for pre AVX512 unsigned conversions from
    // vXf32 to vXi32.
    if ((VT == MVT::v4i32 && SrcVT == MVT::v4f32) ||
        (VT == MVT::v4i32 && SrcVT == MVT::v4f64) ||
        (VT == MVT::v8i32 && SrcVT == MVT::v8f32)) {
      assert(!IsSigned && "Expected unsigned conversion!");
      return expandFP_TO_UINT_SSE(VT, Src, dl, DAG, Subtarget);
    }

    return SDValue();
  }

  assert(!VT.isVector());

  bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);

  if (!IsSigned && UseSSEReg) {
    // Conversions from f32/f64 with AVX512 should be legal.
    if (Subtarget.hasAVX512())
      return Op;

    // We can leverage the specific way the "cvttss2si/cvttsd2si" instruction
    // behaves on out of range inputs to generate optimized conversions.
    if (!IsStrict && ((VT == MVT::i32 && !Subtarget.is64Bit()) ||
                      (VT == MVT::i64 && Subtarget.is64Bit()))) {
      unsigned DstBits = VT.getScalarSizeInBits();
      APInt UIntLimit = APInt::getSignMask(DstBits);
      SDValue FloatOffset = DAG.getNode(ISD::UINT_TO_FP, dl, SrcVT,
                                        DAG.getConstant(UIntLimit, dl, VT));
      MVT SrcVecVT = MVT::getVectorVT(SrcVT, 128 / SrcVT.getScalarSizeInBits());

      // Calculate the converted result for values in the range:
      // (i32) 0 to 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
      // (i64) 0 to 2^63-1 ("Small") and from 2^63 to 2^64-1 ("Big").
      SDValue Small =
          DAG.getNode(X86ISD::CVTTS2SI, dl, VT,
                      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT, Src));
      SDValue Big = DAG.getNode(
          X86ISD::CVTTS2SI, dl, VT,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT,
                      DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FloatOffset)));

      // The "CVTTS2SI" instruction conveniently sets the sign bit if
      // and only if the value was out of range. So we can use that
      // as our indicator that we should use "Big" instead of "Small".
      //
      // Use "Small" if "IsOverflown" has all bits cleared
      // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
      SDValue IsOverflown = DAG.getNode(
          ISD::SRA, dl, VT, Small, DAG.getConstant(DstBits - 1, dl, MVT::i8));
      return DAG.getNode(ISD::OR, dl, VT, Small,
                         DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
    }

    // Use default expansion for i64.
    if (VT == MVT::i64)
      return SDValue();

    assert(VT == MVT::i32 && "Unexpected VT!");

    // Promote i32 to i64 and use a signed operation on 64-bit targets.
    // FIXME: This does not generate an invalid exception if the input does not
    // fit in i32. PR44019
    if (Subtarget.is64Bit()) {
      if (IsStrict) {
        Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i64, MVT::Other},
                          {Chain, Src});
        Chain = Res.getValue(1);
      } else
        Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);

      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
      if (IsStrict)
        return DAG.getMergeValues({Res, Chain}, dl);
      return Res;
    }

    // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
    // use fisttp which will be handled later.
    if (!Subtarget.hasSSE3())
      return SDValue();
  }

  // Promote i16 to i32 if we can use a SSE operation or the type is f128.
  // FIXME: This does not generate an invalid exception if the input does not
  // fit in i16. PR44019
  if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
    assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
    if (IsStrict) {
      Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i32, MVT::Other},
                        {Chain, Src});
      Chain = Res.getValue(1);
    } else
      Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);

    Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
    if (IsStrict)
      return DAG.getMergeValues({Res, Chain}, dl);
    return Res;
  }

  // If this is a FP_TO_SINT using SSEReg we're done.
  if (UseSSEReg && IsSigned)
    return Op;

  // fp128 needs to use a libcall.
  if (SrcVT == MVT::f128) {
    RTLIB::Libcall LC;
    if (IsSigned)
      LC = RTLIB::getFPTOSINT(SrcVT, VT);
    else
      LC = RTLIB::getFPTOUINT(SrcVT, VT);

    MakeLibCallOptions CallOptions;
    std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
                                                  SDLoc(Op), Chain);

    if (IsStrict)
      return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);

    return Tmp.first;
  }

  // Fall back to X87.
  if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
    if (IsStrict)
      return DAG.getMergeValues({V, Chain}, dl);
    return V;
  }

  llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
}

SDValue X86TargetLowering::LowerLRINT_LLRINT(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();

  if (SrcVT == MVT::f16)
    return SDValue();

  // If the source is in an SSE register, the node is Legal.
  if (isScalarFPTypeInSSEReg(SrcVT))
    return Op;

  return LRINT_LLRINTHelper(Op.getNode(), DAG);
}

SDValue X86TargetLowering::LRINT_LLRINTHelper(SDNode *N,
                                              SelectionDAG &DAG) const {
  EVT DstVT = N->getValueType(0);
  SDValue Src = N->getOperand(0);
  EVT SrcVT = Src.getValueType();

  if (SrcVT != MVT::f32 && SrcVT != MVT::f64 && SrcVT != MVT::f80) {
    // f16 must be promoted before using the lowering in this routine.
    // fp128 does not use this lowering.
    return SDValue();
  }

  SDLoc DL(N);
  SDValue Chain = DAG.getEntryNode();

  bool UseSSE = isScalarFPTypeInSSEReg(SrcVT);

  // If we're converting from SSE, the stack slot needs to hold both types.
  // Otherwise it only needs to hold the DstVT.
  EVT OtherVT = UseSSE ? SrcVT : DstVT;
  SDValue StackPtr = DAG.CreateStackTemporary(DstVT, OtherVT);
  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  MachinePointerInfo MPI =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);

  if (UseSSE) {
    assert(DstVT == MVT::i64 && "Invalid LRINT/LLRINT to lower!");
    Chain = DAG.getStore(Chain, DL, Src, StackPtr, MPI);
    SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
    SDValue Ops[] = { Chain, StackPtr };

    Src = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, SrcVT, MPI,
                                  /*Align*/ std::nullopt,
                                  MachineMemOperand::MOLoad);
    Chain = Src.getValue(1);
  }

  SDValue StoreOps[] = { Chain, Src, StackPtr };
  Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, DL, DAG.getVTList(MVT::Other),
                                  StoreOps, DstVT, MPI, /*Align*/ std::nullopt,
                                  MachineMemOperand::MOStore);

  return DAG.getLoad(DstVT, DL, Chain, StackPtr, MPI);
}

SDValue
X86TargetLowering::LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const {
  // This is based on the TargetLowering::expandFP_TO_INT_SAT implementation,
  // but making use of X86 specifics to produce better instruction sequences.
  SDNode *Node = Op.getNode();
  bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
  unsigned FpToIntOpcode = IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
  SDLoc dl(SDValue(Node, 0));
  SDValue Src = Node->getOperand(0);

  // There are three types involved here: SrcVT is the source floating point
  // type, DstVT is the type of the result, and TmpVT is the result of the
  // intermediate FP_TO_*INT operation we'll use (which may be a promotion of
  // DstVT).
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);
  EVT TmpVT = DstVT;

  // This code is only for floats and doubles. Fall back to generic code for
  // anything else.
  if (!isScalarFPTypeInSSEReg(SrcVT) || isSoftFP16(SrcVT))
    return SDValue();

  EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
  unsigned SatWidth = SatVT.getScalarSizeInBits();
  unsigned DstWidth = DstVT.getScalarSizeInBits();
  unsigned TmpWidth = TmpVT.getScalarSizeInBits();
  assert(SatWidth <= DstWidth && SatWidth <= TmpWidth &&
         "Expected saturation width smaller than result width");

  // Promote result of FP_TO_*INT to at least 32 bits.
  if (TmpWidth < 32) {
    TmpVT = MVT::i32;
    TmpWidth = 32;
  }

  // Promote conversions to unsigned 32-bit to 64-bit, because it will allow
  // us to use a native signed conversion instead.
  if (SatWidth == 32 && !IsSigned && Subtarget.is64Bit()) {
    TmpVT = MVT::i64;
    TmpWidth = 64;
  }

  // If the saturation width is smaller than the size of the temporary result,
  // we can always use signed conversion, which is native.
  if (SatWidth < TmpWidth)
    FpToIntOpcode = ISD::FP_TO_SINT;

  // Determine minimum and maximum integer values and their corresponding
  // floating-point values.
  APInt MinInt, MaxInt;
  if (IsSigned) {
    MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
    MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
  } else {
    MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
    MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
  }

  APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
  APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));

  APFloat::opStatus MinStatus = MinFloat.convertFromAPInt(
      MinInt, IsSigned, APFloat::rmTowardZero);
  APFloat::opStatus MaxStatus = MaxFloat.convertFromAPInt(
      MaxInt, IsSigned, APFloat::rmTowardZero);
  bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact)
                             && !(MaxStatus & APFloat::opStatus::opInexact);

  SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
  SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);

  // If the integer bounds are exactly representable as floats, emit a
  // min+max+fptoi sequence. Otherwise use comparisons and selects.
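  // As an illustrative example, llvm.fptosi.sat.i8.f32 takes this path: the
  // bounds -128.0 and 127.0 are exact, so the input is clamped with
  // MAXSS/MINSS, converted with CVTTSS2SI to the promoted i32 type, and then
  // truncated to i8; a NaN input becomes the integer indefinite value whose
  // low bits truncate to zero.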
  if (AreExactFloatBounds) {
    if (DstVT != TmpVT) {
      // Clamp by MinFloat from below. If Src is NaN, propagate NaN.
      SDValue MinClamped = DAG.getNode(
          X86ISD::FMAX, dl, SrcVT, MinFloatNode, Src);
      // Clamp by MaxFloat from above. If Src is NaN, propagate NaN.
      SDValue BothClamped = DAG.getNode(
          X86ISD::FMIN, dl, SrcVT, MaxFloatNode, MinClamped);
      // Convert clamped value to integer.
      SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, BothClamped);

      // NaN will become INDVAL, with the top bit set and the rest zero.
      // Truncation will discard the top bit, resulting in zero.
      return DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
    }

    // Clamp by MinFloat from below. If Src is NaN, the result is MinFloat.
    SDValue MinClamped = DAG.getNode(
        X86ISD::FMAX, dl, SrcVT, Src, MinFloatNode);
    // Clamp by MaxFloat from above. NaN cannot occur.
    SDValue BothClamped = DAG.getNode(
        X86ISD::FMINC, dl, SrcVT, MinClamped, MaxFloatNode);
    // Convert clamped value to integer.
    SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, DstVT, BothClamped);

    if (!IsSigned) {
      // In the unsigned case we're done, because we mapped NaN to MinFloat,
      // which will cast to zero.
      return FpToInt;
    }

    // Otherwise, select zero if Src is NaN.
    SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
    return DAG.getSelectCC(
        dl, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
  }

  SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
  SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);

  // Result of direct conversion, which may be selected away.
  SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, Src);

  if (DstVT != TmpVT) {
    // NaN will become INDVAL, with the top bit set and the rest zero.
    // Truncation will discard the top bit, resulting in zero.
    FpToInt = DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
  }

  SDValue Select = FpToInt;
  // For signed conversions where we saturate to the same size as the
  // result type of the fptoi instructions, INDVAL coincides with integer
  // minimum, so we don't need to explicitly check it.
  if (!IsSigned || SatWidth != TmpVT.getScalarSizeInBits()) {
    // If Src ULT MinFloat, select MinInt. In particular, this also selects
    // MinInt if Src is NaN.
    Select = DAG.getSelectCC(
        dl, Src, MinFloatNode, MinIntNode, Select, ISD::CondCode::SETULT);
  }

  // If Src OGT MaxFloat, select MaxInt.
  Select = DAG.getSelectCC(
      dl, Src, MaxFloatNode, MaxIntNode, Select, ISD::CondCode::SETOGT);

  // In the unsigned case we are done, because we mapped NaN to MinInt, which
  // is already zero. The promoted case was already handled above.
  if (!IsSigned || DstVT != TmpVT) {
    return Select;
  }

  // Otherwise, select 0 if Src is NaN.
  SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
  return DAG.getSelectCC(
      dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
}

SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();
  SDLoc DL(Op);

  MVT VT = Op.getSimpleValueType();
  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
  SDValue In = Op.getOperand(IsStrict ? 1 : 0);
  MVT SVT = In.getSimpleValueType();

  // Let f16->f80 get lowered to a libcall, except for Darwin, where we should
  // lower it to an fp_extend via f32 (as only f16<>f32 libcalls are available).
  if (VT == MVT::f128 || (SVT == MVT::f16 && VT == MVT::f80 &&
                          !Subtarget.getTargetTriple().isOSDarwin()))
    return SDValue();

  if (SVT == MVT::f16) {
    if (Subtarget.hasFP16())
      return Op;

    if (VT != MVT::f32) {
      if (IsStrict)
        return DAG.getNode(
            ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other},
            {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, DL,
                                {MVT::f32, MVT::Other}, {Chain, In})});

      return DAG.getNode(ISD::FP_EXTEND, DL, VT,
                         DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, In));
    }

    if (!Subtarget.hasF16C()) {
      if (!Subtarget.getTargetTriple().isOSDarwin())
        return SDValue();

      assert(VT == MVT::f32 && SVT == MVT::f16 && "unexpected extend libcall");

      // Need a libcall, but the ABI for f16 is soft-float on MacOS.
      TargetLowering::CallLoweringInfo CLI(DAG);
      Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();

      In = DAG.getBitcast(MVT::i16, In);
      TargetLowering::ArgListTy Args;
      TargetLowering::ArgListEntry Entry;
      Entry.Node = In;
      Entry.Ty = EVT(MVT::i16).getTypeForEVT(*DAG.getContext());
      Entry.IsSExt = false;
      Entry.IsZExt = true;
      Args.push_back(Entry);

      SDValue Callee = DAG.getExternalSymbol(
          getLibcallName(RTLIB::FPEXT_F16_F32),
          getPointerTy(DAG.getDataLayout()));
      CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
          CallingConv::C, EVT(VT).getTypeForEVT(*DAG.getContext()), Callee,
          std::move(Args));

      SDValue Res;
      std::tie(Res, Chain) = LowerCallTo(CLI);
      if (IsStrict)
        Res = DAG.getMergeValues({Res, Chain}, DL);

      return Res;
    }

    In = DAG.getBitcast(MVT::i16, In);
    In = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v8i16,
                     getZeroVector(MVT::v8i16, Subtarget, DAG, DL), In,
                     DAG.getIntPtrConstant(0, DL));
    SDValue Res;
    if (IsStrict) {
      Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, DL, {MVT::v4f32, MVT::Other},
                        {Chain, In});
      Chain = Res.getValue(1);
    } else {
      Res = DAG.getNode(X86ISD::CVTPH2PS, DL, MVT::v4f32, In,
                        DAG.getTargetConstant(4, DL, MVT::i32));
    }
    Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Res,
                      DAG.getIntPtrConstant(0, DL));
    if (IsStrict)
      return DAG.getMergeValues({Res, Chain}, DL);
    return Res;
  }

  if (!SVT.isVector())
    return Op;

  if (SVT.getVectorElementType() == MVT::f16) {
    assert(Subtarget.hasF16C() && "Unexpected features!");
    if (SVT == MVT::v2f16)
      In = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f16, In,
                       DAG.getUNDEF(MVT::v2f16));
    SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f16, In,
                              DAG.getUNDEF(MVT::v4f16));
    if (IsStrict)
      return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
                         {Op->getOperand(0), Res});
    return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
  } else if (VT == MVT::v4f64 || VT == MVT::v8f64) {
    return Op;
  }

  assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");

  SDValue Res =
      DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
  if (IsStrict)
    return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
                       {Op->getOperand(0), Res});
  return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
}

SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();
  SDLoc DL(Op);

  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
  SDValue In = Op.getOperand(IsStrict ? 1 : 0);
  MVT VT = Op.getSimpleValueType();
  MVT SVT = In.getSimpleValueType();

  if (SVT == MVT::f128 || (VT == MVT::f16 && SVT == MVT::f80))
    return SDValue();

  if (VT == MVT::f16 && (SVT == MVT::f64 || SVT == MVT::f32) &&
      !Subtarget.hasFP16() && (SVT == MVT::f64 || !Subtarget.hasF16C())) {
    if (!Subtarget.getTargetTriple().isOSDarwin())
      return SDValue();

    // We need a libcall but the ABI for f16 libcalls on MacOS is soft.
    TargetLowering::CallLoweringInfo CLI(DAG);
    Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();

    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Node = In;
    Entry.Ty = EVT(SVT).getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = false;
    Entry.IsZExt = true;
    Args.push_back(Entry);

    SDValue Callee = DAG.getExternalSymbol(
        getLibcallName(SVT == MVT::f64 ? RTLIB::FPROUND_F64_F16
                                       : RTLIB::FPROUND_F32_F16),
        getPointerTy(DAG.getDataLayout()));
    CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
        CallingConv::C, EVT(MVT::i16).getTypeForEVT(*DAG.getContext()), Callee,
        std::move(Args));

    SDValue Res;
    std::tie(Res, Chain) = LowerCallTo(CLI);

    Res = DAG.getBitcast(MVT::f16, Res);

    if (IsStrict)
      Res = DAG.getMergeValues({Res, Chain}, DL);

    return Res;
  }

  if (VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16()) {
    if (!Subtarget.hasF16C() || SVT.getScalarType() != MVT::f32)
      return SDValue();

    if (VT.isVector())
      return Op;

    SDValue Res;
    SDValue Rnd = DAG.getTargetConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, DL,
                                        MVT::i32);
    if (IsStrict) {
      Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4f32,
                        DAG.getConstantFP(0, DL, MVT::v4f32), In,
                        DAG.getIntPtrConstant(0, DL));
      Res = DAG.getNode(X86ISD::STRICT_CVTPS2PH, DL, {MVT::v8i16, MVT::Other},
                        {Chain, Res, Rnd});
      Chain = Res.getValue(1);
    } else {
      // FIXME: Should we use zeros for upper elements for non-strict?
      Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, In);
      Res = DAG.getNode(X86ISD::CVTPS2PH, DL, MVT::v8i16, Res, Rnd);
    }

    Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i16, Res,
                      DAG.getIntPtrConstant(0, DL));
    Res = DAG.getBitcast(MVT::f16, Res);

    if (IsStrict)
      return DAG.getMergeValues({Res, Chain}, DL);

    return Res;
  }

  return Op;
}

static SDValue LowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) {
  bool IsStrict = Op->isStrictFPOpcode();
  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
  assert(Src.getValueType() == MVT::i16 && Op.getValueType() == MVT::f32 &&
         "Unexpected VT!");

  SDLoc dl(Op);
  SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16,
                            DAG.getConstant(0, dl, MVT::v8i16), Src,
                            DAG.getIntPtrConstant(0, dl));

  SDValue Chain;
  if (IsStrict) {
    Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {MVT::v4f32, MVT::Other},
                      {Op.getOperand(0), Res});
    Chain = Res.getValue(1);
  } else {
    Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
  }

  Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
                    DAG.getIntPtrConstant(0, dl));

  if (IsStrict)
    return DAG.getMergeValues({Res, Chain}, dl);

  return Res;
}

static SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) {
  bool IsStrict = Op->isStrictFPOpcode();
  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
  assert(Src.getValueType() == MVT::f32 && Op.getValueType() == MVT::i16 &&
         "Unexpected VT!");

  SDLoc dl(Op);
  SDValue Res, Chain;
  if (IsStrict) {
    Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4f32,
                      DAG.getConstantFP(0, dl, MVT::v4f32), Src,
                      DAG.getIntPtrConstant(0, dl));
    Res = DAG.getNode(
        X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
        {Op.getOperand(0), Res, DAG.getTargetConstant(4, dl, MVT::i32)});
    Chain = Res.getValue(1);
  } else {
    // FIXME: Should we use zeros for upper elements for non-strict?
    Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, Src);
    Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
                      DAG.getTargetConstant(4, dl, MVT::i32));
  }

  Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Res,
                    DAG.getIntPtrConstant(0, dl));

  if (IsStrict)
    return DAG.getMergeValues({Res, Chain}, dl);

  return Res;
}

SDValue X86TargetLowering::LowerFP_TO_BF16(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MakeLibCallOptions CallOptions;
  RTLIB::Libcall LC =
      RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::bf16);
  SDValue Res =
      makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16,
                     DAG.getBitcast(MVT::i32, Res));
}

/// Depending on uarch and/or optimizing for size, we might prefer to use a
/// vector operation in place of the typical scalar operation.
static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  // If both operands have other uses, this is probably not profitable.
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  if (!LHS.hasOneUse() && !RHS.hasOneUse())
    return Op;

  // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
  bool IsFP = Op.getSimpleValueType().isFloatingPoint();
  if (IsFP && !Subtarget.hasSSE3())
    return Op;
  if (!IsFP && !Subtarget.hasSSSE3())
    return Op;

  // Extract from a common vector.
  if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      LHS.getOperand(0) != RHS.getOperand(0) ||
      !isa<ConstantSDNode>(LHS.getOperand(1)) ||
      !isa<ConstantSDNode>(RHS.getOperand(1)) ||
      !shouldUseHorizontalOp(true, DAG, Subtarget))
    return Op;

  // Allow commuted 'hadd' ops.
  // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
  unsigned HOpcode;
  switch (Op.getOpcode()) {
  case ISD::ADD: HOpcode = X86ISD::HADD; break;
  case ISD::SUB: HOpcode = X86ISD::HSUB; break;
  case ISD::FADD: HOpcode = X86ISD::FHADD; break;
  case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
  default:
    llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
  }
  unsigned LExtIndex = LHS.getConstantOperandVal(1);
  unsigned RExtIndex = RHS.getConstantOperandVal(1);
  if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
      (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
    std::swap(LExtIndex, RExtIndex);

  if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
    return Op;

  SDValue X = LHS.getOperand(0);
  EVT VecVT = X.getValueType();
  unsigned BitWidth = VecVT.getSizeInBits();
  unsigned NumLanes = BitWidth / 128;
  unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
  assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
         "Not expecting illegal vector widths here");

  // Creating a 256-bit horizontal op would be wasteful, and there is no
  // 512-bit equivalent, so extract the 256/512-bit source op to 128-bit
  // if we can.
  SDLoc DL(Op);
  if (BitWidth == 256 || BitWidth == 512) {
    unsigned LaneIdx = LExtIndex / NumEltsPerLane;
    X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
    LExtIndex %= NumEltsPerLane;
  }

  // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
  // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
  // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
  // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
  SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
                     DAG.getIntPtrConstant(LExtIndex / 2, DL));
}

/// Depending on uarch and/or optimizing for size, we might prefer to use a
/// vector operation in place of the typical scalar operation.
SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
  assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
         "Only expecting float/double");
  return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
}

/// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
/// This mode isn't supported in hardware on X86. But as long as we aren't
/// compiling with trapping math, we can emulate this with
/// trunc(X + copysign(nextafter(0.5, 0.0), X)).
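/// For f32, for example, nextafter(0.5, 0.0) is 0x3EFFFFFF (roughly
/// 0.49999997), so FROUND(2.5) computes trunc(2.5 + 0.49999997); the FADD
/// rounds 2.99999997 up to 3.0 and the result is 3.0, while
/// FROUND(0.49999997) computes trunc(0.99999994) = 0.0 as required.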
static SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  // N0 += copysign(nextafter(0.5, 0.0), N0)
  const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
  bool Ignored;
  APFloat Point5Pred = APFloat(0.5f);
  Point5Pred.convert(Sem, APFloat::rmNearestTiesToEven, &Ignored);
  Point5Pred.next(/*nextDown*/true);

  SDValue Adder = DAG.getNode(ISD::FCOPYSIGN, dl, VT,
                              DAG.getConstantFP(Point5Pred, dl, VT), N0);
  N0 = DAG.getNode(ISD::FADD, dl, VT, N0, Adder);

  // Truncate the result to remove fraction.
  return DAG.getNode(ISD::FTRUNC, dl, VT, N0);
}

/// The only differences between FABS and FNEG are the mask and the logic op.
/// FNEG also has a folding opportunity for FNEG(FABS(x)).
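/// Concretely, for f32 FABS is an AND with 0x7fffffff (clear the sign bit)
/// and FNEG is an XOR with 0x80000000 (flip the sign bit); FNEG(FABS(x))
/// folds to an OR with 0x80000000 (force the sign bit), i.e. FNABS.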
static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
  assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
         "Wrong opcode for lowering FABS or FNEG.");

  bool IsFABS = (Op.getOpcode() == ISD::FABS);

  // If this is a FABS and it has an FNEG user, bail out to fold the combination
  // into an FNABS. We'll lower the FABS after that if it is still in use.
  if (IsFABS)
    for (SDNode *User : Op->uses())
      if (User->getOpcode() == ISD::FNEG)
        return Op;

  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  bool IsF128 = (VT == MVT::f128);
  assert(VT.isFloatingPoint() && VT != MVT::f80 &&
         DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Unexpected type in LowerFABSorFNEG");

  // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
  // decide if we should generate a 16-byte constant mask when we only need 4 or
  // 8 bytes for the scalar case.

  // There are no scalar bitwise logical SSE/AVX instructions, so we
  // generate a 16-byte vector constant and logic op even for the scalar case.
  // Using a 16-byte mask allows folding the load of the mask with
  // the logic op, so it can save (~4 bytes) on code size.
  bool IsFakeVector = !VT.isVector() && !IsF128;
  MVT LogicVT = VT;
  if (IsFakeVector)
    LogicVT = (VT == MVT::f64)   ? MVT::v2f64
              : (VT == MVT::f32) ? MVT::v4f32
                                 : MVT::v8f16;

  unsigned EltBits = VT.getScalarSizeInBits();
  // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
  APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
                           APInt::getSignMask(EltBits);
  const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
  SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);

  SDValue Op0 = Op.getOperand(0);
  bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
  unsigned LogicOp = IsFABS  ? X86ISD::FAND :
                     IsFNABS ? X86ISD::FOR  :
                               X86ISD::FXOR;
  SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;

  if (VT.isVector() || IsF128)
    return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);

  // For the scalar case extend to a 128-bit vector, perform the logic op,
  // and extract the scalar result back out.
  Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
  SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
                     DAG.getIntPtrConstant(0, dl));
}

static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
  SDValue Mag = Op.getOperand(0);
  SDValue Sign = Op.getOperand(1);
  SDLoc dl(Op);

  // If the sign operand is smaller, extend it first.
  MVT VT = Op.getSimpleValueType();
  if (Sign.getSimpleValueType().bitsLT(VT))
    Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);

  // And if it is bigger, shrink it first.
  if (Sign.getSimpleValueType().bitsGT(VT))
    Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign,
                       DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));

  // At this point the operands and the result should have the same
  // type, and that won't be f80 since that is not custom lowered.
  bool IsF128 = (VT == MVT::f128);
  assert(VT.isFloatingPoint() && VT != MVT::f80 &&
         DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Unexpected type in LowerFCOPYSIGN");

  const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);

  // Perform all scalar logic operations as 16-byte vectors because there are no
  // scalar FP logic instructions in SSE.
  // TODO: This isn't necessary. If we used scalar types, we might avoid some
  // unnecessary splats, but we might miss load folding opportunities. Should
  // this decision be based on OptimizeForSize?
  bool IsFakeVector = !VT.isVector() && !IsF128;
  MVT LogicVT = VT;
  if (IsFakeVector)
    LogicVT = (VT == MVT::f64)   ? MVT::v2f64
              : (VT == MVT::f32) ? MVT::v4f32
                                 : MVT::v8f16;

  // The mask constants are automatically splatted for vector types.
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  SDValue SignMask = DAG.getConstantFP(
      APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
  SDValue MagMask = DAG.getConstantFP(
      APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);

  // First, clear all bits but the sign bit from the second operand (sign).
  if (IsFakeVector)
    Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
  SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);

  // Next, clear the sign bit from the first operand (magnitude).
  // TODO: If we had general constant folding for FP logic ops, this check
  // wouldn't be necessary.
  SDValue MagBits;
  if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
    APFloat APF = Op0CN->getValueAPF();
    APF.clearSign();
    MagBits = DAG.getConstantFP(APF, dl, LogicVT);
  } else {
    // If the magnitude operand wasn't a constant, we need to AND out the sign.
    if (IsFakeVector)
      Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
    MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
  }

  // OR the magnitude value with the sign bit.
  SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
  return !IsFakeVector ? Or
                       : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
                                     DAG.getIntPtrConstant(0, dl));
}
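
// Worked example of the lowering above for scalar f32 copysign(1.0, -2.0):
//   SignBit = 0xC0000000 & 0x80000000 = 0x80000000
//   MagBits = 0x3F800000 & 0x7FFFFFFF = 0x3F800000
//   Or      = 0xBF800000 = -1.0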
static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  MVT OpVT = N0.getSimpleValueType();
  assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
         "Unexpected type for FGETSIGN");

  // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
  MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
  SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
  Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
  Res = DAG.getZExtOrTrunc(Res, dl, VT);
  Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
  return Res;
}
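
// MOVMSK packs the sign bit of each lane into the low bits of a GPR, so for
// the scalar input placed in lane 0 above, the trailing AND with 1 isolates
// exactly the original sign bit.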
/// Helper for attempting to create a X86ISD::BT node.
static SDValue getBT(SDValue Src, SDValue BitNo, const SDLoc &DL,
                     SelectionDAG &DAG) {
  // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
  // instruction. Since the shift amount is in-range-or-undefined, we know
  // that doing a bittest on the i32 value is ok. We extend to i32 because
  // the encoding for the i16 version is larger than the i32 version.
  // Also promote i16 to i32 for performance / code size reasons.
  if (Src.getValueType().getScalarSizeInBits() < 32)
    Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);

  // No legal type found, give up.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(Src.getValueType()))
    return SDValue();

  // See if we can use the 32-bit instruction instead of the 64-bit one for a
  // shorter encoding. Since the former takes the modulo 32 of BitNo and the
  // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
  // known to be zero.
  if (Src.getValueType() == MVT::i64 &&
      DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
    Src = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);

  // If the operand types disagree, extend the shift amount to match. Since
  // BT ignores high bits (like shifts) we can use an anyextend.
  if (Src.getValueType() != BitNo.getValueType()) {
    // Peek through a mask/modulo operation.
    // TODO: DAGCombine fails to do this as it just checks isTruncateFree, but
    // we probably need a better IsDesirableToPromoteOp to handle this as well.
    if (BitNo.getOpcode() == ISD::AND && BitNo->hasOneUse())
      BitNo = DAG.getNode(ISD::AND, DL, Src.getValueType(),
                          DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
                                      BitNo.getOperand(0)),
                          DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
                                      BitNo.getOperand(1)));
    else
      BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
  }

  return DAG.getNode(X86ISD::BT, DL, MVT::i32, Src, BitNo);
}
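
// For example, given an i64 Src whose BitNo has bit 5 known zero, the bit
// index is always < 32, so a 32-bit "bt" on the truncated value tests the
// same bit as the 64-bit form with a smaller encoding.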
/// Helper for creating a X86ISD::SETCC node.
static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
                        SelectionDAG &DAG) {
  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                     DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
}
/// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
/// recognizable memcmp expansion.
static bool isOrXorXorTree(SDValue X, bool Root = true) {
  if (X.getOpcode() == ISD::OR)
    return isOrXorXorTree(X.getOperand(0), false) &&
           isOrXorXorTree(X.getOperand(1), false);
  if (Root)
    return false;
  return X.getOpcode() == ISD::XOR;
}

/// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
/// expansion.
template <typename F>
static SDValue emitOrXorXorTree(SDValue X, const SDLoc &DL, SelectionDAG &DAG,
                                EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
  SDValue Op0 = X.getOperand(0);
  SDValue Op1 = X.getOperand(1);
  if (X.getOpcode() == ISD::OR) {
    SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
    SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
    if (VecVT != CmpVT)
      return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
    if (HasPT)
      return DAG.getNode(ISD::OR, DL, VecVT, A, B);
    return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
  }
  if (X.getOpcode() == ISD::XOR) {
    SDValue A = SToV(Op0);
    SDValue B = SToV(Op1);
    if (VecVT != CmpVT)
      return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
    if (HasPT)
      return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
    return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
  }
  llvm_unreachable("Impossible");
}
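
// The tree shape matched/emitted here is what memcmp expansion produces for,
// e.g., a 32-byte equality test against zero:
//   (or (xor (load a0), (load b0)), (xor (load a1), (load b1))) == 0
// i.e. two 16-byte vector equality tests combined into one reduction.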
/// Try to map a 128-bit or larger integer comparison to vector instructions
/// before type legalization splits it up into chunks.
static SDValue combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y,
                                               ISD::CondCode CC,
                                               const SDLoc &DL,
                                               SelectionDAG &DAG,
                                               const X86Subtarget &Subtarget) {
  assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");

  // We're looking for an oversized integer equality comparison.
  EVT OpVT = X.getValueType();
  unsigned OpSize = OpVT.getSizeInBits();
  if (!OpVT.isScalarInteger() || OpSize < 128)
    return SDValue();

  // Ignore a comparison with zero because that gets special treatment in
  // EmitTest(). But make an exception for the special case of a pair of
  // logically-combined vector-sized operands compared to zero. This pattern may
  // be generated by the memcmp expansion pass with oversized integer compares.
  bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
  if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
    return SDValue();

  // Don't perform this combine if constructing the vector will be expensive.
  auto IsVectorBitCastCheap = [](SDValue X) {
    X = peekThroughBitcasts(X);
    return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
           X.getOpcode() == ISD::LOAD;
  };
  if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
      !IsOrXorXorTreeCCZero)
    return SDValue();

  // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
  // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
  // Otherwise use PCMPEQ (plus AND) and mask testing.
  bool NoImplicitFloatOps =
      DAG.getMachineFunction().getFunction().hasFnAttribute(
          Attribute::NoImplicitFloat);
  if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
      ((OpSize == 128 && Subtarget.hasSSE2()) ||
       (OpSize == 256 && Subtarget.hasAVX()) ||
       (OpSize == 512 && Subtarget.useAVX512Regs()))) {
    bool HasPT = Subtarget.hasSSE41();

    // PTEST and MOVMSK are slow on Knights Landing and Knights Mill and widened
    // vector registers are essentially free. (Technically, widening registers
    // prevents load folding, but the tradeoff is worth it.)
    bool PreferKOT = Subtarget.preferMaskRegisters();
    bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;

    EVT VecVT = MVT::v16i8;
    EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
    if (OpSize == 256) {
      VecVT = MVT::v32i8;
      CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
    }
    EVT CastVT = VecVT;
    bool NeedsAVX512FCast = false;
    if (OpSize == 512 || NeedZExt) {
      if (Subtarget.hasBWI()) {
        VecVT = MVT::v64i8;
        CmpVT = MVT::v64i1;
        if (OpSize == 512)
          CastVT = VecVT;
      } else {
        VecVT = MVT::v16i32;
        CmpVT = MVT::v16i1;
        CastVT = OpSize == 512   ? VecVT
                 : OpSize == 256 ? MVT::v8i32
                                 : MVT::v4i32;
        NeedsAVX512FCast = true;
      }
    }

    auto ScalarToVector = [&](SDValue X) -> SDValue {
      bool TmpZext = false;
      EVT TmpCastVT = CastVT;
      if (X.getOpcode() == ISD::ZERO_EXTEND) {
        SDValue OrigX = X.getOperand(0);
        unsigned OrigSize = OrigX.getScalarValueSizeInBits();
        if (OrigSize < OpSize) {
          if (OrigSize == 128) {
            TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
            X = OrigX;
            TmpZext = true;
          } else if (OrigSize == 256) {
            TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
            X = OrigX;
            TmpZext = true;
          }
        }
      }
      X = DAG.getBitcast(TmpCastVT, X);
      if (!NeedZExt && !TmpZext)
        return X;
      return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
                         DAG.getConstant(0, DL, VecVT), X,
                         DAG.getVectorIdxConstant(0, DL));
    };

    SDValue Cmp;
    if (IsOrXorXorTreeCCZero) {
      // This is a bitwise-combined equality comparison of 2 pairs of vectors:
      // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
      // Use 2 vector equality compares and 'and' the results before doing a
      // MOVMSK.
      Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
    } else {
      SDValue VecX = ScalarToVector(X);
      SDValue VecY = ScalarToVector(Y);
      if (VecVT != CmpVT) {
        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
      } else if (HasPT) {
        Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
      } else {
        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
      }
    }
    // AVX512 should emit a setcc that will lower to kortest.
    if (VecVT != CmpVT) {
      EVT KRegVT = CmpVT == MVT::v64i1   ? MVT::i64
                   : CmpVT == MVT::v32i1 ? MVT::i32
                                         : MVT::i16;
      return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
                          DAG.getConstant(0, DL, KRegVT), CC);
    }
    if (HasPT) {
      SDValue BCCmp =
          DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64, Cmp);
      SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
      X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
      SDValue X86SetCC = getSETCC(X86CC, PT, DL, DAG);
      return DAG.getNode(ISD::TRUNCATE, DL, VT, X86SetCC.getValue(0));
    }
    // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
    // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
    // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
    assert(Cmp.getValueType() == MVT::v16i8 &&
           "Non 128-bit vector on pre-SSE41 target");
    SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
    SDValue FFFFs = DAG.getConstant(0xFFFF, DL, MVT::i32);
    return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
  }

  return SDValue();
}
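
// Illustrative SSE4.1 output for an i128 equality via the PTEST path above:
//   %x = xor <2 x i64> %X, %Y
//   ptest %x, %x        ; ZF = 1 iff every bit of X equals Y
//   sete/setne %al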
/// Helper for matching BINOP(EXTRACTELT(X,0),BINOP(EXTRACTELT(X,1),...))
/// style scalarized (associative) reduction patterns. Partial reductions
/// are supported when the pointer SrcMask is non-null.
/// TODO - move this to SelectionDAG?
static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
                                 SmallVectorImpl<SDValue> &SrcOps,
                                 SmallVectorImpl<APInt> *SrcMask = nullptr) {
  SmallVector<SDValue, 8> Opnds;
  DenseMap<SDValue, APInt> SrcOpMap;
  EVT VT = MVT::Other;

  // Recognize a special case where a vector is cast into a wide integer to
  // test all 0s.
  assert(Op.getOpcode() == unsigned(BinOp) &&
         "Unexpected bit reduction opcode");
  Opnds.push_back(Op.getOperand(0));
  Opnds.push_back(Op.getOperand(1));

  for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
    SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
    // BFS traverse all BinOp operands.
    if (I->getOpcode() == unsigned(BinOp)) {
      Opnds.push_back(I->getOperand(0));
      Opnds.push_back(I->getOperand(1));
      // Re-evaluate the number of nodes to be traversed.
      e += 2; // 2 more nodes (LHS and RHS) are pushed.
      continue;
    }

    // Quit if this is not an EXTRACT_VECTOR_ELT.
    if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return false;

    // Quit if the index is not a constant.
    auto *Idx = dyn_cast<ConstantSDNode>(I->getOperand(1));
    if (!Idx)
      return false;

    SDValue Src = I->getOperand(0);
    DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
    if (M == SrcOpMap.end()) {
      VT = Src.getValueType();
      // Quit if not the same type.
      if (!SrcOpMap.empty() && VT != SrcOpMap.begin()->first.getValueType())
        return false;
      unsigned NumElts = VT.getVectorNumElements();
      APInt EltCount = APInt::getZero(NumElts);
      M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
      SrcOps.push_back(Src);
    }

    // Quit if the element is already used.
    unsigned CIdx = Idx->getZExtValue();
    if (M->second[CIdx])
      return false;
    M->second.setBit(CIdx);
  }

  if (SrcMask) {
    // Collect the source partial masks.
    for (SDValue &SrcOp : SrcOps)
      SrcMask->push_back(SrcOpMap[SrcOp]);
  } else {
    // Quit if not all elements are used.
    for (const auto &I : SrcOpMap)
      if (!I.second.isAllOnes())
        return false;
  }

  return true;
}
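
// For example, a fully-used v4i32 or-reduction matches as:
//   or (extractelt X, 0),
//      (or (extractelt X, 1), (or (extractelt X, 2), (extractelt X, 3)))
// returning X in SrcOps with an all-ones element-use mask.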
// Helper function for comparing all bits of two vectors.
static SDValue LowerVectorAllEqual(const SDLoc &DL, SDValue LHS, SDValue RHS,
                                   ISD::CondCode CC, const APInt &OriginalMask,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG, X86::CondCode &X86CC) {
  EVT VT = LHS.getValueType();
  unsigned ScalarSize = VT.getScalarSizeInBits();
  if (OriginalMask.getBitWidth() != ScalarSize) {
    assert(ScalarSize == 1 && "Element Mask vs Vector bitwidth mismatch");
    return SDValue();
  }

  // Quit if not convertible to a legal scalar or 128/256-bit vector.
  if (!llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
    return SDValue();

  // FCMP may use ISD::SETNE when nnan - early out if we manage to get here.
  if (VT.isFloatingPoint())
    return SDValue();

  assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
  X86CC = (CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE);

  APInt Mask = OriginalMask;

  auto MaskBits = [&](SDValue Src) {
    if (Mask.isAllOnes())
      return Src;
    EVT SrcVT = Src.getValueType();
    SDValue MaskValue = DAG.getConstant(Mask, DL, SrcVT);
    return DAG.getNode(ISD::AND, DL, SrcVT, Src, MaskValue);
  };

  // For sub-128-bit vectors, cast to (legal) integer and compare with zero.
  if (VT.getSizeInBits() < 128) {
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (!DAG.getTargetLoweringInfo().isTypeLegal(IntVT)) {
      if (IntVT != MVT::i64)
        return SDValue();
      auto SplitLHS = DAG.SplitScalar(DAG.getBitcast(IntVT, MaskBits(LHS)), DL,
                                      MVT::i32, MVT::i32);
      auto SplitRHS = DAG.SplitScalar(DAG.getBitcast(IntVT, MaskBits(RHS)), DL,
                                      MVT::i32, MVT::i32);
      SDValue Lo =
          DAG.getNode(ISD::XOR, DL, MVT::i32, SplitLHS.first, SplitRHS.first);
      SDValue Hi =
          DAG.getNode(ISD::XOR, DL, MVT::i32, SplitLHS.second, SplitRHS.second);
      return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                         DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi),
                         DAG.getConstant(0, DL, MVT::i32));
    }
    return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                       DAG.getBitcast(IntVT, MaskBits(LHS)),
                       DAG.getBitcast(IntVT, MaskBits(RHS)));
  }

  // Without PTEST, a masked v2i64 or-reduction is not faster than
  // scalarization.
  bool UseKORTEST = Subtarget.useAVX512Regs();
  bool UsePTEST = Subtarget.hasSSE41();
  if (!UsePTEST && !Mask.isAllOnes() && ScalarSize > 32)
    return SDValue();

  // Split down to a 128/256/512-bit vector.
  unsigned TestSize = UseKORTEST ? 512 : (Subtarget.hasAVX() ? 256 : 128);

  // If the input vector has vector elements wider than the target test size,
  // then cast to <X x i64> so it will safely split.
  if (ScalarSize > TestSize) {
    if (!Mask.isAllOnes())
      return SDValue();
    VT = EVT::getVectorVT(*DAG.getContext(), MVT::i64, VT.getSizeInBits() / 64);
    LHS = DAG.getBitcast(VT, LHS);
    RHS = DAG.getBitcast(VT, RHS);
    Mask = APInt::getAllOnes(64);
  }

  if (VT.getSizeInBits() > TestSize) {
    KnownBits KnownRHS = DAG.computeKnownBits(RHS);
    if (KnownRHS.isConstant() && KnownRHS.getConstant() == Mask) {
      // If ICMP(AND(LHS,MASK),MASK) - reduce using AND splits.
      while (VT.getSizeInBits() > TestSize) {
        auto Split = DAG.SplitVector(LHS, DL);
        VT = Split.first.getValueType();
        LHS = DAG.getNode(ISD::AND, DL, VT, Split.first, Split.second);
      }
      RHS = DAG.getAllOnesConstant(DL, VT);
    } else if (!UsePTEST && !KnownRHS.isZero()) {
      // MOVMSK Special Case:
      // ALLOF(CMPEQ(X,Y)) -> AND(CMPEQ(X[0],Y[0]),CMPEQ(X[1],Y[1]),....)
      MVT SVT = ScalarSize >= 32 ? MVT::i32 : MVT::i8;
      VT = MVT::getVectorVT(SVT, VT.getSizeInBits() / SVT.getSizeInBits());
      LHS = DAG.getBitcast(VT, MaskBits(LHS));
      RHS = DAG.getBitcast(VT, MaskBits(RHS));
      EVT BoolVT = VT.changeVectorElementType(MVT::i1);
      SDValue V = DAG.getSetCC(DL, BoolVT, LHS, RHS, ISD::SETEQ);
      V = DAG.getSExtOrTrunc(V, DL, VT);
      while (VT.getSizeInBits() > TestSize) {
        auto Split = DAG.SplitVector(V, DL);
        VT = Split.first.getValueType();
        V = DAG.getNode(ISD::AND, DL, VT, Split.first, Split.second);
      }
      V = DAG.getNOT(DL, V, VT);
      V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
      return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
                         DAG.getConstant(0, DL, MVT::i32));
    } else {
      // Convert to a ICMP_EQ(XOR(LHS,RHS),0) pattern.
      SDValue V = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
      while (VT.getSizeInBits() > TestSize) {
        auto Split = DAG.SplitVector(V, DL);
        VT = Split.first.getValueType();
        V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
      }
      LHS = V;
      RHS = DAG.getConstant(0, DL, VT);
    }
  }

  if (UseKORTEST && VT.is512BitVector()) {
    MVT TestVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
    MVT BoolVT = TestVT.changeVectorElementType(MVT::i1);
    LHS = DAG.getBitcast(TestVT, MaskBits(LHS));
    RHS = DAG.getBitcast(TestVT, MaskBits(RHS));
    SDValue V = DAG.getSetCC(DL, BoolVT, LHS, RHS, ISD::SETNE);
    return DAG.getNode(X86ISD::KORTEST, DL, MVT::i32, V, V);
  }

  if (UsePTEST) {
    MVT TestVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
    LHS = DAG.getBitcast(TestVT, MaskBits(LHS));
    RHS = DAG.getBitcast(TestVT, MaskBits(RHS));
    SDValue V = DAG.getNode(ISD::XOR, DL, TestVT, LHS, RHS);
    return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, V, V);
  }

  assert(VT.getSizeInBits() == 128 && "Failure to split to 128-bits");
  MVT MaskVT = ScalarSize >= 32 ? MVT::v4i32 : MVT::v16i8;
  LHS = DAG.getBitcast(MaskVT, MaskBits(LHS));
  RHS = DAG.getBitcast(MaskVT, MaskBits(RHS));
  SDValue V = DAG.getNode(X86ISD::PCMPEQ, DL, MaskVT, LHS, RHS);
  V = DAG.getNOT(DL, V, MaskVT);
  V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
  return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
                     DAG.getConstant(0, DL, MVT::i32));
}
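
// The pre-SSE41 tail above yields, e.g. for a v4i32 all-equal test:
//   pcmpeqd : lanes of all-ones where LHS == RHS
//   (not)   : lanes of all-ones where LHS != RHS
//   movmskps: one bit per lane into a GPR
//   cmp $0  : ZF = 1 iff every lane compared equal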
// Check whether an AND/OR'd reduction tree is PTEST-able, or if we can fall
// back to CMP(MOVMSK(PCMPEQB(X,Y))).
static SDValue MatchVectorAllEqualTest(SDValue LHS, SDValue RHS,
                                       ISD::CondCode CC, const SDLoc &DL,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG,
                                       X86::CondCode &X86CC) {
  assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");

  bool CmpNull = isNullConstant(RHS);
  bool CmpAllOnes = isAllOnesConstant(RHS);
  if (!CmpNull && !CmpAllOnes)
    return SDValue();

  SDValue Op = LHS;
  if (!Subtarget.hasSSE2() || !Op->hasOneUse())
    return SDValue();

  // Check whether we're masking/truncating an OR-reduction result, in which
  // case track the masked bits.
  // TODO: Add CmpAllOnes support.
  APInt Mask = APInt::getAllOnes(Op.getScalarValueSizeInBits());
  if (CmpNull) {
    switch (Op.getOpcode()) {
    case ISD::TRUNCATE: {
      SDValue Src = Op.getOperand(0);
      Mask = APInt::getLowBitsSet(Src.getScalarValueSizeInBits(),
                                  Op.getScalarValueSizeInBits());
      Op = Src;
      break;
    }
    case ISD::AND: {
      if (auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
        Mask = Cst->getAPIntValue();
        Op = Op.getOperand(0);
      }
      break;
    }
    }
  }

  ISD::NodeType LogicOp = CmpNull ? ISD::OR : ISD::AND;

  // Match icmp(or(extract(X,0),extract(X,1)),0) anyof reduction patterns.
  // Match icmp(and(extract(X,0),extract(X,1)),-1) allof reduction patterns.
  SmallVector<SDValue, 8> VecIns;
  if (Op.getOpcode() == LogicOp && matchScalarReduction(Op, LogicOp, VecIns)) {
    EVT VT = VecIns[0].getValueType();
    assert(llvm::all_of(VecIns,
                        [VT](SDValue V) { return VT == V.getValueType(); }) &&
           "Reduction source vector mismatch");

    // Quit if not splittable to scalar/128/256/512-bit vector.
    if (!llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
      return SDValue();

    // If more than one full vector is evaluated, AND/OR them first before
    // doing the test.
    for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1;
         Slot += 2, e += 1) {
      // Each iteration will AND/OR 2 nodes and append the result until there is
      // only 1 node left, i.e. the final value of all vectors.
      SDValue LHS = VecIns[Slot];
      SDValue RHS = VecIns[Slot + 1];
      VecIns.push_back(DAG.getNode(LogicOp, DL, VT, LHS, RHS));
    }

    return LowerVectorAllEqual(DL, VecIns.back(),
                               CmpNull ? DAG.getConstant(0, DL, VT)
                                       : DAG.getAllOnesConstant(DL, VT),
                               CC, Mask, Subtarget, DAG, X86CC);
  }

  // Match icmp(reduce_or(X),0) anyof reduction patterns.
  // Match icmp(reduce_and(X),-1) allof reduction patterns.
  if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    ISD::NodeType BinOp;
    if (SDValue Match =
            DAG.matchBinOpReduction(Op.getNode(), BinOp, {LogicOp})) {
      EVT MatchVT = Match.getValueType();
      return LowerVectorAllEqual(DL, Match,
                                 CmpNull ? DAG.getConstant(0, DL, MatchVT)
                                         : DAG.getAllOnesConstant(DL, MatchVT),
                                 CC, Mask, Subtarget, DAG, X86CC);
    }
  }

  if (Mask.isAllOnes()) {
    assert(!Op.getValueType().isVector() &&
           "Illegal vector type for reduction pattern");
    SDValue Src = peekThroughBitcasts(Op);
    if (Src.getValueType().isFixedLengthVector() &&
        Src.getValueType().getScalarType() == MVT::i1) {
      // Match icmp(bitcast(icmp_ne(X,Y)),0) reduction patterns.
      // Match icmp(bitcast(icmp_eq(X,Y)),-1) reduction patterns.
      if (Src.getOpcode() == ISD::SETCC) {
        SDValue LHS = Src.getOperand(0);
        SDValue RHS = Src.getOperand(1);
        EVT LHSVT = LHS.getValueType();
        ISD::CondCode SrcCC = cast<CondCodeSDNode>(Src.getOperand(2))->get();
        if (SrcCC == (CmpNull ? ISD::SETNE : ISD::SETEQ) &&
            llvm::has_single_bit<uint32_t>(LHSVT.getSizeInBits())) {
          APInt SrcMask = APInt::getAllOnes(LHSVT.getScalarSizeInBits());
          return LowerVectorAllEqual(DL, LHS, RHS, CC, SrcMask, Subtarget, DAG,
                                     X86CC);
        }
      }
      // Match icmp(bitcast(vXi1 trunc(Y)),0) reduction patterns.
      // Match icmp(bitcast(vXi1 trunc(Y)),-1) reduction patterns.
      // Peek through truncation, mask the LSB and compare against zero/LSB.
      if (Src.getOpcode() == ISD::TRUNCATE) {
        SDValue Inner = Src.getOperand(0);
        EVT InnerVT = Inner.getValueType();
        if (llvm::has_single_bit<uint32_t>(InnerVT.getSizeInBits())) {
          unsigned BW = InnerVT.getScalarSizeInBits();
          APInt SrcMask = APInt(BW, 1);
          APInt Cmp = CmpNull ? APInt::getZero(BW) : SrcMask;
          return LowerVectorAllEqual(DL, Inner,
                                     DAG.getConstant(Cmp, DL, InnerVT), CC,
                                     SrcMask, Subtarget, DAG, X86CC);
        }
      }
    }
  }

  return SDValue();
}
/// Return true if \c Op has a use that doesn't just read flags.
static bool hasNonFlagsUse(SDValue Op) {
  for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
       ++UI) {
    SDNode *User = *UI;
    unsigned UOpNo = UI.getOperandNo();
    if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past the truncate.
      UOpNo = User->use_begin().getOperandNo();
      User = *User->use_begin();
    }

    if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
      return true;
  }
  return false;
}
// Transform to an x86-specific ALU node with flags if there is a chance of
// using an RMW op or if only the flags are used. Otherwise, leave
// the node alone and emit a 'cmp' or 'test' instruction.
static bool isProfitableToUseFlagOp(SDValue Op) {
  for (SDNode *U : Op->uses())
    if (U->getOpcode() != ISD::CopyToReg &&
        U->getOpcode() != ISD::SETCC &&
        U->getOpcode() != ISD::STORE)
      return false;

  return true;
}
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
                        SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  // CF and OF aren't always set the way we want. Determine which
  // of these we need.
  bool NeedCF = false;
  bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
  case X86::COND_G: case X86::COND_GE:
  case X86::COND_L: case X86::COND_LE:
  case X86::COND_O: case X86::COND_NO: {
    // Check if we really need to set the
    // Overflow flag. If NoSignedWrap is present
    // that is not actually needed.
    switch (Op->getOpcode()) {
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::SHL:
      if (Op.getNode()->getFlags().hasNoSignedWrap())
        break;
      [[fallthrough]];
    default:
      NeedOF = true;
      break;
    }
    break;
  }
  }
  // See if we can use the EFLAGS value from the operand instead of
  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, Op.getValueType()));
  }
  unsigned Opcode = 0;
  unsigned NumOperands = 0;

  SDValue ArithOp = Op;

  // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
  // which may be the result of a CAST. We use the variable 'Op', which is the
  // non-cast value, when we check for possible users.
  switch (ArithOp.getOpcode()) {
  case ISD::AND:
    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
    // because a TEST instruction will be better.
    if (!hasNonFlagsUse(Op))
      break;
    [[fallthrough]];
  case ISD::ADD:
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
    if (!isProfitableToUseFlagOp(Op))
      break;

    // Otherwise use a regular EFLAGS-setting instruction.
    switch (ArithOp.getOpcode()) {
    default: llvm_unreachable("unexpected operator!");
    case ISD::ADD: Opcode = X86ISD::ADD; break;
    case ISD::SUB: Opcode = X86ISD::SUB; break;
    case ISD::XOR: Opcode = X86ISD::XOR; break;
    case ISD::AND: Opcode = X86ISD::AND; break;
    case ISD::OR:  Opcode = X86ISD::OR;  break;
    }

    NumOperands = 2;
    break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    return SDValue(Op.getNode(), 1);
  case ISD::SSUBO:
  case ISD::USUBO: {
    // SSUBO/USUBO will become an X86ISD::SUB and we can use its Z flag.
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
                       Op->getOperand(1)).getValue(1);
  }
  default:
    break;
  }

  if (Opcode == 0) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, Op.getValueType()));
  }
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);

  SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
  return SDValue(New.getNode(), 1);
}
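
// For example, for "t = sub a, b; if (t == 0)" the flag-producing
// X86ISD::SUB node already yields the needed ZF in its second (i32 flags)
// result, so no separate "test" of the subtraction result is emitted.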
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                       const SDLoc &dl, SelectionDAG &DAG,
                       const X86Subtarget &Subtarget) {
  if (isNullConstant(Op1))
    return EmitTest(Op0, X86CC, dl, DAG, Subtarget);

  EVT CmpVT = Op0.getValueType();

  assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
          CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");

  // Only promote the compare up to i32 if it is a 16-bit operation
  // with an immediate. 16-bit immediates are to be avoided.
  if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
      !DAG.getMachineFunction().getFunction().hasMinSize()) {
    ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
    ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
    // Don't do this if the immediate can fit in 8 bits.
    if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
        (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
      unsigned ExtendOp =
          isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
        // For equality comparisons try to use SIGN_EXTEND if the input was
        // truncated from something with enough sign bits.
        if (Op0.getOpcode() == ISD::TRUNCATE) {
          if (DAG.ComputeMaxSignificantBits(Op0.getOperand(0)) <= 16)
            ExtendOp = ISD::SIGN_EXTEND;
        } else if (Op1.getOpcode() == ISD::TRUNCATE) {
          if (DAG.ComputeMaxSignificantBits(Op1.getOperand(0)) <= 16)
            ExtendOp = ISD::SIGN_EXTEND;
        }
      }

      CmpVT = MVT::i32;
      Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
      Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
    }
  }

  // Try to shrink i64 compares if the input has enough zero bits.
  // FIXME: Do this for non-constant compares for constant on LHS?
  if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
      Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
      cast<ConstantSDNode>(Op1)->getAPIntValue().getActiveBits() <= 32 &&
      DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
    CmpVT = MVT::i32;
    Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
    Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
  }

  // 0-x == y --> x+y == 0
  // 0-x != y --> x+y != 0
  if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op0.getOperand(0)) &&
      Op0.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
    SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
    SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(1), Op1);
    return Add.getValue(1);
  }

  // x == 0-y --> x+y == 0
  // x != 0-y --> x+y != 0
  if (Op1.getOpcode() == ISD::SUB && isNullConstant(Op1.getOperand(0)) &&
      Op1.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
    SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
    SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0, Op1.getOperand(1));
    return Add.getValue(1);
  }

  // Use SUB instead of CMP to enable CSE between SUB and CMP.
  SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
  SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
  return Sub.getValue(1);
}
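
// Worked example of the negation folds above: "(0 - x) == y" becomes
// "(x + y) == 0", so instead of "neg; cmp" we emit one flag-producing
// X86ISD::ADD and branch on its ZF result.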
/// Check if replacement of SQRT with RSQRT should be disabled.
bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // We don't need to replace SQRT with RSQRT for half type.
  if (VT.getScalarType() == MVT::f16)
    return true;

  // We never want to use both SQRT and RSQRT instructions for the same input.
  if (DAG.doesNodeExist(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
    return false;

  if (VT.isVector())
    return Subtarget.hasFastVectorFSQRT();
  return Subtarget.hasFastScalarFSQRT();
}
/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
                                           SelectionDAG &DAG, int Enabled,
                                           int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
  // It is likely not profitable to do this for f64 because a double-precision
  // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
  // instructions: convert to single, rsqrtss, convert back to double, refine
  // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
  // along with FMA, this could be a throughput win.
  // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
  // after legalize types.
  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = 1;

    UseOneConstNR = false;
    // There is no FSQRT for 512-bits, but there is RSQRT14.
    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
    SDValue Estimate = DAG.getNode(Opcode, DL, VT, Op);
    if (RefinementSteps == 0 && !Reciprocal)
      Estimate = DAG.getNode(ISD::FMUL, DL, VT, Op, Estimate);
    return Estimate;
  }

  if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
      Subtarget.hasFP16()) {
    assert(Reciprocal && "Don't replace SQRT with RSQRT for half type");
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = 0;

    if (VT == MVT::f16) {
      SDValue Zero = DAG.getIntPtrConstant(0, DL);
      SDValue Undef = DAG.getUNDEF(MVT::v8f16);
      Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
      Op = DAG.getNode(X86ISD::RSQRT14S, DL, MVT::v8f16, Undef, Op);
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
    }

    return DAG.getNode(X86ISD::RSQRT14, DL, VT, Op);
  }
  return SDValue();
}
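
// The refinement step applied by the generic combiner to this estimate is
// the standard Newton-Raphson iteration for rsqrt (roughly):
//   Est' = Est * (1.5 - 0.5 * Op * Est * Est)
// which approximately doubles the ~12 bits of architected accuracy to the
// ~24 bits of an f32 mantissa in a single step.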
/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
  // It is likely not profitable to do this for f64 because a double-precision
  // reciprocal estimate with refinement on x86 prior to FMA requires
  // 15 instructions: convert to single, rcpss, convert back to double, refine
  // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
  // along with FMA, this could be a throughput win.

  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
    // Enable estimate codegen with 1 refinement step for vector division.
    // Scalar division estimates are disabled because they break too much
    // real-world code. These defaults are intended to match GCC behavior.
    if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
      return SDValue();

    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = 1;

    // There is no FRCP for 512-bits, but there is RCP14.
    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
    return DAG.getNode(Opcode, DL, VT, Op);
  }

  if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
      Subtarget.hasFP16()) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = 0;

    if (VT == MVT::f16) {
      SDValue Zero = DAG.getIntPtrConstant(0, DL);
      SDValue Undef = DAG.getUNDEF(MVT::v8f16);
      Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
      Op = DAG.getNode(X86ISD::RCP14S, DL, MVT::v8f16, Undef, Op);
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
    }

    return DAG.getNode(X86ISD::RCP14, DL, VT, Op);
  }
  return SDValue();
}
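
// For reciprocals the corresponding Newton-Raphson iteration is (roughly):
//   Est' = Est * (2.0 - Op * Est)
// again taking the ~2^-12 relative error of rcpss/rcpps to near full f32
// precision with the single refinement step requested above.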
/// If we have at least two divisions that use the same divisor, convert to
/// multiplication by a reciprocal. This may need to be adjusted for a given
/// CPU if a division's cost is not at least twice the cost of a multiplication.
/// This is because we still need one division to calculate the reciprocal and
/// then we need two multiplies by that reciprocal as replacements for the
/// original divisions.
unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
  return 2;
}

SDValue
X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
  if (isIntDivCheap(N->getValueType(0), Attr))
    return SDValue(N, 0); // Lower SDIV as SDIV

  assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
         "Unexpected divisor!");

  // Only perform this transform if CMOV is supported, otherwise the select
  // below will become a branch.
  if (!Subtarget.canUseCMOV())
    return SDValue();

  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  // FIXME: Support i8.
  if (VT != MVT::i16 && VT != MVT::i32 &&
      !(Subtarget.is64Bit() && VT == MVT::i64))
    return SDValue();

  unsigned Lg2 = Divisor.countr_zero();

  // If the divisor is 2 or -2, the default expansion is better.
  if (Lg2 == 1)
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
  SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);

  // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
  SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
  SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
  SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);

  Created.push_back(Cmp.getNode());
  Created.push_back(Add.getNode());
  Created.push_back(CMov.getNode());

  // Divide by pow2.
  SDValue SRA =
      DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i8));

  // If we're dividing by a positive value, we're done. Otherwise, we must
  // negate the result.
  if (Divisor.isNonNegative())
    return SRA;

  Created.push_back(SRA.getNode());
  return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
}
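
// Worked example of the expansion above for i32 "x / 8" (Lg2 == 3):
//   Cmp  = x < 0
//   Add  = x + 7
//   CMov = x < 0 ? Add : x
//   SRA  = CMov >> 3 (arithmetic)
// e.g. x = -9: (-9 + 7) >> 3 = -2 >> 3 = -1, matching C's truncating -9 / 8.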
/// Result of 'and' is compared against zero. Change to a BT node if possible.
/// Returns the BT node and the condition code needed to use it.
static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
                            SelectionDAG &DAG, X86::CondCode &X86CC) {
  assert(And.getOpcode() == ISD::AND && "Expected AND node!");
  SDValue Op0 = And.getOperand(0);
  SDValue Op1 = And.getOperand(1);
  if (Op0.getOpcode() == ISD::TRUNCATE)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::TRUNCATE)
    Op1 = Op1.getOperand(0);

  SDValue Src, BitNo;
  if (Op1.getOpcode() == ISD::SHL)
    std::swap(Op0, Op1);
  if (Op0.getOpcode() == ISD::SHL) {
    if (isOneConstant(Op0.getOperand(0))) {
      // If we looked past a truncate, check that it's only truncating away
      // known zeros.
      unsigned BitWidth = Op0.getValueSizeInBits();
      unsigned AndBitWidth = And.getValueSizeInBits();
      if (BitWidth > AndBitWidth) {
        KnownBits Known = DAG.computeKnownBits(Op0);
        if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
          return SDValue();
      }
      Src = Op1;
      BitNo = Op0.getOperand(1);
    }
  } else if (Op1.getOpcode() == ISD::Constant) {
    ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
    uint64_t AndRHSVal = AndRHS->getZExtValue();
    SDValue AndLHS = Op0;

    if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
      Src = AndLHS.getOperand(0);
      BitNo = AndLHS.getOperand(1);
    } else {
      // Use BT if the immediate can't be encoded in a TEST instruction or we
      // are optimizing for size and the immediate won't fit in a byte.
      bool OptForSize = DAG.shouldOptForSize();
      if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
          isPowerOf2_64(AndRHSVal)) {
        Src = AndLHS;
        BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
                                Src.getValueType());
      }
    }
  }

  // No patterns found, give up.
  if (!Src.getNode())
    return SDValue();

  // Remove any bit flip.
  if (isBitwiseNot(Src)) {
    Src = Src.getOperand(0);
    CC = CC == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ;
  }

  // Attempt to create the X86ISD::BT node.
  if (SDValue BT = getBT(Src, BitNo, dl, DAG)) {
    X86CC = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
    return BT;
  }

  return SDValue();
}
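
// For example, "(x & (1 << n)) != 0" lowers to (X86ISD::BT x, n) with
// condition COND_B (the tested bit is copied into CF); the SETEQ form uses
// COND_AE instead.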
// Check if a pre-AVX condcode can be performed by a single FCMP op.
static bool cheapX86FSETCC_SSE(ISD::CondCode SetCCOpcode) {
  return (SetCCOpcode != ISD::SETONE) && (SetCCOpcode != ISD::SETUEQ);
}
/// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
/// CMPs.
static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
                                   SDValue &Op1, bool &IsAlwaysSignaling) {
  unsigned SSECC;
  bool Swap = false;

  // SSE Condition code mapping:
  //  0 - EQ,  1 - LT,  2 - LE,  3 - UNORD,
  //  4 - NEQ, 5 - NLT, 6 - NLE, 7 - ORD
  switch (SetCCOpcode) {
  default: llvm_unreachable("Unexpected SETCC condition");
  case ISD::SETOEQ:
  case ISD::SETEQ:  SSECC = 0; break;
  case ISD::SETOGT:
  case ISD::SETGT:  Swap = true; [[fallthrough]];
  case ISD::SETLT:
  case ISD::SETOLT: SSECC = 1; break;
  case ISD::SETOGE:
  case ISD::SETGE:  Swap = true; [[fallthrough]];
  case ISD::SETLE:
  case ISD::SETOLE: SSECC = 2; break;
  case ISD::SETUO:  SSECC = 3; break;
  case ISD::SETUNE:
  case ISD::SETNE:  SSECC = 4; break;
  case ISD::SETULE: Swap = true; [[fallthrough]];
  case ISD::SETUGE: SSECC = 5; break;
  case ISD::SETULT: Swap = true; [[fallthrough]];
  case ISD::SETUGT: SSECC = 6; break;
  case ISD::SETO:   SSECC = 7; break;
  case ISD::SETUEQ: SSECC = 8; break;
  case ISD::SETONE: SSECC = 12; break;
  }
  if (Swap)
    std::swap(Op0, Op1);

  switch (SetCCOpcode) {
  default:
    IsAlwaysSignaling = true;
    break;
  case ISD::SETEQ:
  case ISD::SETOEQ:
  case ISD::SETUEQ:
  case ISD::SETNE:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETO:
  case ISD::SETUO:
    IsAlwaysSignaling = false;
    break;
  }

  return SSECC;
}
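
// For example, SETOLT maps directly to immediate 1 (LT), while SETUEQ (8)
// and SETONE (12) have no single 3-bit pre-AVX encoding; callers detect that
// via cheapX86FSETCC_SSE and emit an UNORD/EQ or ORD/NEQ pair combined with
// FOR/FAND instead.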
/// Break a 256-bit integer VSETCC into two new 128-bit ones and then
/// concatenate the result back.
static SDValue splitIntVSETCC(EVT VT, SDValue LHS, SDValue RHS,
                              ISD::CondCode Cond, SelectionDAG &DAG,
                              const SDLoc &dl) {
  assert(VT.isInteger() && VT == LHS.getValueType() &&
         VT == RHS.getValueType() && "Unsupported VTs!");

  SDValue CC = DAG.getCondCode(Cond);

  // Extract the LHS Lo/Hi vectors.
  SDValue LHS1, LHS2;
  std::tie(LHS1, LHS2) = splitVector(LHS, DAG, dl);

  // Extract the RHS Lo/Hi vectors.
  SDValue RHS1, RHS2;
  std::tie(RHS1, RHS2) = splitVector(RHS, DAG, dl);

  // Issue the operation on the smaller types and concatenate the result back.
  EVT LoVT, HiVT;
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(ISD::SETCC, dl, LoVT, LHS1, RHS1, CC),
                     DAG.getNode(ISD::SETCC, dl, HiVT, LHS2, RHS2, CC));
}
static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  assert(VT.getVectorElementType() == MVT::i1 &&
         "Cannot set masked compare for this operation");

  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();

  // Prefer SETGT over SETLT.
  if (SetCCOpcode == ISD::SETLT) {
    SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
    std::swap(Op0, Op1);
  }

  return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
}
/// Given a buildvector constant, return a new vector constant with each element
/// incremented or decremented. If incrementing or decrementing would result in
/// unsigned overflow or underflow or this is not a simple vector constant,
/// return an empty value.
static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc,
                                    bool NSW) {
  auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
  if (!BV || !V.getValueType().isSimple())
    return SDValue();

  MVT VT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> NewVecC;
  SDLoc DL(V);
  for (unsigned i = 0; i < NumElts; ++i) {
    auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
      return SDValue();

    // Avoid overflow/underflow.
    const APInt &EltC = Elt->getAPIntValue();
    if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isZero()))
      return SDValue();
    if (NSW && ((IsInc && EltC.isMaxSignedValue()) ||
                (!IsInc && EltC.isMinSignedValue())))
      return SDValue();

    NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
  }

  return DAG.getBuildVector(VT, DL, NewVecC);
}
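
// For example, a v4i32 splat of 7 with IsInc=true becomes a splat of 8,
// letting a caller rewrite "X >u 7" as "X >=u 8" and avoid an extra invert.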
/// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
/// Op0 u<= Op1:
///   t = psubus Op0, Op1
///   pcmpeq t, <0..0>
static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
                                    ISD::CondCode Cond, const SDLoc &dl,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  MVT VET = VT.getVectorElementType();
  if (VET != MVT::i8 && VET != MVT::i16)
    return SDValue();

  switch (Cond) {
  default:
    return SDValue();
  case ISD::SETULT: {
    // If the comparison is against a constant we can turn this into a
    // setule. With psubus, setule does not require a swap. This is
    // beneficial because the constant in the register is no longer
    // clobbered as the destination, so it can be hoisted out of a loop.
    // Only do this pre-AVX since vpcmp* is no longer destructive.
    if (Subtarget.hasAVX())
      return SDValue();
    SDValue ULEOp1 =
        incDecVectorConstant(Op1, DAG, /*IsInc*/ false, /*NSW*/ false);
    if (!ULEOp1)
      return SDValue();
    Op1 = ULEOp1;
    break;
  }
  case ISD::SETUGT: {
    // If the comparison is against a constant, we can turn this into a setuge.
    // This is beneficial because materializing a constant 0 for the PCMPEQ is
    // probably cheaper than XOR+PCMPGT using 2 different vector constants:
    // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
    SDValue UGEOp1 =
        incDecVectorConstant(Op1, DAG, /*IsInc*/ true, /*NSW*/ false);
    if (!UGEOp1)
      return SDValue();
    Op1 = Op0;
    Op0 = UGEOp1;
    break;
  }
  // Psubus is better than flip-sign because it requires no inversion.
  case ISD::SETUGE:
    std::swap(Op0, Op1);
    break;
  case ISD::SETULE:
    break;
  }

  SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
  return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
                     DAG.getConstant(0, dl, VT));
}
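
// The identity used above: for unsigned x, y, "x <=u y" holds iff
// usubsat(x, y) == 0. PSUBUS computes max(x - y, 0) per lane, so PCMPEQ
// against zero yields all-ones exactly in the lanes where x <=u y.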
static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
                  Op.getOpcode() == ISD::STRICT_FSETCCS;
  SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
  SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
  SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
  MVT VT = Op->getSimpleValueType(0);
  ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
  bool isFP = Op1.getSimpleValueType().isFloatingPoint();
  SDLoc dl(Op);

  if (isFP) {
    MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
    assert(EltVT == MVT::f16 || EltVT == MVT::f32 || EltVT == MVT::f64);
    if (isSoftFP16(EltVT, Subtarget))
      return SDValue();

    bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
    SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();

    // If we have a strict compare with a vXi1 result and the input is 128/256
    // bits we can't use a masked compare unless we have VLX. If we use a wider
    // compare like we do for non-strict, we might trigger spurious exceptions
    // from the upper elements. Instead emit an AVX compare and convert to mask.
    unsigned Opc;
    if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1 &&
        (!IsStrict || Subtarget.hasVLX() ||
         Op0.getSimpleValueType().is512BitVector())) {
#ifndef NDEBUG
      unsigned Num = VT.getVectorNumElements();
      assert(Num <= 16 || (Num == 32 && EltVT == MVT::f16));
#endif
      Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
    } else {
      Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
      // The SSE/AVX packed FP comparison nodes are defined with a
      // floating-point vector result that matches the operand type. This allows
      // them to work with an SSE1 target (integer vector types are not legal).
      VT = Op0.getSimpleValueType();
    }

    SDValue Cmp;
    bool IsAlwaysSignaling;
    unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
    if (!Subtarget.hasAVX()) {
      // TODO: We could use following steps to handle a quiet compare with
      // signaling encodings.
      // 1. Get ordered masks from a quiet ISD::SETO
      // 2. Use the masks to mask potential unordered elements in operand A, B
      // 3. Get the compare results of masked A, B
      // 4. Calculating final result using the mask and result from 3
      // But currently, we just fall back to scalar operations.
      if (IsStrict && IsAlwaysSignaling && !IsSignaling)
        return SDValue();

      // Insert an extra signaling instruction to raise an exception.
      if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
        SDValue SignalCmp = DAG.getNode(
            Opc, dl, {VT, MVT::Other},
            {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
        // FIXME: It seems we need to update the flags of all new strict nodes.
        // Otherwise, mayRaiseFPException in MI will return false due to
        // NoFPExcept = false by default. However, I didn't find it in other
        // patches.
        SignalCmp->setFlags(Op->getFlags());
        Chain = SignalCmp.getValue(1);
      }

      // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
      // emit two comparisons and a logic op to tie them together.
      if (!cheapX86FSETCC_SSE(Cond)) {
        // The LLVM predicate is SETUEQ or SETONE.
        unsigned CC0, CC1;
        unsigned CombineOpc;
        if (Cond == ISD::SETUEQ) {
          CC0 = 3; // UNORD
          CC1 = 0; // EQ
          CombineOpc = X86ISD::FOR;
        } else {
          assert(Cond == ISD::SETONE);
          CC0 = 7; // ORD
          CC1 = 4; // NEQ
          CombineOpc = X86ISD::FAND;
        }

        SDValue Cmp0, Cmp1;
        if (IsStrict) {
          Cmp0 = DAG.getNode(
              Opc, dl, {VT, MVT::Other},
              {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
          Cmp1 = DAG.getNode(
              Opc, dl, {VT, MVT::Other},
              {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
          Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
                              Cmp1.getValue(1));
        } else {
          Cmp0 = DAG.getNode(
              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
          Cmp1 = DAG.getNode(
              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
        }
        Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
      } else {
        if (IsStrict) {
          Cmp = DAG.getNode(
              Opc, dl, {VT, MVT::Other},
              {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
          Chain = Cmp.getValue(1);
        } else
          Cmp = DAG.getNode(
              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
      }
    } else {
      // Handle all other FP comparisons here.
      if (IsStrict) {
        // Make a flip on already signaling CCs before setting bit 4 of AVX CC.
        SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
        Cmp = DAG.getNode(
            Opc, dl, {VT, MVT::Other},
            {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
        Chain = Cmp.getValue(1);
      } else
        Cmp = DAG.getNode(
            Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
    }

    if (VT.getFixedSizeInBits() >
        Op.getSimpleValueType().getFixedSizeInBits()) {
      // We emitted a compare with an XMM/YMM result. Finish converting to a
      // mask register using a vptestm.
      EVT CastVT = EVT(VT).changeVectorElementTypeToInteger();
      Cmp = DAG.getBitcast(CastVT, Cmp);
      Cmp = DAG.getSetCC(dl, Op.getSimpleValueType(), Cmp,
                         DAG.getConstant(0, dl, CastVT), ISD::SETNE);
    } else {
      // If this is SSE/AVX CMPP, bitcast the result back to integer to match
      // the result type of SETCC. The bitcast is expected to be optimized
      // away during combining/isel.
      Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
    }

    if (IsStrict)
      return DAG.getMergeValues({Cmp, Chain}, dl);

    return Cmp;
  }
  assert(!IsStrict && "Strict SETCC only handles FP operands.");

  MVT VTOp0 = Op0.getSimpleValueType();
  (void)VTOp0;
  assert(VTOp0 == Op1.getSimpleValueType() &&
         "Expected operands with same type!");
  assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
         "Invalid number of packed elements for source and destination!");

  // The non-AVX512 code below works under the assumption that source and
  // destination types are the same.
  assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
         "Value types for source and destination must be the same!");

  // The result is boolean, but operands are int/float.
  if (VT.getVectorElementType() == MVT::i1) {
    // In the AVX-512 architecture setcc returns a mask with i1 elements,
    // but there is no compare instruction for i8 and i16 elements in KNL.
    assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
           "Unexpected operand type");
    return LowerIntVSETCC_AVX512(Op, DAG);
  }

  // Lower using XOP integer comparisons.
  if (VT.is128BitVector() && Subtarget.hasXOP()) {
    // Translate the compare code to the XOP PCOM compare mode.
    unsigned CmpMode = 0;
    switch (Cond) {
    default: llvm_unreachable("Unexpected SETCC condition");
    case ISD::SETULT:
    case ISD::SETLT: CmpMode = 0x00; break;
    case ISD::SETULE:
    case ISD::SETLE: CmpMode = 0x01; break;
    case ISD::SETUGT:
    case ISD::SETGT: CmpMode = 0x02; break;
    case ISD::SETUGE:
    case ISD::SETGE: CmpMode = 0x03; break;
    case ISD::SETEQ: CmpMode = 0x04; break;
    case ISD::SETNE: CmpMode = 0x05; break;
    }

    // Are we comparing unsigned or signed integers?
    unsigned Opc =
        ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;

    return DAG.getNode(Opc, dl, VT, Op0, Op1,
                       DAG.getTargetConstant(CmpMode, dl, MVT::i8));
  }

  // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
  // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
  if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
    SDValue BC0 = peekThroughBitcasts(Op0);
    if (BC0.getOpcode() == ISD::AND) {
      APInt UndefElts;
      SmallVector<APInt, 64> EltBits;
      if (getTargetConstantBitsFromNode(BC0.getOperand(1),
                                        VT.getScalarSizeInBits(), UndefElts,
                                        EltBits, false, false)) {
        if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
          Cond = ISD::SETEQ;
          Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
        }
      }
    }
  }

  // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
  if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
      Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
    ConstantSDNode *C1 = isConstOrConstSplat(Op1);
    if (C1 && C1->getAPIntValue().isPowerOf2()) {
      unsigned BitWidth = VT.getScalarSizeInBits();
      unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;

      SDValue Result = Op0.getOperand(0);
      Result = DAG.getNode(ISD::SHL, dl, VT, Result,
                           DAG.getConstant(ShiftAmt, dl, VT));
      Result = DAG.getNode(ISD::SRA, dl, VT, Result,
                           DAG.getConstant(BitWidth - 1, dl, VT));
      return Result;
    }
  }

  // Break 256-bit integer vector compare into smaller ones.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);

  // Break 512-bit integer vector compare into smaller ones.
  // TODO: Try harder to use VPCMPx + VPMOV2x?
  if (VT.is512BitVector())
    return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
25905 // If we have a limit constant, try to form PCMPGT (signed cmp) to avoid
25907 // X != INT_MIN --> X >s INT_MIN
25908 // X != INT_MAX --> X <s INT_MAX --> INT_MAX >s X
25909 // +X != 0 --> +X >s 0
25911 if (Cond == ISD::SETNE &&
25912 ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
25913 if (ConstValue.isMinSignedValue())
25915 else if (ConstValue.isMaxSignedValue())
25917 else if (ConstValue.isZero() && DAG.SignBitIsZero(Op0))
25921 // If both operands are known non-negative, then an unsigned compare is the
25922 // same as a signed compare and there's no need to flip signbits.
25923 // TODO: We could check for more general simplifications here since we're
25924 // computing known bits.
25925 bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
25926 !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
  // Special case: Use min/max operations for unsigned compares.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (ISD::isUnsignedIntSetCC(Cond) &&
      (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
      TLI.isOperationLegal(ISD::UMIN, VT)) {
    // If we have a constant operand, increment/decrement it and change the
    // condition to avoid an invert.
    if (Cond == ISD::SETUGT) {
      // X > C --> X >= (C+1) --> X == umax(X, C+1)
      if (SDValue UGTOp1 =
              incDecVectorConstant(Op1, DAG, /*IsInc*/ true, /*NSW*/ false)) {
        Op1 = UGTOp1;
        Cond = ISD::SETUGE;
      }
    }
    if (Cond == ISD::SETULT) {
      // X < C --> X <= (C-1) --> X == umin(X, C-1)
      if (SDValue ULTOp1 =
              incDecVectorConstant(Op1, DAG, /*IsInc*/ false, /*NSW*/ false)) {
        Op1 = ULTOp1;
        Cond = ISD::SETULE;
      }
    }
    bool Invert = false;
    unsigned Opc;
    switch (Cond) {
    default: llvm_unreachable("Unexpected condition code");
    case ISD::SETUGT: Invert = true; [[fallthrough]];
    case ISD::SETULE: Opc = ISD::UMIN; break;
    case ISD::SETULT: Invert = true; [[fallthrough]];
    case ISD::SETUGE: Opc = ISD::UMAX; break;
    }

    SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);

    // If the logical-not of the result is required, perform that now.
    if (Invert)
      Result = DAG.getNOT(dl, Result, VT);

    return Result;
  }
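  // E.g. X <=u C becomes PCMPEQ(X, UMIN(X, C)): the unsigned minimum equals X
  // exactly when X <=u C, so no post-invert is needed (a sketch; the
  // isOperationLegal(UMIN) check above guarantees the required PMINU* form
  // exists for this element type).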
  // Try to use SUBUS and PCMPEQ.
  if (FlipSigns)
    if (SDValue V =
            LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
      return V;

  // We are handling one of the integer comparisons here. Since SSE only has
  // GT and EQ comparisons for integer, swapping operands and multiple
  // operations may be required for some comparisons.
  unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
                                                            : X86ISD::PCMPGT;
  bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
              Cond == ISD::SETGE || Cond == ISD::SETUGE;
  bool Invert = Cond == ISD::SETNE ||
                (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));

  if (Swap)
    std::swap(Op0, Op1);

  // Check that the operation in question is available (most are plain SSE2,
  // but PCMPGTQ and PCMPEQQ have different requirements).
  if (VT == MVT::v2i64) {
    if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
      assert(Subtarget.hasSSE2() && "Don't know how to lower!");

      // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
      // the odd elements over the even elements.
      if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
        Op0 = DAG.getConstant(0, dl, MVT::v4i32);
        Op1 = DAG.getBitcast(MVT::v4i32, Op1);

        SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
        static const int MaskHi[] = { 1, 1, 3, 3 };
        SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);

        return DAG.getBitcast(VT, Result);
      }

      if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
        Op0 = DAG.getBitcast(MVT::v4i32, Op0);
        Op1 = DAG.getConstant(-1, dl, MVT::v4i32);

        SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
        static const int MaskHi[] = { 1, 1, 3, 3 };
        SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);

        return DAG.getBitcast(VT, Result);
      }

      // Since SSE has no unsigned integer comparisons, we need to flip the sign
      // bits of the inputs before performing those operations. The lower
      // compare is always unsigned.
      SDValue SB = DAG.getConstant(FlipSigns ? 0x8000000080000000ULL
                                             : 0x0000000080000000ULL,
                                   dl, MVT::v2i64);
      Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
      Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);

      // Cast everything to the right type.
      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
      Op1 = DAG.getBitcast(MVT::v4i32, Op1);

      // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
      SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
      SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);

      // Create masks for only the low parts/high parts of the 64 bit integers.
      static const int MaskHi[] = { 1, 1, 3, 3 };
      static const int MaskLo[] = { 0, 0, 2, 2 };
      SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
      SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
      SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);

      SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
      Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);

      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);

      return DAG.getBitcast(VT, Result);
    }

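    // Concrete sketch of the emulation: comparing x = 0x0000000180000000 with
    // y = 0x0000000100000000 as signed i64, the high halves tie, so the
    // verdict comes from the unsigned low-half compare (0x80000000 >u 0),
    // which the sign-bit XOR above turns into a signed PCMPGTD the hardware
    // actually has.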
    if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
      // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq
      // with pcmpeqd + pshufd + pand.
      assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");

      // First cast everything to the right type.
      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
      Op1 = DAG.getBitcast(MVT::v4i32, Op1);

      // Do the compare.
      SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);

      // Make sure the lower and upper halves are both all-ones.
      static const int Mask[] = { 1, 0, 3, 2 };
      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
      Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);

      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);

      return DAG.getBitcast(VT, Result);
    }
  }

  // Since SSE has no unsigned integer comparisons, we need to flip the sign
  // bits of the inputs before performing those operations.
  if (FlipSigns) {
    MVT EltVT = VT.getVectorElementType();
    SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
                                 VT);
    Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
  }

  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  // If the logical-not of the result is required, perform that now.
  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}
// Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
                              const SDLoc &dl, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget,
                              SDValue &X86CC) {
  assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");

  // Must be a bitcast from vXi1.
  if (Op0.getOpcode() != ISD::BITCAST)
    return SDValue();

  Op0 = Op0.getOperand(0);
  MVT VT = Op0.getSimpleValueType();
  if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
      !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
      !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
    return SDValue();

  X86::CondCode X86Cond;
  if (isNullConstant(Op1)) {
    X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
  } else if (isAllOnesConstant(Op1)) {
    // C flag is set for all ones.
    X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
  } else
    return SDValue();

  // If the input is an AND, we can combine its operands into the KTEST.
  bool KTestable = false;
  if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
    KTestable = true;
  if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
    KTestable = true;
  if (!isNullConstant(Op1))
    KTestable = false;
  if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
    SDValue LHS = Op0.getOperand(0);
    SDValue RHS = Op0.getOperand(1);
    X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
    return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
  }

  // If the input is an OR, we can combine its operands into the KORTEST.
  SDValue LHS = Op0;
  SDValue RHS = Op0;
  if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
    LHS = Op0.getOperand(0);
    RHS = Op0.getOperand(1);
  }

  X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
  return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
}
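// For a v16i1 mask M tested against zero, the selection above boils down to
// a sketch like:
//   kortestw %k1, %k1    ; ZF set iff all 16 mask bits are zero
//   sete     %al         ; COND_E for the SETEQ case
// while the all-ones test reads CF instead (COND_B / COND_AE).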
/// Emit flags for the given setcc condition and operands. Also returns the
/// corresponding X86 condition code constant in X86CC.
SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
                                             ISD::CondCode CC, const SDLoc &dl,
                                             SelectionDAG &DAG,
                                             SDValue &X86CC) const {
  // Equality Combines.
  if (CC == ISD::SETEQ || CC == ISD::SETNE) {
    X86::CondCode X86CondCode;

    // Optimize to BT if possible.
    // Lower (X & (1 << N)) == 0 to BT(X, N).
    // Lower ((X >>u N) & 1) != 0 to BT(X, N).
    // Lower ((X >>s N) & 1) != 0 to BT(X, N).
    if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1)) {
      if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CondCode)) {
        X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
        return BT;
      }
    }

    // Try to use PTEST/PMOVMSKB for a tree AND/ORs equality compared with -1/0.
    if (SDValue CmpZ = MatchVectorAllEqualTest(Op0, Op1, CC, dl, Subtarget, DAG,
                                               X86CondCode)) {
      X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
      return CmpZ;
    }

    // Try to lower using KORTEST or KTEST.
    if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
      return Test;

    // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms
    // of these.
    if (isOneConstant(Op1) || isNullConstant(Op1)) {
      // If the input is a setcc, then reuse the input setcc or use a new one
      // with the inverted condition.
      if (Op0.getOpcode() == X86ISD::SETCC) {
        bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);

        X86CC = Op0.getOperand(0);
        if (Invert) {
          X86CondCode = (X86::CondCode)Op0.getConstantOperandVal(0);
          X86CondCode = X86::GetOppositeBranchCondition(X86CondCode);
          X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
        }
        return Op0.getOperand(1);
      }
    }

    // Try to use the carry flag from the add in place of a separate CMP for:
    // (seteq (add X, -1), -1). Similar for setne.
    if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
        Op0.getOperand(1) == Op1) {
      if (isProfitableToUseFlagOp(Op0)) {
        SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);

        SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
                                  Op0.getOperand(1));
        DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
        X86CondCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
        X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
        return SDValue(New.getNode(), 1);
      }
    }
  }

  X86::CondCode CondCode =
      TranslateX86CC(CC, dl, /*IsFP*/ false, Op0, Op1, DAG);
  assert(CondCode != X86::COND_INVALID && "Unexpected condition code!");

  SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget);
  X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
  return EFLAGS;
}
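// For instance, (X & (1 << N)) == 0 taken through LowerAndToBT above comes
// out as roughly (a sketch, with X assumed in %eax and N in %ecx):
//   btl   %ecx, %eax   ; CF = bit N of X
//   setae %al          ; COND_AE: true when the bit is clear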
SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
                  Op.getOpcode() == ISD::STRICT_FSETCCS;
  MVT VT = Op->getSimpleValueType(0);

  if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);

  assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
  SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
  SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
  SDLoc dl(Op);
  ISD::CondCode CC =
      cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();

  if (isSoftFP16(Op0.getValueType()))
    return SDValue();

  // Handle f128 first, since one possible outcome is a normal integer
  // comparison which gets handled by emitFlagsForSetcc.
  if (Op0.getValueType() == MVT::f128) {
    softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
                        Op.getOpcode() == ISD::STRICT_FSETCCS);

    // If softenSetCCOperands returned a scalar, use it.
    if (!Op1.getNode()) {
      assert(Op0.getValueType() == Op.getValueType() &&
             "Unexpected setcc expansion!");
      if (IsStrict)
        return DAG.getMergeValues({Op0, Chain}, dl);
      return Op0;
    }
  }

  if (Op0.getSimpleValueType().isInteger()) {
    // Attempt to canonicalize SGT/UGT -> SGE/UGE compares with constant, which
    // reduces the number of EFLAGS bits read (the GE conditions don't read
    // ZF); this may translate to fewer uops depending on the uarch
    // implementation. The equivalent for SLE/ULE -> SLT/ULT isn't likely to
    // happen as we already canonicalize to that CondCode.
    // NOTE: Only do this if incrementing the constant doesn't increase the bit
    // encoding size - so it must either already be an i8 or i32 immediate, or
    // it shrinks down to that. We don't do this for any i64's to avoid
    // additional constant materializations.
    // TODO: Can we move this to TranslateX86CC to handle jumps/branches too?
    if (auto *Op1C = dyn_cast<ConstantSDNode>(Op1)) {
      const APInt &Op1Val = Op1C->getAPIntValue();
      if (!Op1Val.isZero()) {
        // Ensure the constant+1 doesn't overflow.
        if ((CC == ISD::CondCode::SETGT && !Op1Val.isMaxSignedValue()) ||
            (CC == ISD::CondCode::SETUGT && !Op1Val.isMaxValue())) {
          APInt Op1ValPlusOne = Op1Val + 1;
          if (Op1ValPlusOne.isSignedIntN(32) &&
              (!Op1Val.isSignedIntN(8) || Op1ValPlusOne.isSignedIntN(8))) {
            Op1 = DAG.getConstant(Op1ValPlusOne, dl, Op0.getValueType());
            CC = CC == ISD::CondCode::SETGT ? ISD::CondCode::SETGE
                                            : ISD::CondCode::SETUGE;
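            // E.g. (x >u 0x63) becomes (x >=u 0x64): SETAE/JAE reads only CF,
            // while SETA/JA reads both CF and ZF.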
          }
        }
      }
    }

    SDValue X86CC;
    SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
    SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
    return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
  }

  // Handle floating point.
  X86::CondCode CondCode = TranslateX86CC(CC, dl, /*IsFP*/ true, Op0, Op1, DAG);
  if (CondCode == X86::COND_INVALID)
    return SDValue();

  SDValue EFLAGS;
  if (IsStrict) {
    bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
    EFLAGS =
        DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
                    dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
    Chain = EFLAGS.getValue(1);
  } else {
    EFLAGS = DAG.getNode(X86ISD::FCMP, dl, MVT::i32, Op0, Op1);
  }

  SDValue X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
  SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
  return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
}
SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);

  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
  X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());

  // Recreate the carry if needed.
  EVT CarryVT = Carry.getValueType();
  Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
                      Carry, DAG.getAllOnesConstant(DL, CarryVT));

  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
  SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
  return getSETCC(CC, Cmp.getValue(1), DL, DAG);
}
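// SETCCCARRY typically appears when a wide compare is expanded; e.g. for an
// i128 setlt on x86-64 the low halves go through SUB, the borrow re-enters
// here, the high halves compare via SBB, and the final SETcc reads the SBB's
// flags (a sketch of the common expansion, not tied to a specific caller).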
// This function returns three things: the arithmetic computation itself
// (Value), an EFLAGS result (Overflow), and a condition code (Cond). The
// flag and the condition code define the case in which the arithmetic
// computation overflows.
static std::pair<SDValue, SDValue>
getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
  assert(Op.getResNo() == 0 && "Unexpected result number!");
  SDValue Value, Overflow;
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  unsigned BaseOp = 0;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_O;
    break;
  case ISD::UADDO:
    BaseOp = X86ISD::ADD;
    Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
    break;
  case ISD::SSUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_O;
    break;
  case ISD::USUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_B;
    break;
  case ISD::SMULO:
    BaseOp = X86ISD::SMUL;
    Cond = X86::COND_O;
    break;
  case ISD::UMULO:
    BaseOp = X86ISD::UMUL;
    Cond = X86::COND_O;
    break;
  }

  SDLoc DL(Op);
  // Also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
  Overflow = Value.getValue(1);

  return std::make_pair(Value, Overflow);
}

static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
  // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
  // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
  // looks for this combo and may remove the "setcc" instruction if the "setcc"
  // has only one use.
  SDLoc DL(Op);
  X86::CondCode Cond;
  SDValue Value, Overflow;
  std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);

  SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
  assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
  return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
}
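// For example, i32 llvm.sadd.with.overflow taken through the path above
// comes out as a sketch like (operands assumed in %edi/%esi):
//   addl %esi, %edi    ; X86ISD::ADD also defines EFLAGS
//   seto %al           ; COND_O from the table in getX86XALUOOp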
/// Return true if the opcode is an X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
  unsigned Opc = Op.getOpcode();
  if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
      Opc == X86ISD::FCMP)
    return true;
  if (Op.getResNo() == 1 &&
      (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
       Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
       Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
    return true;
  return false;
}

static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue VOp0 = V.getOperand(0);
  unsigned InBits = VOp0.getValueSizeInBits();
  unsigned Bits = V.getValueSizeInBits();
  return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
}
SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  bool AddTest = true;
  SDValue Cond = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  MVT VT = Op1.getSimpleValueType();
  SDValue CC;

  if (isSoftFP16(VT)) {
    MVT NVT = VT.changeTypeToInteger();
    return DAG.getBitcast(VT, DAG.getNode(ISD::SELECT, DL, NVT, Cond,
                                          DAG.getBitcast(NVT, Op1),
                                          DAG.getBitcast(NVT, Op2)));
  }

  // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE
  // ops are available, or into VBLENDV when AVX is available. Otherwise FP
  // cmovs get lowered into a less efficient branch sequence later.
  if (Cond.getOpcode() == ISD::SETCC && isScalarFPTypeInSSEReg(VT) &&
      VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
    SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
    bool IsAlwaysSignaling;
    unsigned SSECC =
        translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
                           CondOp0, CondOp1, IsAlwaysSignaling);

    if (Subtarget.hasAVX512()) {
      SDValue Cmp =
          DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
                      DAG.getTargetConstant(SSECC, DL, MVT::i8));
      assert(!VT.isVector() && "Not a scalar type?");
      return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
    }

    if (SSECC < 8 || Subtarget.hasAVX()) {
      SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
                                DAG.getTargetConstant(SSECC, DL, MVT::i8));

      // If we have AVX, we can use a variable vector select (VBLENDV) instead
      // of 3 logic instructions for size savings and potentially speed.
      // Unfortunately, there is no scalar form of VBLENDV.
      //
      // If either operand is a +0.0 constant, don't try this. We can expect to
      // optimize away at least one of the logic instructions later in that
      // case, so that sequence would be faster than a variable blend.
      //
      // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
      // uses XMM0 as the selection register. That may need just as many
      // instructions as the AND/ANDN/OR sequence due to register moves, so
      // don't bother.
      if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
          !isNullFPConstant(Op2)) {
        // Convert to vectors, do a VSELECT, and convert back to scalar.
        // All of the conversions should be optimized away.
        MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
        SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
        SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
        SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);

        MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
        VCmp = DAG.getBitcast(VCmpVT, VCmp);

        SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);

        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                           VSel, DAG.getIntPtrConstant(0, DL));
      }
      SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
      SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
      return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
    }
  }

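  // Sketch of the non-AVX sequence built above for 'x < y ? a : b' on f32:
  //   cmpltss %xmm1, %xmm0   ; mask = (x < y) ? ~0 : 0
  //   andps   ...            ; keep a where the mask is set
  //   andnps  ...            ; keep b where it is clear
  //   orps    ...            ; merge the two halves
  // a rough mapping of the FSETCC/FAND/FANDN/FOR nodes, register allocation
  // aside.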
  // AVX512 fallback is to lower selects of scalar floats to masked moves.
  if (isScalarFPTypeInSSEReg(VT) && Subtarget.hasAVX512()) {
    SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
    return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
  }

  if (Cond.getOpcode() == ISD::SETCC &&
      !isSoftFP16(Cond.getOperand(0).getSimpleValueType())) {
    if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
      Cond = NewCond;
      // If the condition was updated, it's possible that the operands of the
      // select were also updated (for example, EmitTest has a RAUW). Refresh
      // the local references to the select operands in case they got stale.
      Op1 = Op.getOperand(1);
      Op2 = Op.getOperand(2);
    }
  }

  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
  // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
  // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
  // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
  // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
  if (Cond.getOpcode() == X86ISD::SETCC &&
      Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
      isNullConstant(Cond.getOperand(1).getOperand(1))) {
    SDValue Cmp = Cond.getOperand(1);
    SDValue CmpOp0 = Cmp.getOperand(0);
    unsigned CondCode = Cond.getConstantOperandVal(0);

    // Special handling for __builtin_ffs(X) - 1 pattern, which looks like
    // (select (seteq X, 0), -1, (cttz_zero_undef X)). Disable the special
    // handling to keep the CMP with 0. This should be removed by
    // optimizeCompareInst by using the flags from the BSR/TZCNT used for the
    // cttz_zero_undef.
    auto MatchFFSMinus1 = [&](SDValue Op1, SDValue Op2) {
      return (Op1.getOpcode() == ISD::CTTZ_ZERO_UNDEF && Op1.hasOneUse() &&
              Op1.getOperand(0) == CmpOp0 && isAllOnesConstant(Op2));
    };
    if (Subtarget.canUseCMOV() && (VT == MVT::i32 || VT == MVT::i64) &&
        ((CondCode == X86::COND_NE && MatchFFSMinus1(Op1, Op2)) ||
         (CondCode == X86::COND_E && MatchFFSMinus1(Op2, Op1)))) {
      // Keep Cmp.
    } else if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
               (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
      SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
      SDVTList CmpVTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);

      // 'X - 1' sets the carry flag if X == 0.
      // '0 - X' sets the carry flag if X != 0.
      // Convert the carry flag to a -1/0 mask with sbb:
      // select (X != 0), -1, Y --> 0 - X; or (sbb), Y
      // select (X == 0), Y, -1 --> 0 - X; or (sbb), Y
      // select (X != 0), Y, -1 --> X - 1; or (sbb), Y
      // select (X == 0), -1, Y --> X - 1; or (sbb), Y
      SDValue Sub;
      if (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE)) {
        SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
        Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, Zero, CmpOp0);
      } else {
        SDValue One = DAG.getConstant(1, DL, CmpOp0.getValueType());
        Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, CmpOp0, One);
      }
      SDValue SBB = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                                Sub.getValue(1));
      return DAG.getNode(ISD::OR, DL, VT, SBB, Y);
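      // E.g. select (x != 0), -1, y on i32 lowers to roughly:
      //   negl %ecx          ; 0 - x, CF = (x != 0)
      //   sbbl %eax, %eax    ; %eax = CF ? -1 : 0
      //   orl  %esi, %eax    ; fold in y
      // a sketch with x assumed in %ecx and y in %esi.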
    } else if (!Subtarget.canUseCMOV() && CondCode == X86::COND_E &&
               CmpOp0.getOpcode() == ISD::AND &&
               isOneConstant(CmpOp0.getOperand(1))) {
      SDValue Src1, Src2;
      // True if Op2 is an XOR or OR operator and one of its operands equals
      // Op1:
      // ( a , a op b) || ( b , a op b)
      auto isOrXorPattern = [&]() {
        if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
            (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
          Src1 =
              Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
          Src2 = Op1;
          return true;
        }
        return false;
      };

      if (isOrXorPattern()) {
        SDValue Neg;
        unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
        // We need a mask of all zeros or all ones with the same size as the
        // other operands.
        if (CmpSz > VT.getSizeInBits())
          Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
        else if (CmpSz < VT.getSizeInBits())
          Neg = DAG.getNode(ISD::AND, DL, VT,
              DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
              DAG.getConstant(1, DL, VT));
        else
          Neg = CmpOp0;
        SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                   Neg); // -(and (x, 0x1))
        SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
        return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2);  // And Op y
      }
    } else if ((VT == MVT::i32 || VT == MVT::i64) && isNullConstant(Op2) &&
               Cmp.getNode()->hasOneUse() && (CmpOp0 == Op1) &&
               ((CondCode == X86::COND_S) ||                    // smin(x, 0)
                (CondCode == X86::COND_G && hasAndNot(Op1)))) { // smax(x, 0)
      // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
      //
      // If the comparison is testing for a positive value, we have to invert
      // the sign bit mask, so only do that transform if the target has a
      // bitwise 'and not' instruction (the invert is free).
      // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
      unsigned ShCt = VT.getSizeInBits() - 1;
      SDValue ShiftAmt = DAG.getConstant(ShCt, DL, VT);
      SDValue Shift = DAG.getNode(ISD::SRA, DL, VT, Op1, ShiftAmt);
      if (CondCode == X86::COND_G)
        Shift = DAG.getNOT(DL, Shift, VT);
      return DAG.getNode(ISD::AND, DL, VT, Shift, Op1);
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == X86ISD::SETCC ||
      CondOpcode == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    bool IllegalFPCMov = false;
    if (VT.isFloatingPoint() && !VT.isVector() &&
        !isScalarFPTypeInSSEReg(VT) && Subtarget.canUseCMOV()) // FPStack?
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());

    if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
        Cmp.getOpcode() == X86ISD::BT) { // FIXME
      Cond = Cmp;
      AddTest = false;
    }
  } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
             CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
             CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
    SDValue Value;
    X86::CondCode X86Cond;
    std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);

    CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
    AddTest = false;
  }

  if (AddTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      X86::CondCode X86CondCode;
      if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, X86CondCode)) {
        CC = DAG.getTargetConstant(X86CondCode, DL, MVT::i8);
        Cond = BT;
        AddTest = false;
      }
    }
  }

  if (AddTest) {
    CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
  }

  // a <  b ? -1 :  0 -> RES = ~setcc_carry
  // a <  b ?  0 : -1 -> RES = setcc_carry
  // a >= b ? -1 :  0 -> RES = setcc_carry
  // a >= b ?  0 : -1 -> RES = ~setcc_carry
  if (Cond.getOpcode() == X86ISD::SUB) {
    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();

    if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
        (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (isNullConstant(Op1) || isNullConstant(Op2))) {
      SDValue Res =
          DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                      DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
      if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }

  // X86 doesn't have an i8 cmov. If both operands are the result of a
  // truncate, widen the cmov and push the truncate through. This avoids
  // introducing a new branch during isel and doesn't add any extensions.
  if (Op.getValueType() == MVT::i8 &&
      Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
    SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
    if (T1.getValueType() == T2.getValueType() &&
        // Exclude CopyFromReg to avoid partial register stalls.
        T1.getOpcode() != ISD::CopyFromReg &&
        T2.getOpcode() != ISD::CopyFromReg) {
      SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
                                 CC, Cond);
      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
    }
  }

  // Or finally, promote i8 cmovs if we have CMOV,
  // or i16 cmovs if it won't prevent folding a load.
  // FIXME: We should not limit promotion of the i8 case to only when the CMOV
  // is legal, but EmitLoweredSelect() cannot deal with these extensions
  // being inserted between two CMOV's. (in i16 case too TBN)
  // https://bugs.llvm.org/show_bug.cgi?id=40974
  if ((Op.getValueType() == MVT::i8 && Subtarget.canUseCMOV()) ||
      (Op.getValueType() == MVT::i16 && !X86::mayFoldLoad(Op1, Subtarget) &&
       !X86::mayFoldLoad(Op2, Subtarget))) {
    Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
    Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
    SDValue Ops[] = { Op2, Op1, CC, Cond };
    SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
    return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
  }

  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  SDValue Ops[] = { Op2, Op1, CC, Cond };
  return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops, Op->getFlags());
}
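// The trailing CMOV node selects between its first two operands, so for i32
// the emitted code is a sketch like:
//   testl   %edi, %edi   ; EmitTest, when no usable flags exist yet
//   cmovnel %esi, %eax   ; take Op1 when the condition holds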
static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
  MVT VTElt = VT.getVectorElementType();
  SDLoc dl(Op);

  unsigned NumElts = VT.getVectorNumElements();

  // Extend VT if the scalar type is i8/i16 and BWI is not supported.
  MVT ExtVT = VT;
  if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
    // If v16i32 is to be avoided, we'll need to split and concatenate.
    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
      return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);

    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
  }

  // Widen to 512-bits if VLX is not supported.
  MVT WideVT = ExtVT;
  if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
    NumElts *= 512 / ExtVT.getSizeInBits();
    InVT = MVT::getVectorVT(MVT::i1, NumElts);
    In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
                     In, DAG.getIntPtrConstant(0, dl));
    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
  }

  SDValue V;
  MVT WideEltVT = WideVT.getVectorElementType();
  if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
      (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
    V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
  } else {
    SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
    SDValue Zero = DAG.getConstant(0, dl, WideVT);
    V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
  }

  // Truncate if we had to extend i16/i8 above.
  if (VT != ExtVT) {
    WideVT = MVT::getVectorVT(VTElt, NumElts);
    V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
  }

  // Extract back to 128/256-bit if we widened.
  if (WideVT != VT)
    V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
                    DAG.getIntPtrConstant(0, dl));

  return V;
}
static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                               SelectionDAG &DAG) {
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();

  if (InVT.getVectorElementType() == MVT::i1)
    return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);

  assert(Subtarget.hasAVX() && "Expected AVX support");
  return LowerAVXExtend(Op, DAG, Subtarget);
}
// Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
// For sign extend this needs to handle all vector sizes and SSE4.1 and
// non-SSE4.1 targets. For zero extend this should only handle inputs of
// MVT::v64i8 when BWI is not supported, but AVX512 is.
static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  SDValue In = Op->getOperand(0);
  MVT VT = Op->getSimpleValueType(0);
  MVT InVT = In.getSimpleValueType();

  MVT SVT = VT.getVectorElementType();
  MVT InSVT = InVT.getVectorElementType();
  assert(SVT.getFixedSizeInBits() > InSVT.getFixedSizeInBits());

  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
    return SDValue();
  if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
    return SDValue();
  if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
      !(VT.is256BitVector() && Subtarget.hasAVX()) &&
      !(VT.is512BitVector() && Subtarget.hasAVX512()))
    return SDValue();

  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();
  unsigned NumElts = VT.getVectorNumElements();

  // For 256-bit vectors, we only need the lower (128-bit) half of the input.
  // For 512-bit vectors, we need 128-bits or 256-bits.
  if (InVT.getSizeInBits() > 128) {
    // Input needs to be at least the same number of elements as output, and
    // at least 128-bits.
    int InSize = InSVT.getSizeInBits() * NumElts;
    In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
    InVT = In.getSimpleValueType();
  }

  // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit
  // results, so those are legal and shouldn't occur here. AVX2/AVX512 pmovsx*
  // instructions still need to be handled here for 256/512-bit results.
  if (Subtarget.hasInt256()) {
    assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");

    if (InVT.getVectorNumElements() != NumElts)
      return DAG.getNode(Op.getOpcode(), dl, VT, In);

    // FIXME: Apparently we create inreg operations that could be regular
    // extends.
    unsigned ExtOpc =
        Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
                                             : ISD::ZERO_EXTEND;
    return DAG.getNode(ExtOpc, dl, VT, In);
  }

  // Pre-AVX2, 256-bit extensions need to be split into 128-bit instructions.
  if (Subtarget.hasAVX()) {
    assert(VT.is256BitVector() && "256-bit vector expected");
    MVT HalfVT = VT.getHalfNumVectorElementsVT();
    int HalfNumElts = HalfVT.getVectorNumElements();

    unsigned NumSrcElts = InVT.getVectorNumElements();
    SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
    for (int i = 0; i != HalfNumElts; ++i)
      HiMask[i] = HalfNumElts + i;

    SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
    SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
    Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
  }

  // We should only get here for sign extend.
  assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
  assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
  unsigned InNumElts = InVT.getVectorNumElements();

  // If the source elements are already all-signbits, we don't need to extend,
  // just splat the elements.
  APInt DemandedElts = APInt::getLowBitsSet(InNumElts, NumElts);
  if (DAG.ComputeNumSignBits(In, DemandedElts) == InVT.getScalarSizeInBits()) {
    unsigned Scale = InNumElts / NumElts;
    SmallVector<int, 16> ShuffleMask;
    for (unsigned I = 0; I != NumElts; ++I)
      ShuffleMask.append(Scale, I);
    return DAG.getBitcast(VT,
                          DAG.getVectorShuffle(InVT, dl, In, In, ShuffleMask));
  }

  // Pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
  SDValue Curr = In;
  SDValue SignExt = Curr;

  // As SRAI is only available on i16/i32 types, we expand only up to i32
  // and handle i64 separately.
  if (InVT != MVT::v4i32) {
    MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;

    unsigned DestWidth = DestVT.getScalarSizeInBits();
    unsigned Scale = DestWidth / InSVT.getSizeInBits();
    unsigned DestElts = DestVT.getVectorNumElements();

    // Build a shuffle mask that takes each input element and places it in the
    // MSBs of the new element size.
    SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
    for (unsigned i = 0; i != DestElts; ++i)
      Mask[i * Scale + (Scale - 1)] = i;

    Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
    Curr = DAG.getBitcast(DestVT, Curr);

    unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
    SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
                          DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
  }

  if (VT == MVT::v2i64) {
    assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
    SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
    SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
    SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
    SignExt = DAG.getBitcast(VT, SignExt);
  }

  return SignExt;
}
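// A sketch of the pre-SSE4.1 path for sext v8i16 -> v4i32: the mask built
// above is {undef,0, undef,1, undef,2, undef,3} over i16 elements, so each
// source value lands in the high half of an i32 lane, and the VSRAI by 16
// then shifts it back down while replicating the sign bit.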
static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);

  if (InVT.getVectorElementType() == MVT::i1)
    return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);

  assert(VT.isVector() && InVT.isVector() && "Expected vector type");
  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Expected same number of elements");
  assert((VT.getVectorElementType() == MVT::i16 ||
          VT.getVectorElementType() == MVT::i32 ||
          VT.getVectorElementType() == MVT::i64) &&
         "Unexpected element type");
  assert((InVT.getVectorElementType() == MVT::i8 ||
          InVT.getVectorElementType() == MVT::i16 ||
          InVT.getVectorElementType() == MVT::i32) &&
         "Unexpected element type");

  if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
    assert(InVT == MVT::v32i8 && "Unexpected VT!");
    return splitVectorIntUnary(Op, DAG);
  }

  if (Subtarget.hasInt256())
    return Op;

  // Optimize vectors in AVX mode:
  // Sign extend v8i16 to v8i32 and
  //             v4i32 to v2i64.
  //
  // Divide the input vector into two parts (for v4i32 the high shuffle mask
  // will be {2, 3, -1, -1}), use the vpmovsx instruction to extend each half
  // (v4i32 -> v2i64; v8i16 -> v4i32), then concat the halves back to the
  // original VT.
  MVT HalfVT = VT.getHalfNumVectorElementsVT();
  SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);

  unsigned NumElems = InVT.getVectorNumElements();
  SmallVector<int, 8> ShufMask(NumElems, -1);
  for (unsigned i = 0; i != NumElems / 2; ++i)
    ShufMask[i] = i + NumElems / 2;

  SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
  OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
/// Change a vector store into a pair of half-size vector stores.
static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
  SDValue StoredVal = Store->getValue();
  assert((StoredVal.getValueType().is256BitVector() ||
          StoredVal.getValueType().is512BitVector()) &&
         "Expecting 256/512-bit op");

  // Splitting volatile memory ops is not allowed unless the operation was not
  // legal to begin with. Assume the input store is legal (this transform is
  // only used for targets with AVX). Note: It is possible that we have an
  // illegal type like v2i128, and so we could allow splitting a volatile store
  // in that case if that is important.
  if (!Store->isSimple())
    return SDValue();

  SDLoc DL(Store);
  SDValue Value0, Value1;
  std::tie(Value0, Value1) = splitVector(StoredVal, DAG, DL);
  unsigned HalfOffset = Value0.getValueType().getStoreSize();
  SDValue Ptr0 = Store->getBasePtr();
  SDValue Ptr1 =
      DAG.getMemBasePlusOffset(Ptr0, TypeSize::Fixed(HalfOffset), DL);
  SDValue Ch0 =
      DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
                   Store->getOriginalAlign(),
                   Store->getMemOperand()->getFlags());
  SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
                             Store->getPointerInfo().getWithOffset(HalfOffset),
                             Store->getOriginalAlign(),
                             Store->getMemOperand()->getFlags());
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
}

/// Scalarize a vector store, bitcasting to TargetVT to determine the scalar
/// type we need to store as.
static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
                                    SelectionDAG &DAG) {
  SDValue StoredVal = Store->getValue();
  assert(StoreVT.is128BitVector() &&
         StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
  StoredVal = DAG.getBitcast(StoreVT, StoredVal);

  // Splitting volatile memory ops is not allowed unless the operation was not
  // legal to begin with. We are assuming the input op is legal (this transform
  // is only used for targets with AVX).
  if (!Store->isSimple())
    return SDValue();

  MVT StoreSVT = StoreVT.getScalarType();
  unsigned NumElems = StoreVT.getVectorNumElements();
  unsigned ScalarSize = StoreSVT.getStoreSize();
  SDLoc DL(Store);

  SmallVector<SDValue, 4> Stores;
  for (unsigned i = 0; i != NumElems; ++i) {
    unsigned Offset = i * ScalarSize;
    SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(),
                                           TypeSize::Fixed(Offset), DL);
    SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
                              DAG.getIntPtrConstant(i, DL));
    SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
                              Store->getPointerInfo().getWithOffset(Offset),
                              Store->getOriginalAlign(),
                              Store->getMemOperand()->getFlags());
    Stores.push_back(Ch);
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
}
static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
  SDLoc dl(St);
  SDValue StoredVal = St->getValue();

  // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
  if (StoredVal.getValueType().isVector() &&
      StoredVal.getValueType().getVectorElementType() == MVT::i1) {
    unsigned NumElts = StoredVal.getValueType().getVectorNumElements();
    assert(NumElts <= 8 && "Unexpected VT");
    assert(!St->isTruncatingStore() && "Expected non-truncating store");
    assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
           "Expected AVX512F without AVX512DQI");

    // We must pad with zeros to ensure we store zeroes to any unused bits.
    StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
                            DAG.getUNDEF(MVT::v16i1), StoredVal,
                            DAG.getIntPtrConstant(0, dl));
    StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
    StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
    // Make sure we store zeros in the extra bits.
    if (NumElts < 8)
      StoredVal = DAG.getZeroExtendInReg(
          StoredVal, dl, EVT::getIntegerVT(*DAG.getContext(), NumElts));

    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }

  if (St->isTruncatingStore())
    return SDValue();

  // If this is a 256-bit store of concatenated ops, we are better off splitting
  // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
  // and each half can execute independently. Some cores would split the op into
  // halves anyway, so the concat (vinsertf128) is purely an extra op.
  MVT StoreVT = StoredVal.getSimpleValueType();
  if (StoreVT.is256BitVector() ||
      ((StoreVT == MVT::v32i16 || StoreVT == MVT::v64i8) &&
       !Subtarget.hasBWI())) {
    if (StoredVal.hasOneUse() && isFreeToSplitVector(StoredVal.getNode(), DAG))
      return splitVectorStore(St, DAG);
    return SDValue();
  }

  if (StoreVT.is32BitVector())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  assert(StoreVT.is64BitVector() && "Unexpected VT");
  assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
             TargetLowering::TypeWidenVector &&
         "Unexpected type action!");

  EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
  StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
                          DAG.getUNDEF(StoreVT));

  if (Subtarget.hasSSE2()) {
    // Widen the vector, cast to a v2x64 type, extract the single 64-bit
    // element, and store it.
    MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
    MVT CastVT = MVT::getVectorVT(StVT, 2);
    StoredVal = DAG.getBitcast(CastVT, StoredVal);
    StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
                            DAG.getIntPtrConstant(0, dl));

    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }
  assert(Subtarget.hasSSE1() && "Expected SSE");
  SDVTList Tys = DAG.getVTList(MVT::Other);
  SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
  return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
                                 St->getMemOperand());
}
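// Example of the 256-bit split above: a store of (concat_vectors %a, %b)
// becomes two independent 128-bit stores of %a and %b, so no vinsertf128 is
// ever materialized (a sketch; it applies only to simple, non-truncating
// stores that pass the isFreeToSplitVector check).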
// Lower vector extended loads using a shuffle. If SSSE3 is not available we
// may emit an illegal shuffle but the expansion is still better than scalar
// code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
// we'll emit a shuffle and an arithmetic shift.
// FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
// TODO: It is possible to support ZExt by zeroing the undef values during
// the shuffle phase or after the shuffle.
static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  MVT RegVT = Op.getSimpleValueType();
  assert(RegVT.isVector() && "We only custom lower vector loads.");
  assert(RegVT.isInteger() &&
         "We only custom lower integer vector loads.");

  LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
  SDLoc dl(Ld);

  // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
  if (RegVT.getVectorElementType() == MVT::i1) {
    assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
    assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
    assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
           "Expected AVX512F without AVX512DQI");

    SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
                                Ld->getPointerInfo(), Ld->getOriginalAlign(),
                                Ld->getMemOperand()->getFlags());

    // Replace chain users with the new chain.
    assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");

    SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
    Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
                      DAG.getBitcast(MVT::v16i1, Val),
                      DAG.getIntPtrConstant(0, dl));
    return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
  }

  return SDValue();
}
/// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
/// each of which has no other use apart from the AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
  Opc = Op.getOpcode();
  if (Opc != ISD::OR && Opc != ISD::AND)
    return false;
  return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(0).hasOneUse() &&
          Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
          Op.getOperand(1).hasOneUse());
}
SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  SDLoc dl(Op);

  // Bail out when we don't have native compare instructions.
  if (Cond.getOpcode() == ISD::SETCC &&
      Cond.getOperand(0).getValueType() != MVT::f128 &&
      !isSoftFP16(Cond.getOperand(0).getValueType())) {
    SDValue LHS = Cond.getOperand(0);
    SDValue RHS = Cond.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    // Special case for
    // setcc([su]{add,sub,mul}o == 0)
    // setcc([su]{add,sub,mul}o != 1)
    if (ISD::isOverflowIntrOpRes(LHS) &&
        (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        (isNullConstant(RHS) || isOneConstant(RHS))) {
      SDValue Value, Overflow;
      X86::CondCode X86Cond;
      std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, LHS.getValue(0), DAG);

      if ((CC == ISD::SETEQ) == isNullConstant(RHS))
        X86Cond = X86::GetOppositeBranchCondition(X86Cond);

      SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
      return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                         Overflow);
    }

    if (LHS.getSimpleValueType().isInteger()) {
      SDValue CCVal;
      SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, CC, SDLoc(Cond), DAG, CCVal);
      return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                         EFLAGS);
    }

    if (CC == ISD::SETOEQ) {
      // For FCMP_OEQ, we can emit
      // two branches instead of an explicit AND instruction with a
      // separate test. However, we only do this if this block doesn't
      // have a fall-through edge, because this requires an explicit
      // jmp when the condition is false.
      if (Op.getNode()->hasOneUse()) {
        SDNode *User = *Op.getNode()->use_begin();
        // Look for an unconditional branch following this conditional branch.
        // We need this because we need to reverse the successors in order
        // to implement FCMP_OEQ.
        if (User->getOpcode() == ISD::BR) {
          SDValue FalseBB = User->getOperand(1);
          SDNode *NewBR =
              DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
          assert(NewBR == User);
          (void)NewBR;
          Dest = FalseBB;

          SDValue Cmp =
              DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
          SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
          Chain = DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest,
                              CCVal, Cmp);
          CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
          return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                             Cmp);
        }
      }
    } else if (CC == ISD::SETUNE) {
      // For FCMP_UNE, we can emit
      // two branches instead of an explicit OR instruction with a
      // separate test.
      SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
      SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
      Chain =
          DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, Cmp);
      CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
      return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                         Cmp);
    } else {
      X86::CondCode X86Cond =
          TranslateX86CC(CC, dl, /*IsFP*/ true, LHS, RHS, DAG);
      SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
      SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
      return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                         Cmp);
    }
  }

  if (ISD::isOverflowIntrOpRes(Cond)) {
    SDValue Value, Overflow;
    X86::CondCode X86Cond;
    std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);

    SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                       Overflow);
  }

  // Look past the truncate if the high bits are known zero.
  if (isTruncWithZeroHighBitsInput(Cond, DAG))
    Cond = Cond.getOperand(0);

  EVT CondVT = Cond.getValueType();

  // Add an AND with 1 if we don't already have one.
  if (!(Cond.getOpcode() == ISD::AND && isOneConstant(Cond.getOperand(1))))
    Cond =
        DAG.getNode(ISD::AND, dl, CondVT, Cond, DAG.getConstant(1, dl, CondVT));

  SDValue LHS = Cond;
  SDValue RHS = DAG.getConstant(0, dl, CondVT);

  SDValue CCVal;
  SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, ISD::SETNE, dl, DAG, CCVal);
  return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                     EFLAGS);
}
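// Sketch of the SETOEQ case above once the successors are reversed: both
// emitted branches target the false block, and equality falls through:
//   ucomiss %xmm1, %xmm0
//   jne .LBB_false       ; X86::COND_NE
//   jp  .LBB_false       ; X86::COND_P catches the unordered case
//   (fall through to the true block)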
// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool SplitStack = MF.shouldSplitStack();
  bool EmitStackProbeCall = hasStackProbeSymbol(MF);
  bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
               SplitStack || EmitStackProbeCall;
  SDLoc dl(Op);

  // Get the inputs.
  SDNode *Node = Op.getNode();
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  MaybeAlign Alignment(Op.getConstantOperandVal(2));
  EVT VT = Node->getValueType(0);

  // Chain the dynamic stack allocation so that it doesn't modify the stack
  // pointer when other instructions are using the stack.
  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);

  bool Is64Bit = Subtarget.is64Bit();
  MVT SPTy = getPointerTy(DAG.getDataLayout());

  SDValue Result;
  if (!Lower) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
    assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
                    " not tell us which reg is the stack pointer!");

    const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
    const Align StackAlign = TFI.getStackAlign();
    if (hasInlineStackProbe(MF)) {
      MachineRegisterInfo &MRI = MF.getRegInfo();

      const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
      Register Vreg = MRI.createVirtualRegister(AddrRegClass);
      Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
      Result = DAG.getNode(X86ISD::PROBED_ALLOCA, dl, SPTy, Chain,
                           DAG.getRegister(Vreg, SPTy));
    } else {
      SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
      Chain = SP.getValue(1);
      Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
    }
    if (Alignment && *Alignment > StackAlign)
      Result =
          DAG.getNode(ISD::AND, dl, VT, Result,
                      DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
    Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
  } else if (SplitStack) {
    MachineRegisterInfo &MRI = MF.getRegInfo();

    if (Is64Bit) {
      // The 64-bit implementation of segmented stacks needs to clobber both
      // r10 and r11. This makes it impossible to use it along with nested
      // parameters.
      const Function &F = MF.getFunction();
      for (const auto &A : F.args()) {
        if (A.hasNestAttr())
          report_fatal_error("Cannot use segmented stacks with functions that "
                             "have nested arguments.");
      }
    }

    const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
    Register Vreg = MRI.createVirtualRegister(AddrRegClass);
    Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
    Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
                         DAG.getRegister(Vreg, SPTy));
  } else {
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    Chain = DAG.getNode(X86ISD::DYN_ALLOCA, dl, NodeTys, Chain, Size);
    MF.getInfo<X86MachineFunctionInfo>()->setHasDynAlloca(true);

    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    Register SPReg = RegInfo->getStackRegister();
    SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
    Chain = SP.getValue(1);

    if (Alignment) {
      SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
                       DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
      Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
    }

    Result = SP;
  }

  Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);

  SDValue Ops[2] = {Result, Chain};
  return DAG.getMergeValues(Ops, dl);
}
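// On Win64 the DYN_ALLOCA path ends up as a call to the stack probe routine
// before the stack pointer moves, so every 4K page is touched in order; a
// rough sketch for a large fixed-size case:
//   movl  $0x10000, %eax
//   callq __chkstk
//   subq  %rax, %rsp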
27409 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
27410 MachineFunction &MF = DAG.getMachineFunction();
27411 auto PtrVT = getPointerTy(MF.getDataLayout());
27412 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
27414 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
27417 if (!Subtarget.is64Bit() ||
27418 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
27419 // vastart just stores the address of the VarArgsFrameIndex slot into the
27420 // memory location argument.
27421 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
27422 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
27423 MachinePointerInfo(SV));
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (points to parameters coming in memory).
  //   reg_save_area     (points to the register save area).
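  // For reference, a sketch of the corresponding C struct under the SysV
  // AMD64 ABI (offsets assume LP64; the x32 ABI stores 4-byte pointers at
  // offsets 8 and 12 instead):
  //   struct __va_list_tag {
  //     unsigned gp_offset;      // offset 0
  //     unsigned fp_offset;      // offset 4
  //     void *overflow_arg_area; // offset 8
  //     void *reg_save_area;     // offset 16
  //   };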
27431 SmallVector<SDValue, 8> MemOps;
27432 SDValue FIN = Op.getOperand(1);
27434 SDValue Store = DAG.getStore(
27435 Op.getOperand(0), DL,
27436 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
27437 MachinePointerInfo(SV));
27438 MemOps.push_back(Store);
27441 FIN = DAG.getMemBasePlusOffset(FIN, TypeSize::Fixed(4), DL);
27442 Store = DAG.getStore(
27443 Op.getOperand(0), DL,
27444 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
27445 MachinePointerInfo(SV, 4));
27446 MemOps.push_back(Store);
27448 // Store ptr to overflow_arg_area
27449 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
27450 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  Store =
      DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
27453 MemOps.push_back(Store);
27455 // Store ptr to reg_save_area.
27456 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
27457 Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
27458 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
27459 Store = DAG.getStore(
27460 Op.getOperand(0), DL, RSFIN, FIN,
27461 MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
27462 MemOps.push_back(Store);
27463 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
27466 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
27467 assert(Subtarget.is64Bit() &&
27468 "LowerVAARG only handles 64-bit va_arg!");
27469 assert(Op.getNumOperands() == 4);
27471 MachineFunction &MF = DAG.getMachineFunction();
27472 if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
27473 // The Win64 ABI uses char* instead of a structure.
27474 return DAG.expandVAArg(Op.getNode());
27476 SDValue Chain = Op.getOperand(0);
27477 SDValue SrcPtr = Op.getOperand(1);
27478 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
27479 unsigned Align = Op.getConstantOperandVal(3);
27482 EVT ArgVT = Op.getNode()->getValueType(0);
27483 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
  uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
  uint8_t ArgMode;
27487 // Decide which area this value should be read from.
27488 // TODO: Implement the AMD64 ABI in its entirety. This simple
27489 // selection mechanism works only for the basic types.
27490 assert(ArgVT != MVT::f80 && "va_arg for f80 not yet implemented");
27491 if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
27492 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
  } else {
    assert(ArgVT.isInteger() && ArgSize <= 32 /*bytes*/ &&
           "Unhandled argument type in LowerVAARG");
    ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
  }
27499 if (ArgMode == 2) {
27500 // Make sure using fp_offset makes sense.
27501 assert(!Subtarget.useSoftFloat() &&
27502 !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
           Subtarget.hasSSE1());
  }
27506 // Insert VAARG node into the DAG
27507 // VAARG returns two values: Variable Argument Address, Chain
27508 SDValue InstOps[] = {Chain, SrcPtr,
27509 DAG.getTargetConstant(ArgSize, dl, MVT::i32),
27510 DAG.getTargetConstant(ArgMode, dl, MVT::i8),
27511 DAG.getTargetConstant(Align, dl, MVT::i32)};
27512 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
27513 SDValue VAARG = DAG.getMemIntrinsicNode(
27514 Subtarget.isTarget64BitLP64() ? X86ISD::VAARG_64 : X86ISD::VAARG_X32, dl,
27515 VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
27516 /*Alignment=*/std::nullopt,
27517 MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
27518 Chain = VAARG.getValue(1);
27520 // Load the next argument and return it
27521 return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
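// A worked example of the area selection above, illustrative values only:
// va_arg of an i32 or i64 takes ArgMode == 1 and is serviced from the GP
// register save area via gp_offset, while va_arg of a float or double
// (floating point, ArgSize <= 16) takes ArgMode == 2 and uses fp_offset.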
27524 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
27525 SelectionDAG &DAG) {
27526 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
27527 // where a va_list is still an i8*.
27528 assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
27529 if (Subtarget.isCallingConvWin64(
27530 DAG.getMachineFunction().getFunction().getCallingConv()))
27531 // Probably a Win64 va_copy.
27532 return DAG.expandVACopy(Op.getNode());
27534 SDValue Chain = Op.getOperand(0);
27535 SDValue DstPtr = Op.getOperand(1);
27536 SDValue SrcPtr = Op.getOperand(2);
27537 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
27538 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
27541 return DAG.getMemcpy(
27542 Chain, DL, DstPtr, SrcPtr,
27543 DAG.getIntPtrConstant(Subtarget.isTarget64BitLP64() ? 24 : 16, DL),
27544 Align(Subtarget.isTarget64BitLP64() ? 8 : 4), /*isVolatile*/ false, false,
27545 false, MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
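// The size operand above copies the entire __va_list_tag in one go; a
// hand-written LP64 equivalent would simply be (sketch, sizes taken from the
// struct layout shown in LowerVASTART):
//   memcpy(dst, src, 24); // 4 + 4 + 8 + 8 bytes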
27548 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
27549 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
  switch (Opc) {
  case ISD::SHL:
  case X86ISD::VSHL:
  case X86ISD::VSHLI:
    return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
  case ISD::SRL:
  case X86ISD::VSRL:
  case X86ISD::VSRLI:
    return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
  case ISD::SRA:
  case X86ISD::VSRA:
  case X86ISD::VSRAI:
    return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
  }
  llvm_unreachable("Unknown target vector shift node");
}
27567 /// Handle vector element shifts where the shift amount is a constant.
27568 /// Takes immediate version of shift as input.
27569 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
27570 SDValue SrcOp, uint64_t ShiftAmt,
27571 SelectionDAG &DAG) {
27572 MVT ElementType = VT.getVectorElementType();
27574 // Bitcast the source vector to the output type, this is mainly necessary for
27575 // vXi8/vXi64 shifts.
27576 if (VT != SrcOp.getSimpleValueType())
27577 SrcOp = DAG.getBitcast(VT, SrcOp);
27579 // Fold this packed shift into its first operand if ShiftAmt is 0.
27583 // Check for ShiftAmt >= element width
27584 if (ShiftAmt >= ElementType.getSizeInBits()) {
    if (Opc == X86ISD::VSRAI)
      ShiftAmt = ElementType.getSizeInBits() - 1;
    else
      return DAG.getConstant(0, dl, VT);
  }
27591 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
27592 && "Unknown target vector shift-by-constant node");
27594 // Fold this packed vector shift into a build vector if SrcOp is a
27595 // vector of Constants or UNDEFs.
  if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
    unsigned ShiftOpc;
    switch (Opc) {
    default: llvm_unreachable("Unknown opcode!");
    case X86ISD::VSHLI:
      ShiftOpc = ISD::SHL;
      break;
    case X86ISD::VSRLI:
      ShiftOpc = ISD::SRL;
      break;
    case X86ISD::VSRAI:
      ShiftOpc = ISD::SRA;
      break;
    }
27611 SDValue Amt = DAG.getConstant(ShiftAmt, dl, VT);
    if (SDValue C = DAG.FoldConstantArithmetic(ShiftOpc, dl, VT, {SrcOp, Amt}))
      return C;
  }

27616 return DAG.getNode(Opc, dl, VT, SrcOp,
27617 DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
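// Example (illustrative): a VSHLI of the constant vector <1, 2, 3, 4>
// (v4i32) by 3 folds via FoldConstantArithmetic to <8, 16, 24, 32>, while a
// non-constant source becomes the target shift node with an i8 immediate.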
27620 /// Handle vector element shifts by a splat shift amount
27621 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
27622 SDValue SrcOp, SDValue ShAmt, int ShAmtIdx,
27623 const X86Subtarget &Subtarget,
27624 SelectionDAG &DAG) {
27625 MVT AmtVT = ShAmt.getSimpleValueType();
27626 assert(AmtVT.isVector() && "Vector shift type mismatch");
27627 assert(0 <= ShAmtIdx && ShAmtIdx < (int)AmtVT.getVectorNumElements() &&
27628 "Illegal vector splat index");
27630 // Move the splat element to the bottom element.
27631 if (ShAmtIdx != 0) {
27632 SmallVector<int> Mask(AmtVT.getVectorNumElements(), -1);
27633 Mask[0] = ShAmtIdx;
27634 ShAmt = DAG.getVectorShuffle(AmtVT, dl, ShAmt, DAG.getUNDEF(AmtVT), Mask);
27637 // Peek through any zext node if we can get back to a 128-bit source.
27638 if (AmtVT.getScalarSizeInBits() == 64 &&
27639 (ShAmt.getOpcode() == ISD::ZERO_EXTEND ||
27640 ShAmt.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) &&
27641 ShAmt.getOperand(0).getValueType().isSimple() &&
27642 ShAmt.getOperand(0).getValueType().is128BitVector()) {
27643 ShAmt = ShAmt.getOperand(0);
27644 AmtVT = ShAmt.getSimpleValueType();
27647 // See if we can mask off the upper elements using the existing source node.
27648 // The shift uses the entire lower 64-bits of the amount vector, so no need to
27649 // do this for vXi64 types.
27650 bool IsMasked = false;
27651 if (AmtVT.getScalarSizeInBits() < 64) {
27652 if (ShAmt.getOpcode() == ISD::BUILD_VECTOR ||
27653 ShAmt.getOpcode() == ISD::SCALAR_TO_VECTOR) {
27654 // If the shift amount has come from a scalar, then zero-extend the scalar
27655 // before moving to the vector.
27656 ShAmt = DAG.getZExtOrTrunc(ShAmt.getOperand(0), dl, MVT::i32);
27657 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
27658 ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, ShAmt);
      AmtVT = MVT::v4i32;
      IsMasked = true;
27661 } else if (ShAmt.getOpcode() == ISD::AND) {
      // See if the shift amount is already masked (e.g. for rotation modulo),
      // then we can zero-extend it by setting all the other mask elements to
      // zero.
27665 SmallVector<SDValue> MaskElts(
27666 AmtVT.getVectorNumElements(),
27667 DAG.getConstant(0, dl, AmtVT.getScalarType()));
27668 MaskElts[0] = DAG.getAllOnesConstant(dl, AmtVT.getScalarType());
27669 SDValue Mask = DAG.getBuildVector(AmtVT, dl, MaskElts);
27670 if ((Mask = DAG.FoldConstantArithmetic(ISD::AND, dl, AmtVT,
27671 {ShAmt.getOperand(1), Mask}))) {
        ShAmt = DAG.getNode(ISD::AND, dl, AmtVT, ShAmt.getOperand(0), Mask);
        IsMasked = true;
      }
    }
  }
27678 // Extract if the shift amount vector is larger than 128-bits.
27679 if (AmtVT.getSizeInBits() > 128) {
27680 ShAmt = extract128BitVector(ShAmt, 0, DAG, dl);
27681 AmtVT = ShAmt.getSimpleValueType();
27684 // Zero-extend bottom element to v2i64 vector type, either by extension or
27685 // shuffle masking.
27686 if (!IsMasked && AmtVT.getScalarSizeInBits() < 64) {
27687 if (AmtVT == MVT::v4i32 && (ShAmt.getOpcode() == X86ISD::VBROADCAST ||
27688 ShAmt.getOpcode() == X86ISD::VBROADCAST_LOAD)) {
27689 ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, SDLoc(ShAmt), MVT::v4i32, ShAmt);
27690 } else if (Subtarget.hasSSE41()) {
27691 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
27692 MVT::v2i64, ShAmt);
    } else {
      SDValue ByteShift = DAG.getTargetConstant(
          (128 - AmtVT.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
      ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
      ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
                          ByteShift);
      ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
                          ByteShift);
    }
  }
27704 // Change opcode to non-immediate version.
27705 Opc = getTargetVShiftUniformOpcode(Opc, true);
27707 // The return type has to be a 128-bit type with the same element
27708 // type as the input type.
27709 MVT EltVT = VT.getVectorElementType();
27710 MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
27712 ShAmt = DAG.getBitcast(ShVT, ShAmt);
27713 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
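// Example (illustrative): a v8i16 shift whose amount is the splat of element
// 2 of another v8i16 vector is handled by shuffling that element into lane 0,
// zero-extending the bottom 64 bits, and then emitting e.g. X86ISD::VSRL with
// the v8i16 result type and a 128-bit amount vector.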
27716 /// Return Mask with the necessary casting or extending
27717 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
27718 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
27719 const X86Subtarget &Subtarget, SelectionDAG &DAG,
27722 if (isAllOnesConstant(Mask))
27723 return DAG.getConstant(1, dl, MaskVT);
27724 if (X86::isZeroNode(Mask))
27725 return DAG.getConstant(0, dl, MaskVT);
27727 assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
27729 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
27730 assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
27731 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
    // In 32-bit mode, a bitcast of i64 is illegal; split it into two i32
    // halves instead.
    SDValue Lo, Hi;
    std::tie(Lo, Hi) = DAG.SplitScalar(Mask, dl, MVT::i32, MVT::i32);
    Lo = DAG.getBitcast(MVT::v32i1, Lo);
    Hi = DAG.getBitcast(MVT::v32i1, Hi);
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
  }
27739 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
27740 Mask.getSimpleValueType().getSizeInBits());
  // When MaskVT equals v2i1 or v4i1, the lower 2 or 4 elements
  // are extracted with EXTRACT_SUBVECTOR.
27743 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
27744 DAG.getBitcast(BitcastVT, Mask),
27745 DAG.getIntPtrConstant(0, dl));
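// Example (illustrative): an i8 mask of 0b00000101 requested as v4i1 is
// bitcast to v8i1 and its low 4 elements are extracted, leaving lanes 0 and 2
// active.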
27749 /// Return (and \p Op, \p Mask) for compare instructions or
27750 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
27751 /// necessary casting or extending for \p Mask when lowering masking intrinsics
27752 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
27753 SDValue PreservedSrc,
27754 const X86Subtarget &Subtarget,
27755 SelectionDAG &DAG) {
27756 MVT VT = Op.getSimpleValueType();
27757 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
27758 unsigned OpcodeSelect = ISD::VSELECT;
  SDLoc dl(Op);

  if (isAllOnesConstant(Mask))
    return Op;

27764 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27766 if (PreservedSrc.isUndef())
27767 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
27768 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
27771 /// Creates an SDNode for a predicated scalar operation.
27772 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask comes in as MVT::i8 and must be transformed
/// to MVT::v1i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is that
/// the former uses "X86select" instead of "vselect": a "vselect" node simply
/// cannot be created for a scalar instruction.
27778 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
27779 SDValue PreservedSrc,
27780 const X86Subtarget &Subtarget,
27781 SelectionDAG &DAG) {
27783 if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
    if (MaskConst->getZExtValue() & 0x1)
      return Op;

  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
27791 SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
27792 DAG.getBitcast(MVT::v8i1, Mask),
27793 DAG.getIntPtrConstant(0, dl));
27794 if (Op.getOpcode() == X86ISD::FSETCCM ||
27795 Op.getOpcode() == X86ISD::FSETCCM_SAE ||
27796 Op.getOpcode() == X86ISD::VFPCLASSS)
27797 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
27799 if (PreservedSrc.isUndef())
27800 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
27801 return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
27804 static int getSEHRegistrationNodeSize(const Function *Fn) {
27805 if (!Fn->hasPersonalityFn())
27806 report_fatal_error(
27807 "querying registration node size for function without personality");
27808 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
27809 // WinEHStatePass for the full struct definition.
27810 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
27811 case EHPersonality::MSVC_X86SEH: return 24;
  case EHPersonality::MSVC_CXX: return 16;
  default: break;
  }
  report_fatal_error(
27816 "can only recover FP for 32-bit MSVC EH personality functions");
27819 /// When the MSVC runtime transfers control to us, either to an outlined
27820 /// function or when returning to a parent frame after catching an exception, we
27821 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
27822 /// Here's the math:
27823 /// RegNodeBase = EntryEBP - RegNodeSize
27824 /// ParentFP = RegNodeBase - ParentFrameOffset
27825 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
27826 /// subtracting the offset (negative on x86) takes us back to the parent FP.
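/// A worked example with purely illustrative numbers: with EntryEBP = 0x1000
/// and RegNodeSize = 24 (32-bit SEH), RegNodeBase = 0xfe8; a ParentFrameOffset
/// of -16 then gives ParentFP = 0xfe8 - (-16) = 0xff8.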
27827 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
27828 SDValue EntryEBP) {
27829 MachineFunction &MF = DAG.getMachineFunction();
27832 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27833 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
27835 // It's possible that the parent function no longer has a personality function
27836 // if the exceptional code was optimized away, in which case we just return
27837 // the incoming EBP.
  if (!Fn->hasPersonalityFn())
    return EntryEBP;
27841 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
27842 // registration, or the .set_setframe offset.
27843 MCSymbol *OffsetSym =
27844 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
27845 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
27846 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
27847 SDValue ParentFrameOffset =
27848 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
27850 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
27851 // prologue to RBP in the parent function.
27852 const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
27853 if (Subtarget.is64Bit())
27854 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
27856 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
27857 // RegNodeBase = EntryEBP - RegNodeSize
27858 // ParentFP = RegNodeBase - ParentFrameOffset
27859 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
27860 DAG.getConstant(RegNodeSize, dl, PtrVT));
27861 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
27864 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
27865 SelectionDAG &DAG) const {
27866 // Helper to detect if the operand is CUR_DIRECTION rounding mode.
27867 auto isRoundModeCurDirection = [](SDValue Rnd) {
27868 if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
      return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
    return false;
  };
27873 auto isRoundModeSAE = [](SDValue Rnd) {
27874 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
27875 unsigned RC = C->getZExtValue();
27876 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
27877 // Clear the NO_EXC bit and check remaining bits.
27878 RC ^= X86::STATIC_ROUNDING::NO_EXC;
        // As a convenience, we allow either no other bits set or an explicit
        // current-direction rounding mode.
        return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
      }
    }
    return false;
  };
27887 auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
27888 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
27889 RC = C->getZExtValue();
27890 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
27891 // Clear the NO_EXC bit and check remaining bits.
27892 RC ^= X86::STATIC_ROUNDING::NO_EXC;
27893 return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
27894 RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
27895 RC == X86::STATIC_ROUNDING::TO_POS_INF ||
             RC == X86::STATIC_ROUNDING::TO_ZERO;
      }
    }
    return false;
  };

  SDLoc dl(Op);
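  // Example (encoding values assumed from X86BaseInfo.h): a rounding operand
  // of 11 = TO_ZERO (3) | NO_EXC (8) satisfies isRoundModeSAEToX with
  // RC == TO_ZERO, while 12 = CUR_DIRECTION (4) | NO_EXC (8) only satisfies
  // isRoundModeSAE.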
27904 unsigned IntNo = Op.getConstantOperandVal(0);
27905 MVT VT = Op.getSimpleValueType();
27906 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
27908 // Propagate flags from original node to transformed node(s).
27909 SelectionDAG::FlagInserter FlagsInserter(DAG, Op->getFlags());
27912 switch(IntrData->Type) {
27913 case INTR_TYPE_1OP: {
27914 // We specify 2 possible opcodes for intrinsics with rounding modes.
27915 // First, we check if the intrinsic may have non-default rounding mode,
27916 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
27917 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
27918 if (IntrWithRoundingModeOpcode != 0) {
27919 SDValue Rnd = Op.getOperand(2);
27921 if (isRoundModeSAEToX(Rnd, RC))
27922 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
27924 DAG.getTargetConstant(RC, dl, MVT::i32));
27925 if (!isRoundModeCurDirection(Rnd))
27928 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27931 case INTR_TYPE_1OP_SAE: {
27932 SDValue Sae = Op.getOperand(2);
27935 if (isRoundModeCurDirection(Sae))
27936 Opc = IntrData->Opc0;
27937 else if (isRoundModeSAE(Sae))
27938 Opc = IntrData->Opc1;
27942 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
27944 case INTR_TYPE_2OP: {
27945 SDValue Src2 = Op.getOperand(2);
27947 // We specify 2 possible opcodes for intrinsics with rounding modes.
27948 // First, we check if the intrinsic may have non-default rounding mode,
27949 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
27950 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
27951 if (IntrWithRoundingModeOpcode != 0) {
27952 SDValue Rnd = Op.getOperand(3);
27954 if (isRoundModeSAEToX(Rnd, RC))
27955 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
27956 Op.getOperand(1), Src2,
27957 DAG.getTargetConstant(RC, dl, MVT::i32));
27958 if (!isRoundModeCurDirection(Rnd))
27962 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27963 Op.getOperand(1), Src2);
27965 case INTR_TYPE_2OP_SAE: {
27966 SDValue Sae = Op.getOperand(3);
27969 if (isRoundModeCurDirection(Sae))
27970 Opc = IntrData->Opc0;
27971 else if (isRoundModeSAE(Sae))
27972 Opc = IntrData->Opc1;
27976 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
27979 case INTR_TYPE_3OP:
27980 case INTR_TYPE_3OP_IMM8: {
27981 SDValue Src1 = Op.getOperand(1);
27982 SDValue Src2 = Op.getOperand(2);
27983 SDValue Src3 = Op.getOperand(3);
27985 if (IntrData->Type == INTR_TYPE_3OP_IMM8 &&
27986 Src3.getValueType() != MVT::i8) {
27987 Src3 = DAG.getTargetConstant(
27988 cast<ConstantSDNode>(Src3)->getZExtValue() & 0xff, dl, MVT::i8);
27991 // We specify 2 possible opcodes for intrinsics with rounding modes.
27992 // First, we check if the intrinsic may have non-default rounding mode,
27993 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
27994 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
27995 if (IntrWithRoundingModeOpcode != 0) {
27996 SDValue Rnd = Op.getOperand(4);
27998 if (isRoundModeSAEToX(Rnd, RC))
27999 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
28001 DAG.getTargetConstant(RC, dl, MVT::i32));
28002 if (!isRoundModeCurDirection(Rnd))
28006 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
28007 {Src1, Src2, Src3});
28009 case INTR_TYPE_4OP_IMM8: {
28010 assert(Op.getOperand(4)->getOpcode() == ISD::TargetConstant);
28011 SDValue Src4 = Op.getOperand(4);
28012 if (Src4.getValueType() != MVT::i8) {
28013 Src4 = DAG.getTargetConstant(
28014 cast<ConstantSDNode>(Src4)->getZExtValue() & 0xff, dl, MVT::i8);
28017 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
28018 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
28021 case INTR_TYPE_1OP_MASK: {
28022 SDValue Src = Op.getOperand(1);
28023 SDValue PassThru = Op.getOperand(2);
28024 SDValue Mask = Op.getOperand(3);
28025 // We add rounding mode to the Node when
28026 // - RC Opcode is specified and
28027 // - RC is not "current direction".
28028 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
28029 if (IntrWithRoundingModeOpcode != 0) {
28030 SDValue Rnd = Op.getOperand(4);
28032 if (isRoundModeSAEToX(Rnd, RC))
28033 return getVectorMaskingNode(
28034 DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
28035 Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
28036 Mask, PassThru, Subtarget, DAG);
28037 if (!isRoundModeCurDirection(Rnd))
28040 return getVectorMaskingNode(
28041 DAG.getNode(IntrData->Opc0, dl, VT, Src), Mask, PassThru,
28044 case INTR_TYPE_1OP_MASK_SAE: {
28045 SDValue Src = Op.getOperand(1);
28046 SDValue PassThru = Op.getOperand(2);
28047 SDValue Mask = Op.getOperand(3);
28048 SDValue Rnd = Op.getOperand(4);
28051 if (isRoundModeCurDirection(Rnd))
28052 Opc = IntrData->Opc0;
28053 else if (isRoundModeSAE(Rnd))
28054 Opc = IntrData->Opc1;
28058 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask, PassThru,
28061 case INTR_TYPE_SCALAR_MASK: {
28062 SDValue Src1 = Op.getOperand(1);
28063 SDValue Src2 = Op.getOperand(2);
28064 SDValue passThru = Op.getOperand(3);
28065 SDValue Mask = Op.getOperand(4);
28066 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
28067 // There are 2 kinds of intrinsics in this group:
    // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
    // (2) With both rounding mode and sae - 7 operands.
28070 bool HasRounding = IntrWithRoundingModeOpcode != 0;
28071 if (Op.getNumOperands() == (5U + HasRounding)) {
28073 SDValue Rnd = Op.getOperand(5);
28075 if (isRoundModeSAEToX(Rnd, RC))
28076 return getScalarMaskingNode(
28077 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
28078 DAG.getTargetConstant(RC, dl, MVT::i32)),
28079 Mask, passThru, Subtarget, DAG);
28080 if (!isRoundModeCurDirection(Rnd))
28083 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
28085 Mask, passThru, Subtarget, DAG);
28088 assert(Op.getNumOperands() == (6U + HasRounding) &&
28089 "Unexpected intrinsic form");
28090 SDValue RoundingMode = Op.getOperand(5);
28091 unsigned Opc = IntrData->Opc0;
28093 SDValue Sae = Op.getOperand(6);
28094 if (isRoundModeSAE(Sae))
28095 Opc = IntrWithRoundingModeOpcode;
28096 else if (!isRoundModeCurDirection(Sae))
28099 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
28100 Src2, RoundingMode),
28101 Mask, passThru, Subtarget, DAG);
28103 case INTR_TYPE_SCALAR_MASK_RND: {
28104 SDValue Src1 = Op.getOperand(1);
28105 SDValue Src2 = Op.getOperand(2);
28106 SDValue passThru = Op.getOperand(3);
28107 SDValue Mask = Op.getOperand(4);
28108 SDValue Rnd = Op.getOperand(5);
28112 if (isRoundModeCurDirection(Rnd))
28113 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
28114 else if (isRoundModeSAEToX(Rnd, RC))
28115 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
28116 DAG.getTargetConstant(RC, dl, MVT::i32));
28120 return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
28122 case INTR_TYPE_SCALAR_MASK_SAE: {
28123 SDValue Src1 = Op.getOperand(1);
28124 SDValue Src2 = Op.getOperand(2);
28125 SDValue passThru = Op.getOperand(3);
28126 SDValue Mask = Op.getOperand(4);
28127 SDValue Sae = Op.getOperand(5);
28129 if (isRoundModeCurDirection(Sae))
28130 Opc = IntrData->Opc0;
28131 else if (isRoundModeSAE(Sae))
28132 Opc = IntrData->Opc1;
28136 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
28137 Mask, passThru, Subtarget, DAG);
28139 case INTR_TYPE_2OP_MASK: {
28140 SDValue Src1 = Op.getOperand(1);
28141 SDValue Src2 = Op.getOperand(2);
28142 SDValue PassThru = Op.getOperand(3);
28143 SDValue Mask = Op.getOperand(4);
28145 if (IntrData->Opc1 != 0) {
28146 SDValue Rnd = Op.getOperand(5);
28148 if (isRoundModeSAEToX(Rnd, RC))
28149 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
28150 DAG.getTargetConstant(RC, dl, MVT::i32));
28151 else if (!isRoundModeCurDirection(Rnd))
28155 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
28156 return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
28158 case INTR_TYPE_2OP_MASK_SAE: {
28159 SDValue Src1 = Op.getOperand(1);
28160 SDValue Src2 = Op.getOperand(2);
28161 SDValue PassThru = Op.getOperand(3);
28162 SDValue Mask = Op.getOperand(4);
28164 unsigned Opc = IntrData->Opc0;
28165 if (IntrData->Opc1 != 0) {
28166 SDValue Sae = Op.getOperand(5);
28167 if (isRoundModeSAE(Sae))
28168 Opc = IntrData->Opc1;
28169 else if (!isRoundModeCurDirection(Sae))
28173 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
28174 Mask, PassThru, Subtarget, DAG);
28176 case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
28177 SDValue Src1 = Op.getOperand(1);
28178 SDValue Src2 = Op.getOperand(2);
28179 SDValue Src3 = Op.getOperand(3);
28180 SDValue PassThru = Op.getOperand(4);
28181 SDValue Mask = Op.getOperand(5);
28182 SDValue Sae = Op.getOperand(6);
28184 if (isRoundModeCurDirection(Sae))
28185 Opc = IntrData->Opc0;
28186 else if (isRoundModeSAE(Sae))
28187 Opc = IntrData->Opc1;
28191 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
28192 Mask, PassThru, Subtarget, DAG);
28194 case INTR_TYPE_3OP_MASK_SAE: {
28195 SDValue Src1 = Op.getOperand(1);
28196 SDValue Src2 = Op.getOperand(2);
28197 SDValue Src3 = Op.getOperand(3);
28198 SDValue PassThru = Op.getOperand(4);
28199 SDValue Mask = Op.getOperand(5);
28201 unsigned Opc = IntrData->Opc0;
28202 if (IntrData->Opc1 != 0) {
28203 SDValue Sae = Op.getOperand(6);
28204 if (isRoundModeSAE(Sae))
28205 Opc = IntrData->Opc1;
28206 else if (!isRoundModeCurDirection(Sae))
28209 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
28210 Mask, PassThru, Subtarget, DAG);
28213 SDValue Src1 = Op.getOperand(1);
28214 SDValue Src2 = Op.getOperand(2);
28215 SDValue Src3 = Op.getOperand(3);
28217 EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
28218 Src3 = DAG.getBitcast(MaskVT, Src3);
28220 // Reverse the operands to match VSELECT order.
28221 return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
28224 SDValue Src1 = Op.getOperand(1);
28225 SDValue Src2 = Op.getOperand(2);
    // Swap Src1 and Src2 in the node creation.
    return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
28230 case CFMA_OP_MASKZ:
28231 case CFMA_OP_MASK: {
28232 SDValue Src1 = Op.getOperand(1);
28233 SDValue Src2 = Op.getOperand(2);
28234 SDValue Src3 = Op.getOperand(3);
28235 SDValue Mask = Op.getOperand(4);
28236 MVT VT = Op.getSimpleValueType();
28238 SDValue PassThru = Src3;
28239 if (IntrData->Type == CFMA_OP_MASKZ)
28240 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
28242 // We add rounding mode to the Node when
28243 // - RC Opcode is specified and
28244 // - RC is not "current direction".
28246 if (IntrData->Opc1 != 0) {
28247 SDValue Rnd = Op.getOperand(5);
28249 if (isRoundModeSAEToX(Rnd, RC))
28250 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2, Src3,
28251 DAG.getTargetConstant(RC, dl, MVT::i32));
28252 else if (!isRoundModeCurDirection(Rnd))
28256 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, Src3);
28257 return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
    // NOTE: We need to swizzle the operands to pass the multiply operands
    // first.
28262 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
28263 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
28265 SDValue Src1 = Op.getOperand(1);
28266 SDValue Imm = Op.getOperand(2);
28267 SDValue Mask = Op.getOperand(3);
28268 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
28269 SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
28271 // Need to fill with zeros to ensure the bitcast will produce zeroes
28272 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
28273 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
28274 DAG.getConstant(0, dl, MVT::v8i1),
28275 FPclassMask, DAG.getIntPtrConstant(0, dl));
28276 return DAG.getBitcast(MVT::i8, Ins);
28279 case CMP_MASK_CC: {
28280 MVT MaskVT = Op.getSimpleValueType();
28281 SDValue CC = Op.getOperand(3);
28282 SDValue Mask = Op.getOperand(4);
28283 // We specify 2 possible opcodes for intrinsics with rounding modes.
28284 // First, we check if the intrinsic may have non-default rounding mode,
28285 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
28286 if (IntrData->Opc1 != 0) {
28287 SDValue Sae = Op.getOperand(5);
28288 if (isRoundModeSAE(Sae))
28289 return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
28290 Op.getOperand(2), CC, Mask, Sae);
28291 if (!isRoundModeCurDirection(Sae))
    // Default rounding mode.
28295 return DAG.getNode(IntrData->Opc0, dl, MaskVT,
28296 {Op.getOperand(1), Op.getOperand(2), CC, Mask});
28298 case CMP_MASK_SCALAR_CC: {
28299 SDValue Src1 = Op.getOperand(1);
28300 SDValue Src2 = Op.getOperand(2);
28301 SDValue CC = Op.getOperand(3);
28302 SDValue Mask = Op.getOperand(4);
28305 if (IntrData->Opc1 != 0) {
28306 SDValue Sae = Op.getOperand(5);
28307 if (isRoundModeSAE(Sae))
28308 Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
28309 else if (!isRoundModeCurDirection(Sae))
    // Default rounding mode.
28313 if (!Cmp.getNode())
28314 Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
28316 SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
28318 // Need to fill with zeros to ensure the bitcast will produce zeroes
28319 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
28320 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
28321 DAG.getConstant(0, dl, MVT::v8i1),
28322 CmpMask, DAG.getIntPtrConstant(0, dl));
28323 return DAG.getBitcast(MVT::i8, Ins);
28325 case COMI: { // Comparison intrinsics
28326 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
28327 SDValue LHS = Op.getOperand(1);
28328 SDValue RHS = Op.getOperand(2);
28329 // Some conditions require the operands to be swapped.
28330 if (CC == ISD::SETLT || CC == ISD::SETLE)
28331 std::swap(LHS, RHS);
28333 SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
    SDValue SetCC;
    switch (CC) {
    case ISD::SETEQ: { // (ZF = 1 and PF = 0)
      SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
      SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
      SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
      break;
    }
    case ISD::SETNE: { // (ZF = 0 or PF = 1)
      SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
      SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
      SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
      break;
    }
    case ISD::SETGT: // (CF = 0 and ZF = 0)
    case ISD::SETLT: { // Condition opposite to GT. Operands swapped above.
      SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
      break;
    }
    case ISD::SETGE: // CF = 0
    case ISD::SETLE: // Condition opposite to GE. Operands swapped above.
      SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
      break;
    default:
      llvm_unreachable("Unexpected illegal condition!");
    }
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
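    // Example (illustrative): for ucomisd with equal, ordered inputs the
    // flags are ZF = 1, PF = 0, CF = 0, so the SETEQ path above ANDs COND_E
    // with COND_NP to reject the unordered case (where PF = 1).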
28362 case COMI_RM: { // Comparison intrinsics with Sae
28363 SDValue LHS = Op.getOperand(1);
28364 SDValue RHS = Op.getOperand(2);
28365 unsigned CondVal = Op.getConstantOperandVal(3);
28366 SDValue Sae = Op.getOperand(4);
28369 if (isRoundModeCurDirection(Sae))
28370 FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
28371 DAG.getTargetConstant(CondVal, dl, MVT::i8));
28372 else if (isRoundModeSAE(Sae))
28373 FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
28374 DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
28377 // Need to fill with zeros to ensure the bitcast will produce zeroes
28378 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
28379 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
28380 DAG.getConstant(0, dl, MVT::v16i1),
28381 FCmp, DAG.getIntPtrConstant(0, dl));
28382 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
28383 DAG.getBitcast(MVT::i16, Ins));
28386 SDValue SrcOp = Op.getOperand(1);
28387 SDValue ShAmt = Op.getOperand(2);
28388 assert(ShAmt.getValueType() == MVT::i32 &&
28389 "Unexpected VSHIFT amount type");
28391 // Catch shift-by-constant.
28392 if (auto *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
28393 return getTargetVShiftByConstNode(IntrData->Opc0, dl,
28394 Op.getSimpleValueType(), SrcOp,
28395 CShAmt->getZExtValue(), DAG);
28397 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
28398 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
28399 SrcOp, ShAmt, 0, Subtarget, DAG);
28401 case COMPRESS_EXPAND_IN_REG: {
28402 SDValue Mask = Op.getOperand(3);
28403 SDValue DataToCompress = Op.getOperand(1);
28404 SDValue PassThru = Op.getOperand(2);
28405 if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
28406 return Op.getOperand(1);
28408 // Avoid false dependency.
28409 if (PassThru.isUndef())
28410 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
28412 return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
28416 case FIXUPIMM_MASKZ: {
28417 SDValue Src1 = Op.getOperand(1);
28418 SDValue Src2 = Op.getOperand(2);
28419 SDValue Src3 = Op.getOperand(3);
28420 SDValue Imm = Op.getOperand(4);
28421 SDValue Mask = Op.getOperand(5);
28422 SDValue Passthru = (IntrData->Type == FIXUPIMM)
28424 : getZeroVector(VT, Subtarget, DAG, dl);
28426 unsigned Opc = IntrData->Opc0;
28427 if (IntrData->Opc1 != 0) {
28428 SDValue Sae = Op.getOperand(6);
28429 if (isRoundModeSAE(Sae))
28430 Opc = IntrData->Opc1;
28431 else if (!isRoundModeCurDirection(Sae))
28435 SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
28437 if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
28438 return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
28440 return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
28443 assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
28444 // Clear the upper bits of the rounding immediate so that the legacy
28445 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
28446 auto Round = cast<ConstantSDNode>(Op.getOperand(2));
28447 SDValue RoundingMode =
28448 DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
28449 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
28450 Op.getOperand(1), RoundingMode);
28453 assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
28454 // Clear the upper bits of the rounding immediate so that the legacy
28455 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
28456 auto Round = cast<ConstantSDNode>(Op.getOperand(3));
28457 SDValue RoundingMode =
28458 DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
28459 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
28460 Op.getOperand(1), Op.getOperand(2), RoundingMode);
28463 assert(IntrData->Opc0 == X86ISD::BEXTRI && "Unexpected opcode");
28465 uint64_t Imm = Op.getConstantOperandVal(2);
28466 SDValue Control = DAG.getTargetConstant(Imm & 0xffff, dl,
28467 Op.getValueType());
28468 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
28469 Op.getOperand(1), Control);
28473 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
28474 SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
    SDValue Res;
    // If the carry in is zero, then we should just use ADD/SUB instead of
    // ADC/SBB.
    if (isNullConstant(Op.getOperand(1))) {
      Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
                        Op.getOperand(3));
    } else {
      SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
                                  DAG.getConstant(-1, dl, MVT::i8));
      Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
                        Op.getOperand(3), GenCF.getValue(1));
    }
28488 SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
28489 SDValue Results[] = { SetCC, Res };
28490 return DAG.getMergeValues(Results, dl);
28492 case CVTPD2PS_MASK:
28493 case CVTPD2DQ_MASK:
28494 case CVTQQ2PS_MASK:
28495 case TRUNCATE_TO_REG: {
28496 SDValue Src = Op.getOperand(1);
28497 SDValue PassThru = Op.getOperand(2);
28498 SDValue Mask = Op.getOperand(3);
28500 if (isAllOnesConstant(Mask))
28501 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
28503 MVT SrcVT = Src.getSimpleValueType();
28504 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
28505 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28506 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
28507 {Src, PassThru, Mask});
28509 case CVTPS2PH_MASK: {
28510 SDValue Src = Op.getOperand(1);
28511 SDValue Rnd = Op.getOperand(2);
28512 SDValue PassThru = Op.getOperand(3);
28513 SDValue Mask = Op.getOperand(4);
    unsigned RC = 0;
    unsigned Opc = IntrData->Opc0;
    bool SAE = Src.getValueType().is512BitVector() &&
               (isRoundModeSAEToX(Rnd, RC) || isRoundModeSAE(Rnd));
    if (SAE) {
      Opc = X86ISD::CVTPS2PH_SAE;
      Rnd = DAG.getTargetConstant(RC, dl, MVT::i32);
    }

    if (isAllOnesConstant(Mask))
      return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd);

    if (SAE)
      Opc = X86ISD::MCVTPS2PH_SAE;
    else
      Opc = IntrData->Opc1;
28531 MVT SrcVT = Src.getSimpleValueType();
28532 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
28533 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28534 return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd, PassThru, Mask);
28536 case CVTNEPS2BF16_MASK: {
28537 SDValue Src = Op.getOperand(1);
28538 SDValue PassThru = Op.getOperand(2);
28539 SDValue Mask = Op.getOperand(3);
28541 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
28542 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
28544 // Break false dependency.
28545 if (PassThru.isUndef())
28546 PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
28548 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
28557 default: return SDValue(); // Don't custom lower most intrinsics.
  // ptest and testp intrinsics. The intrinsics these come from are designed to
  // return an integer value, not just an instruction, so lower them to the
  // ptest or testp pattern and a setcc for the result.
28562 case Intrinsic::x86_avx512_ktestc_b:
28563 case Intrinsic::x86_avx512_ktestc_w:
28564 case Intrinsic::x86_avx512_ktestc_d:
28565 case Intrinsic::x86_avx512_ktestc_q:
28566 case Intrinsic::x86_avx512_ktestz_b:
28567 case Intrinsic::x86_avx512_ktestz_w:
28568 case Intrinsic::x86_avx512_ktestz_d:
28569 case Intrinsic::x86_avx512_ktestz_q:
28570 case Intrinsic::x86_sse41_ptestz:
28571 case Intrinsic::x86_sse41_ptestc:
28572 case Intrinsic::x86_sse41_ptestnzc:
28573 case Intrinsic::x86_avx_ptestz_256:
28574 case Intrinsic::x86_avx_ptestc_256:
28575 case Intrinsic::x86_avx_ptestnzc_256:
28576 case Intrinsic::x86_avx_vtestz_ps:
28577 case Intrinsic::x86_avx_vtestc_ps:
28578 case Intrinsic::x86_avx_vtestnzc_ps:
28579 case Intrinsic::x86_avx_vtestz_pd:
28580 case Intrinsic::x86_avx_vtestc_pd:
28581 case Intrinsic::x86_avx_vtestnzc_pd:
28582 case Intrinsic::x86_avx_vtestz_ps_256:
28583 case Intrinsic::x86_avx_vtestc_ps_256:
28584 case Intrinsic::x86_avx_vtestnzc_ps_256:
28585 case Intrinsic::x86_avx_vtestz_pd_256:
28586 case Intrinsic::x86_avx_vtestc_pd_256:
28587 case Intrinsic::x86_avx_vtestnzc_pd_256: {
28588 unsigned TestOpc = X86ISD::PTEST;
    X86::CondCode X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
    case Intrinsic::x86_avx512_ktestc_b:
    case Intrinsic::x86_avx512_ktestc_w:
    case Intrinsic::x86_avx512_ktestc_d:
    case Intrinsic::x86_avx512_ktestc_q:
      // CF = 1
      TestOpc = X86ISD::KTEST;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx512_ktestz_b:
    case Intrinsic::x86_avx512_ktestz_w:
    case Intrinsic::x86_avx512_ktestz_d:
    case Intrinsic::x86_avx512_ktestz_q:
      TestOpc = X86ISD::KTEST;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_vtestz_pd_256:
      TestOpc = X86ISD::TESTP;
      [[fallthrough]];
    case Intrinsic::x86_sse41_ptestz:
    case Intrinsic::x86_avx_ptestz_256:
      // ZF = 1
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestc_pd_256:
      TestOpc = X86ISD::TESTP;
      [[fallthrough]];
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_avx_ptestc_256:
      // CF = 1
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
      TestOpc = X86ISD::TESTP;
      [[fallthrough]];
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_avx_ptestnzc_256:
      // ZF and CF = 0
      X86CC = X86::COND_A;
      break;
    }
28642 SDValue LHS = Op.getOperand(1);
28643 SDValue RHS = Op.getOperand(2);
28644 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
28645 SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
28646 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
28649 case Intrinsic::x86_sse42_pcmpistria128:
28650 case Intrinsic::x86_sse42_pcmpestria128:
28651 case Intrinsic::x86_sse42_pcmpistric128:
28652 case Intrinsic::x86_sse42_pcmpestric128:
28653 case Intrinsic::x86_sse42_pcmpistrio128:
28654 case Intrinsic::x86_sse42_pcmpestrio128:
28655 case Intrinsic::x86_sse42_pcmpistris128:
28656 case Intrinsic::x86_sse42_pcmpestris128:
28657 case Intrinsic::x86_sse42_pcmpistriz128:
28658 case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    X86::CondCode X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_E;
      break;
    }
28704 SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
28705 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
28706 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
28707 SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
28708 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
28711 case Intrinsic::x86_sse42_pcmpistri128:
28712 case Intrinsic::x86_sse42_pcmpestri128: {
28714 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
28715 Opcode = X86ISD::PCMPISTR;
28717 Opcode = X86ISD::PCMPESTR;
28719 SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
28720 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
28721 return DAG.getNode(Opcode, dl, VTs, NewOps);
28724 case Intrinsic::x86_sse42_pcmpistrm128:
28725 case Intrinsic::x86_sse42_pcmpestrm128: {
28727 if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
28728 Opcode = X86ISD::PCMPISTR;
28730 Opcode = X86ISD::PCMPESTR;
28732 SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
28733 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
28734 return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
28737 case Intrinsic::eh_sjlj_lsda: {
28738 MachineFunction &MF = DAG.getMachineFunction();
28739 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28740 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
28741 auto &Context = MF.getMMI().getContext();
28742 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
28743 Twine(MF.getFunctionNumber()));
28744 return DAG.getNode(getGlobalWrapperKind(), dl, VT,
28745 DAG.getMCSymbol(S, PtrVT));
28748 case Intrinsic::x86_seh_lsda: {
28749 // Compute the symbol for the LSDA. We know it'll get emitted later.
28750 MachineFunction &MF = DAG.getMachineFunction();
28751 SDValue Op1 = Op.getOperand(1);
28752 auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
28753 MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
28754 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
28756 // Generate a simple absolute symbol reference. This intrinsic is only
28757 // supported on 32-bit Windows, which isn't PIC.
28758 SDValue Result = DAG.getMCSymbol(LSDASym, VT);
28759 return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
28762 case Intrinsic::eh_recoverfp: {
28763 SDValue FnOp = Op.getOperand(1);
28764 SDValue IncomingFPOp = Op.getOperand(2);
28765 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
28766 auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
    if (!Fn)
      report_fatal_error(
28769 "llvm.eh.recoverfp must take a function as the first argument");
28770 return recoverFramePointer(DAG, Fn, IncomingFPOp);
28773 case Intrinsic::localaddress: {
28774 // Returns one of the stack, base, or frame pointer registers, depending on
28775 // which is used to reference local variables.
28776 MachineFunction &MF = DAG.getMachineFunction();
28777 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    Register Reg;
    if (RegInfo->hasBasePointer(MF))
      Reg = RegInfo->getBaseRegister();
    else { // Handles the SP or FP case.
      bool CantUseFP = RegInfo->hasStackRealignment(MF);
      if (CantUseFP)
        Reg = RegInfo->getPtrSizedStackRegister(MF);
      else
        Reg = RegInfo->getPtrSizedFrameRegister(MF);
    }
28788 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
28790 case Intrinsic::x86_avx512_vp2intersect_q_512:
28791 case Intrinsic::x86_avx512_vp2intersect_q_256:
28792 case Intrinsic::x86_avx512_vp2intersect_q_128:
28793 case Intrinsic::x86_avx512_vp2intersect_d_512:
28794 case Intrinsic::x86_avx512_vp2intersect_d_256:
28795 case Intrinsic::x86_avx512_vp2intersect_d_128: {
28796 MVT MaskVT = Op.getSimpleValueType();
28798 SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
28801 SDValue Operation =
28802 DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
28803 Op->getOperand(1), Op->getOperand(2));
28805 SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
28806 MaskVT, Operation);
28807 SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
28808 MaskVT, Operation);
28809 return DAG.getMergeValues({Result0, Result1}, DL);
28811 case Intrinsic::x86_mmx_pslli_w:
28812 case Intrinsic::x86_mmx_pslli_d:
28813 case Intrinsic::x86_mmx_pslli_q:
28814 case Intrinsic::x86_mmx_psrli_w:
28815 case Intrinsic::x86_mmx_psrli_d:
28816 case Intrinsic::x86_mmx_psrli_q:
28817 case Intrinsic::x86_mmx_psrai_w:
28818 case Intrinsic::x86_mmx_psrai_d: {
28820 SDValue ShAmt = Op.getOperand(2);
28821 // If the argument is a constant, convert it to a target constant.
28822 if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
      // Clamp out-of-bounds shift amounts, since they would otherwise be
      // masked to 8 bits, which could bring them back into bounds.
28825 unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
28826 if (ShiftAmount == 0)
28827 return Op.getOperand(1);
28829 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
28830 Op.getOperand(0), Op.getOperand(1),
28831 DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
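    // Example (illustrative): pslli.w by 257 would become a shift by
    // 257 & 255 = 1 if the amount were naively masked to 8 bits; clamping to
    // 255 first keeps it out of bounds for 16-bit elements, so the logical
    // shift still produces zero.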
    unsigned NewIntrinsic;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
    case Intrinsic::x86_mmx_pslli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psll_w;
      break;
    case Intrinsic::x86_mmx_pslli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psll_d;
      break;
    case Intrinsic::x86_mmx_pslli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psll_q;
      break;
    case Intrinsic::x86_mmx_psrli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
      break;
    case Intrinsic::x86_mmx_psrli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
      break;
    case Intrinsic::x86_mmx_psrli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
      break;
    case Intrinsic::x86_mmx_psrai_w:
      NewIntrinsic = Intrinsic::x86_mmx_psra_w;
      break;
    case Intrinsic::x86_mmx_psrai_d:
      NewIntrinsic = Intrinsic::x86_mmx_psra_d;
      break;
    }
    // The vector shift intrinsics with scalars use 32-bit shift amounts but
    // the SSE2/MMX shift instructions read 64 bits. Copy the 32 bits to an
    // MMX register.
28866 ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
28867 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
28868 DAG.getTargetConstant(NewIntrinsic, DL,
28869 getPointerTy(DAG.getDataLayout())),
28870 Op.getOperand(1), ShAmt);
28872 case Intrinsic::thread_pointer: {
28873 if (Subtarget.isTargetELF()) {
28875 EVT PtrVT = getPointerTy(DAG.getDataLayout());
28876 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
28877 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(
28878 *DAG.getContext(), Subtarget.is64Bit() ? X86AS::FS : X86AS::GS));
28879 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
28880 DAG.getIntPtrConstant(0, dl), MachinePointerInfo(Ptr));
28882 report_fatal_error(
28883 "Target OS doesn't support __builtin_thread_pointer() yet.");
28888 static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
28889 SDValue Src, SDValue Mask, SDValue Base,
28890 SDValue Index, SDValue ScaleOp, SDValue Chain,
28891 const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
28897 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28898 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
28899 TLI.getPointerTy(DAG.getDataLayout()));
28900 EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
28901 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
28902 // If source is undef or we know it won't be used, use a zero vector
28903 // to break register dependency.
28904 // TODO: use undef instead and let BreakFalseDeps deal with it?
28905 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
28906 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
28908 // Cast mask to an integer type.
28909 Mask = DAG.getBitcast(MaskVT, Mask);
28911 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
28913 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
  SDValue Res =
      DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
28916 MemIntr->getMemoryVT(), MemIntr->getMemOperand());
28917 return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
28920 static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
28921 SDValue Src, SDValue Mask, SDValue Base,
28922 SDValue Index, SDValue ScaleOp, SDValue Chain,
28923 const X86Subtarget &Subtarget) {
28924 MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
28930 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28931 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
28932 TLI.getPointerTy(DAG.getDataLayout()));
28933 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
28934 VT.getVectorNumElements());
28935 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
28937 // We support two versions of the gather intrinsics. One with scalar mask and
28938 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
28939 if (Mask.getValueType() != MaskVT)
28940 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28942 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
28943 // If source is undef or we know it won't be used, use a zero vector
28944 // to break register dependency.
28945 // TODO: use undef instead and let BreakFalseDeps deal with it?
28946 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
28947 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
28949 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
28951 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
  SDValue Res =
      DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
28954 MemIntr->getMemoryVT(), MemIntr->getMemOperand());
28955 return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
28958 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
28959 SDValue Src, SDValue Mask, SDValue Base,
28960 SDValue Index, SDValue ScaleOp, SDValue Chain,
28961 const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
28967 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28968 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
28969 TLI.getPointerTy(DAG.getDataLayout()));
28970 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
28971 Src.getSimpleValueType().getVectorNumElements());
28972 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
28974 // We support two versions of the scatter intrinsics. One with scalar mask and
28975 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
28976 if (Mask.getValueType() != MaskVT)
28977 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28979 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
28981 SDVTList VTs = DAG.getVTList(MVT::Other);
28982 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
  SDValue Res =
      DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
                              MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return Res;
}
28989 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
28990 SDValue Mask, SDValue Base, SDValue Index,
28991 SDValue ScaleOp, SDValue Chain,
28992 const X86Subtarget &Subtarget) {
28993 SDLoc dl(Op);
28994 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
28995 // Scale must be constant.
28996 if (!C)
28997 return SDValue();
28998 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28999 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
29000 TLI.getPointerTy(DAG.getDataLayout()));
29001 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
29002 SDValue Segment = DAG.getRegister(0, MVT::i32);
29003 MVT MaskVT =
29004 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
29005 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
29006 SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
29007 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
29008 return SDValue(Res, 0);
29011 /// Handles the lowering of builtin intrinsics with chain that return their
29012 /// value into registers EDX:EAX.
29013 /// If operand SrcReg is a valid register identifier, then operand 2 of N is
29014 /// copied to SrcReg. The assumption is that SrcReg is an implicit input to
29015 /// the expanded intrinsic.
29016 /// Returns a Glue value which can be used to add extra copy-from-reg if the
29017 /// expanded intrinsic implicitly defines extra registers (i.e. not just
29018 /// EDX:EAX).
29019 static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
29020 SelectionDAG &DAG,
29021 unsigned TargetOpcode,
29022 unsigned SrcReg,
29023 const X86Subtarget &Subtarget,
29024 SmallVectorImpl<SDValue> &Results) {
29025 SDValue Chain = N->getOperand(0);
29026 SDValue Glue;
29028 if (SrcReg) {
29029 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
29030 Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
29031 Glue = Chain.getValue(1);
29032 }
29034 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
29035 SDValue N1Ops[] = {Chain, Glue};
29036 SDNode *N1 = DAG.getMachineNode(
29037 TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
29038 Chain = SDValue(N1, 0);
29040 // Reads the content of XCR and returns it in registers EDX:EAX.
29041 SDValue LO, HI;
29042 if (Subtarget.is64Bit()) {
29043 LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
29044 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
29045 LO.getValue(2));
29046 } else {
29047 LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
29048 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
29049 LO.getValue(2));
29050 }
29051 Chain = HI.getValue(1);
29052 Glue = HI.getValue(2);
29054 if (Subtarget.is64Bit()) {
29055 // Merge the two 32-bit values into a 64-bit one.
29056 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
29057 DAG.getConstant(32, DL, MVT::i8));
29058 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
29059 Results.push_back(Chain);
29060 return Glue;
29061 }
29063 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
29064 SDValue Ops[] = { LO, HI };
29065 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
29066 Results.push_back(Pair);
29067 Results.push_back(Chain);
29068 return Glue;
29069 }
29071 /// Handles the lowering of builtin intrinsics that read the time stamp counter
29072 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
29073 /// READCYCLECOUNTER nodes.
29074 static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
29075 SelectionDAG &DAG,
29076 const X86Subtarget &Subtarget,
29077 SmallVectorImpl<SDValue> &Results) {
29078 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
29079 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
29080 // and the EAX register is loaded with the low-order 32 bits.
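// Added illustration: if the counter reads EDX = 0x00000012 and
// EAX = 0x34567890, the merged 64-bit result below is 0x0000001234567890,
// i.e. ((i64)EDX << 32) | EAX.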
29081 SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
29082 /* NoRegister */0, Subtarget,
29083 Results);
29084 if (Opcode != X86::RDTSCP)
29085 return;
29087 SDValue Chain = Results[1];
29088 // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
29089 // the ECX register. Add 'ecx' explicitly to the chain.
29090 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
29091 Results[1] = ecx;
29092 Results.push_back(ecx.getValue(1));
29093 }
29095 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
29096 SelectionDAG &DAG) {
29097 SmallVector<SDValue, 3> Results;
29098 SDLoc DL(Op);
29099 getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
29100 Results);
29101 return DAG.getMergeValues(Results, DL);
29102 }
29104 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
29105 MachineFunction &MF = DAG.getMachineFunction();
29106 SDValue Chain = Op.getOperand(0);
29107 SDValue RegNode = Op.getOperand(2);
29108 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
29110 report_fatal_error("EH registrations only live in functions using WinEH");
29112 // Cast the operand to an alloca, and remember the frame index.
29113 auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
29115 report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
29116 EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
29118 // Return the chain operand without making any DAG nodes.
29119 return Chain;
29120 }
29122 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
29123 MachineFunction &MF = DAG.getMachineFunction();
29124 SDValue Chain = Op.getOperand(0);
29125 SDValue EHGuard = Op.getOperand(2);
29126 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
29128 report_fatal_error("EHGuard only live in functions using WinEH");
29130 // Cast the operand to an alloca, and remember the frame index.
29131 auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
29133 report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
29134 EHInfo->EHGuardFrameIndex = FINode->getIndex();
29136 // Return the chain operand without making any DAG nodes.
29137 return Chain;
29138 }
29140 /// Emit Truncating Store with signed or unsigned saturation.
29141 static SDValue
29142 EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &DL, SDValue Val,
29143 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
29144 SelectionDAG &DAG) {
29145 SDVTList VTs = DAG.getVTList(MVT::Other);
29146 SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
29147 SDValue Ops[] = { Chain, Val, Ptr, Undef };
29148 unsigned Opc = SignedSat ? X86ISD::VTRUNCSTORES : X86ISD::VTRUNCSTOREUS;
29149 return DAG.getMemIntrinsicNode(Opc, DL, VTs, Ops, MemVT, MMO);
29152 /// Emit Masked Truncating Store with signed or unsigned saturation.
29153 static SDValue EmitMaskedTruncSStore(bool SignedSat, SDValue Chain,
29154 const SDLoc &DL,
29155 SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
29156 MachineMemOperand *MMO, SelectionDAG &DAG) {
29157 SDVTList VTs = DAG.getVTList(MVT::Other);
29158 SDValue Ops[] = { Chain, Val, Ptr, Mask };
29159 unsigned Opc = SignedSat ? X86ISD::VMTRUNCSTORES : X86ISD::VMTRUNCSTOREUS;
29160 return DAG.getMemIntrinsicNode(Opc, DL, VTs, Ops, MemVT, MMO);
29163 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
29164 SelectionDAG &DAG) {
29165 unsigned IntNo = Op.getConstantOperandVal(1);
29166 const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
29167 if (!IntrData) {
29168 switch (IntNo) {
29170 case Intrinsic::swift_async_context_addr: {
29171 SDLoc dl(Op);
29172 auto &MF = DAG.getMachineFunction();
29173 auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
29174 if (Subtarget.is64Bit()) {
29175 MF.getFrameInfo().setFrameAddressIsTaken(true);
29176 X86FI->setHasSwiftAsyncContext(true);
29177 SDValue Chain = Op->getOperand(0);
29178 SDValue CopyRBP = DAG.getCopyFromReg(Chain, dl, X86::RBP, MVT::i64);
29179 SDValue Result =
29180 SDValue(DAG.getMachineNode(X86::SUB64ri32, dl, MVT::i64, CopyRBP,
29181 DAG.getTargetConstant(8, dl, MVT::i32)),
29182 0);
29183 // Return { result, chain }.
29184 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
29185 CopyRBP.getValue(1));
29186 }
29187 // 32-bit so no special extended frame, create or reuse an existing
29188 // stack slot.
29189 if (!X86FI->getSwiftAsyncContextFrameIdx())
29190 X86FI->setSwiftAsyncContextFrameIdx(
29191 MF.getFrameInfo().CreateStackObject(4, Align(4), false));
29192 SDValue Result =
29193 DAG.getFrameIndex(*X86FI->getSwiftAsyncContextFrameIdx(), MVT::i32);
29194 // Return { result, chain }.
29195 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
29196 Op->getOperand(0));
29200 case llvm::Intrinsic::x86_seh_ehregnode:
29201 return MarkEHRegistrationNode(Op, DAG);
29202 case llvm::Intrinsic::x86_seh_ehguard:
29203 return MarkEHGuard(Op, DAG);
29204 case llvm::Intrinsic::x86_rdpkru: {
29206 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
29207 // Create a RDPKRU node and pass 0 to the ECX parameter.
29208 return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
29209 DAG.getConstant(0, dl, MVT::i32));
29211 case llvm::Intrinsic::x86_wrpkru: {
29213 // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
29214 // to the EDX and ECX parameters.
29215 return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
29216 Op.getOperand(0), Op.getOperand(2),
29217 DAG.getConstant(0, dl, MVT::i32),
29218 DAG.getConstant(0, dl, MVT::i32));
29220 case llvm::Intrinsic::asan_check_memaccess: {
29221 // Mark this as adjustsStack because it will be lowered to a call.
29222 DAG.getMachineFunction().getFrameInfo().setAdjustsStack(true);
29223 // Don't do anything here, we will expand these intrinsics out later.
29224 return Op;
29225 }
29226 case llvm::Intrinsic::x86_flags_read_u32:
29227 case llvm::Intrinsic::x86_flags_read_u64:
29228 case llvm::Intrinsic::x86_flags_write_u32:
29229 case llvm::Intrinsic::x86_flags_write_u64: {
29230 // We need a frame pointer because this will get lowered to a PUSH/POP
29231 // sequence.
29232 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
29233 MFI.setHasCopyImplyingStackAdjustment(true);
29234 // Don't do anything here, we will expand these intrinsics out later
29235 // during FinalizeISel in EmitInstrWithCustomInserter.
29236 return Op;
29237 }
29238 case Intrinsic::x86_lwpins32:
29239 case Intrinsic::x86_lwpins64:
29240 case Intrinsic::x86_umwait:
29241 case Intrinsic::x86_tpause: {
29243 SDValue Chain = Op->getOperand(0);
29244 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
29248 default: llvm_unreachable("Impossible intrinsic");
29249 case Intrinsic::x86_umwait:
29250 Opcode = X86ISD::UMWAIT;
29252 case Intrinsic::x86_tpause:
29253 Opcode = X86ISD::TPAUSE;
29255 case Intrinsic::x86_lwpins32:
29256 case Intrinsic::x86_lwpins64:
29257 Opcode = X86ISD::LWPINS;
29258 break;
29259 }
29261 SDValue Operation =
29262 DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
29263 Op->getOperand(3), Op->getOperand(4));
29264 SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
29265 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
29266 Operation.getValue(1));
29268 case Intrinsic::x86_enqcmd:
29269 case Intrinsic::x86_enqcmds: {
29271 SDValue Chain = Op.getOperand(0);
29272 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
29275 default: llvm_unreachable("Impossible intrinsic!");
29276 case Intrinsic::x86_enqcmd:
29277 Opcode = X86ISD::ENQCMD;
29279 case Intrinsic::x86_enqcmds:
29280 Opcode = X86ISD::ENQCMDS;
29281 break;
29282 }
29283 SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
29284 Op.getOperand(3));
29285 SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
29286 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
29287 Operation.getValue(1));
29289 case Intrinsic::x86_aesenc128kl:
29290 case Intrinsic::x86_aesdec128kl:
29291 case Intrinsic::x86_aesenc256kl:
29292 case Intrinsic::x86_aesdec256kl: {
29294 SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::i32, MVT::Other);
29295 SDValue Chain = Op.getOperand(0);
29297 unsigned Opcode;
29298 switch (IntNo) {
29299 default: llvm_unreachable("Impossible intrinsic");
29300 case Intrinsic::x86_aesenc128kl:
29301 Opcode = X86ISD::AESENC128KL;
29303 case Intrinsic::x86_aesdec128kl:
29304 Opcode = X86ISD::AESDEC128KL;
29306 case Intrinsic::x86_aesenc256kl:
29307 Opcode = X86ISD::AESENC256KL;
29309 case Intrinsic::x86_aesdec256kl:
29310 Opcode = X86ISD::AESDEC256KL;
29311 break;
29312 }
29314 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
29315 MachineMemOperand *MMO = MemIntr->getMemOperand();
29316 EVT MemVT = MemIntr->getMemoryVT();
29317 SDValue Operation = DAG.getMemIntrinsicNode(
29318 Opcode, DL, VTs, {Chain, Op.getOperand(2), Op.getOperand(3)}, MemVT,
29319 MMO);
29320 SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(1), DL, DAG);
29322 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
29323 {ZF, Operation.getValue(0), Operation.getValue(2)});
29325 case Intrinsic::x86_aesencwide128kl:
29326 case Intrinsic::x86_aesdecwide128kl:
29327 case Intrinsic::x86_aesencwide256kl:
29328 case Intrinsic::x86_aesdecwide256kl: {
29330 SDVTList VTs = DAG.getVTList(
29331 {MVT::i32, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64,
29332 MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::Other});
29333 SDValue Chain = Op.getOperand(0);
29335 unsigned Opcode;
29336 switch (IntNo) {
29337 default: llvm_unreachable("Impossible intrinsic");
29338 case Intrinsic::x86_aesencwide128kl:
29339 Opcode = X86ISD::AESENCWIDE128KL;
29341 case Intrinsic::x86_aesdecwide128kl:
29342 Opcode = X86ISD::AESDECWIDE128KL;
29344 case Intrinsic::x86_aesencwide256kl:
29345 Opcode = X86ISD::AESENCWIDE256KL;
29347 case Intrinsic::x86_aesdecwide256kl:
29348 Opcode = X86ISD::AESDECWIDE256KL;
29349 break;
29350 }
29352 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
29353 MachineMemOperand *MMO = MemIntr->getMemOperand();
29354 EVT MemVT = MemIntr->getMemoryVT();
29355 SDValue Operation = DAG.getMemIntrinsicNode(
29356 Opcode, DL, VTs,
29357 {Chain, Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
29358 Op.getOperand(5), Op.getOperand(6), Op.getOperand(7),
29359 Op.getOperand(8), Op.getOperand(9), Op.getOperand(10)},
29360 MemVT, MMO);
29361 SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(0), DL, DAG);
29363 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
29364 {ZF, Operation.getValue(1), Operation.getValue(2),
29365 Operation.getValue(3), Operation.getValue(4),
29366 Operation.getValue(5), Operation.getValue(6),
29367 Operation.getValue(7), Operation.getValue(8),
29368 Operation.getValue(9)});
29370 case Intrinsic::x86_testui: {
29372 SDValue Chain = Op.getOperand(0);
29373 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
29374 SDValue Operation = DAG.getNode(X86ISD::TESTUI, dl, VTs, Chain);
29375 SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
29376 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
29377 Operation.getValue(1));
29379 case Intrinsic::x86_atomic_bts_rm:
29380 case Intrinsic::x86_atomic_btc_rm:
29381 case Intrinsic::x86_atomic_btr_rm: {
29383 MVT VT = Op.getSimpleValueType();
29384 SDValue Chain = Op.getOperand(0);
29385 SDValue Op1 = Op.getOperand(2);
29386 SDValue Op2 = Op.getOperand(3);
29387 unsigned Opc = IntNo == Intrinsic::x86_atomic_bts_rm ? X86ISD::LBTS_RM
29388 : IntNo == Intrinsic::x86_atomic_btc_rm ? X86ISD::LBTC_RM
29389 : X86ISD::LBTR_RM;
29390 MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
29391 SDValue Res =
29392 DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
29393 {Chain, Op1, Op2}, VT, MMO);
29394 Chain = Res.getValue(1);
29395 Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
29396 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
29398 case Intrinsic::x86_atomic_bts:
29399 case Intrinsic::x86_atomic_btc:
29400 case Intrinsic::x86_atomic_btr: {
29402 MVT VT = Op.getSimpleValueType();
29403 SDValue Chain = Op.getOperand(0);
29404 SDValue Op1 = Op.getOperand(2);
29405 SDValue Op2 = Op.getOperand(3);
29406 unsigned Opc = IntNo == Intrinsic::x86_atomic_bts ? X86ISD::LBTS
29407 : IntNo == Intrinsic::x86_atomic_btc ? X86ISD::LBTC
29408 : X86ISD::LBTR;
29409 SDValue Size = DAG.getConstant(VT.getScalarSizeInBits(), DL, MVT::i32);
29410 MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
29411 SDValue Res =
29412 DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
29413 {Chain, Op1, Op2, Size}, VT, MMO);
29414 Chain = Res.getValue(1);
29415 Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
29416 unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
29417 if (Imm)
29418 Res = DAG.getNode(ISD::SHL, DL, VT, Res,
29419 DAG.getShiftAmountConstant(Imm, VT, DL));
29420 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
29422 case Intrinsic::x86_cmpccxadd32:
29423 case Intrinsic::x86_cmpccxadd64: {
29425 SDValue Chain = Op.getOperand(0);
29426 SDValue Addr = Op.getOperand(2);
29427 SDValue Src1 = Op.getOperand(3);
29428 SDValue Src2 = Op.getOperand(4);
29429 SDValue CC = Op.getOperand(5);
29430 MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
29431 SDValue Operation = DAG.getMemIntrinsicNode(
29432 X86ISD::CMPCCXADD, DL, Op->getVTList(), {Chain, Addr, Src1, Src2, CC},
29433 MVT::i32, MMO);
29434 return Operation;
29435 }
29436 case Intrinsic::x86_aadd32:
29437 case Intrinsic::x86_aadd64:
29438 case Intrinsic::x86_aand32:
29439 case Intrinsic::x86_aand64:
29440 case Intrinsic::x86_aor32:
29441 case Intrinsic::x86_aor64:
29442 case Intrinsic::x86_axor32:
29443 case Intrinsic::x86_axor64: {
29445 SDValue Chain = Op.getOperand(0);
29446 SDValue Op1 = Op.getOperand(2);
29447 SDValue Op2 = Op.getOperand(3);
29448 MVT VT = Op2.getSimpleValueType();
29452 llvm_unreachable("Unknown Intrinsic");
29453 case Intrinsic::x86_aadd32:
29454 case Intrinsic::x86_aadd64:
29455 Opc = X86ISD::AADD;
29457 case Intrinsic::x86_aand32:
29458 case Intrinsic::x86_aand64:
29459 Opc = X86ISD::AAND;
29461 case Intrinsic::x86_aor32:
29462 case Intrinsic::x86_aor64:
29463 Opc = X86ISD::AOR;
29464 break;
29465 case Intrinsic::x86_axor32:
29466 case Intrinsic::x86_axor64:
29467 Opc = X86ISD::AXOR;
29468 break;
29469 }
29470 MachineMemOperand *MMO = cast<MemSDNode>(Op)->getMemOperand();
29471 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(),
29472 {Chain, Op1, Op2}, VT, MMO);
29474 case Intrinsic::x86_atomic_add_cc:
29475 case Intrinsic::x86_atomic_sub_cc:
29476 case Intrinsic::x86_atomic_or_cc:
29477 case Intrinsic::x86_atomic_and_cc:
29478 case Intrinsic::x86_atomic_xor_cc: {
29480 SDValue Chain = Op.getOperand(0);
29481 SDValue Op1 = Op.getOperand(2);
29482 SDValue Op2 = Op.getOperand(3);
29483 X86::CondCode CC = (X86::CondCode)Op.getConstantOperandVal(4);
29484 MVT VT = Op2.getSimpleValueType();
29488 llvm_unreachable("Unknown Intrinsic");
29489 case Intrinsic::x86_atomic_add_cc:
29490 Opc = X86ISD::LADD;
29492 case Intrinsic::x86_atomic_sub_cc:
29493 Opc = X86ISD::LSUB;
29495 case Intrinsic::x86_atomic_or_cc:
29496 Opc = X86ISD::LOR;
29497 break;
29498 case Intrinsic::x86_atomic_and_cc:
29499 Opc = X86ISD::LAND;
29501 case Intrinsic::x86_atomic_xor_cc:
29502 Opc = X86ISD::LXOR;
29503 break;
29504 }
29505 MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
29506 SDValue LockArith =
29507 DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
29508 {Chain, Op1, Op2}, VT, MMO);
29509 Chain = LockArith.getValue(1);
29510 return DAG.getMergeValues({getSETCC(CC, LockArith, DL, DAG), Chain}, DL);
29511 }
29512 }
29513 return SDValue();
29514 }
29516 SDLoc dl(Op);
29517 switch(IntrData->Type) {
29518 default: llvm_unreachable("Unknown Intrinsic Type");
29519 case RDSEED:
29520 case RDRAND: {
29521 // Emit the node with the right value type.
29522 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
29523 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
29525 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
29526 // Otherwise return the value from Rand, which is always 0, casted to i32.
29527 SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
29528 DAG.getConstant(1, dl, Op->getValueType(1)),
29529 DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
29530 SDValue(Result.getNode(), 1)};
29531 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
29533 // Return { result, isValid, chain }.
29534 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
29535 SDValue(Result.getNode(), 2));
29537 case GATHER_AVX2: {
29538 SDValue Chain = Op.getOperand(0);
29539 SDValue Src = Op.getOperand(2);
29540 SDValue Base = Op.getOperand(3);
29541 SDValue Index = Op.getOperand(4);
29542 SDValue Mask = Op.getOperand(5);
29543 SDValue Scale = Op.getOperand(6);
29544 return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
29545 Scale, Chain, Subtarget);
29546 }
29547 case GATHER: {
29548 // gather(v1, mask, index, base, scale);
29549 SDValue Chain = Op.getOperand(0);
29550 SDValue Src = Op.getOperand(2);
29551 SDValue Base = Op.getOperand(3);
29552 SDValue Index = Op.getOperand(4);
29553 SDValue Mask = Op.getOperand(5);
29554 SDValue Scale = Op.getOperand(6);
29555 return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
29556 Chain, Subtarget);
29557 }
29558 case SCATTER: {
29559 // scatter(base, mask, index, v1, scale);
29560 SDValue Chain = Op.getOperand(0);
29561 SDValue Base = Op.getOperand(2);
29562 SDValue Mask = Op.getOperand(3);
29563 SDValue Index = Op.getOperand(4);
29564 SDValue Src = Op.getOperand(5);
29565 SDValue Scale = Op.getOperand(6);
29566 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
29567 Scale, Chain, Subtarget);
29568 }
29569 case PREFETCH: {
29570 const APInt &HintVal = Op.getConstantOperandAPInt(6);
29571 assert((HintVal == 2 || HintVal == 3) &&
29572 "Wrong prefetch hint in intrinsic: should be 2 or 3");
29573 unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
29574 SDValue Chain = Op.getOperand(0);
29575 SDValue Mask = Op.getOperand(2);
29576 SDValue Index = Op.getOperand(3);
29577 SDValue Base = Op.getOperand(4);
29578 SDValue Scale = Op.getOperand(5);
29579 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
29580 Subtarget);
29581 }
29582 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
29583 case RDTSC: {
29584 SmallVector<SDValue, 2> Results;
29585 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
29586 Results);
29587 return DAG.getMergeValues(Results, dl);
29588 }
29589 // Read Performance Monitoring Counters.
29590 case RDPMC:
29591 // Read Processor Register.
29592 case RDPRU:
29593 // GetExtended Control Register.
29594 case XGETBV: {
29595 SmallVector<SDValue, 2> Results;
29597 // RDPMC uses ECX to select the index of the performance counter to read.
29598 // RDPRU uses ECX to select the processor register to read.
29599 // XGETBV uses ECX to select the index of the XCR register to return.
29600 // The result is stored into registers EDX:EAX.
29601 expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
29602 Subtarget, Results);
29603 return DAG.getMergeValues(Results, dl);
29604 }
29605 // XTEST intrinsics.
29606 case XTEST: {
29607 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
29608 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
29610 SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
29611 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
29612 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
29613 Ret, SDValue(InTrans.getNode(), 1));
29614 }
29615 case TRUNCATE_TO_MEM_VI8:
29616 case TRUNCATE_TO_MEM_VI16:
29617 case TRUNCATE_TO_MEM_VI32: {
29618 SDValue Mask = Op.getOperand(4);
29619 SDValue DataToTruncate = Op.getOperand(3);
29620 SDValue Addr = Op.getOperand(2);
29621 SDValue Chain = Op.getOperand(0);
29623 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
29624 assert(MemIntr && "Expected MemIntrinsicSDNode!");
29626 EVT MemVT = MemIntr->getMemoryVT();
29628 uint16_t TruncationOp = IntrData->Opc0;
29629 switch (TruncationOp) {
29630 case X86ISD::VTRUNC: {
29631 if (isAllOnesConstant(Mask)) // return just a truncate store
29632 return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
29633 MemIntr->getMemOperand());
29635 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
29636 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
29637 SDValue Offset = DAG.getUNDEF(VMask.getValueType());
29639 return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
29640 MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
29641 true /* truncating */);
29642 }
29643 case X86ISD::VTRUNCUS:
29644 case X86ISD::VTRUNCS: {
29645 bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
29646 if (isAllOnesConstant(Mask))
29647 return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
29648 MemIntr->getMemOperand(), DAG);
29650 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
29651 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
29653 return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
29654 VMask, MemVT, MemIntr->getMemOperand(), DAG);
29657 llvm_unreachable("Unsupported truncstore intrinsic");
29663 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
29664 SelectionDAG &DAG) const {
29665 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
29666 MFI.setReturnAddressIsTaken(true);
29668 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
29669 return SDValue();
29671 unsigned Depth = Op.getConstantOperandVal(0);
29672 SDLoc dl(Op);
29673 EVT PtrVT = getPointerTy(DAG.getDataLayout());
29675 if (Depth > 0) {
29676 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
29677 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
29678 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
29679 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
29680 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
29681 MachinePointerInfo());
29682 }
29684 // Just load the return address.
29685 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
29686 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
29687 MachinePointerInfo());
29690 SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
29691 SelectionDAG &DAG) const {
29692 DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
29693 return getReturnAddressFrameIndex(DAG);
29696 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
29697 MachineFunction &MF = DAG.getMachineFunction();
29698 MachineFrameInfo &MFI = MF.getFrameInfo();
29699 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
29700 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
29701 EVT VT = Op.getValueType();
29703 MFI.setFrameAddressIsTaken(true);
29705 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
29706 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
29707 // is not possible to crawl up the stack without looking at the unwind codes
29708 // anyway.
29709 int FrameAddrIndex = FuncInfo->getFAIndex();
29710 if (!FrameAddrIndex) {
29711 // Set up a frame object for the return address.
29712 unsigned SlotSize = RegInfo->getSlotSize();
29713 FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
29714 SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
29715 FuncInfo->setFAIndex(FrameAddrIndex);
29716 }
29717 return DAG.getFrameIndex(FrameAddrIndex, VT);
29718 }
29720 unsigned FrameReg =
29721 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
29722 SDLoc dl(Op); // FIXME probably not meaningful
29723 unsigned Depth = Op.getConstantOperandVal(0);
29724 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
29725 (FrameReg == X86::EBP && VT == MVT::i32)) &&
29726 "Invalid Frame Register!");
29727 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
29728 while (Depth--)
29729 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
29730 MachinePointerInfo());
29731 return FrameAddr;
29732 }
29734 // FIXME? Maybe this could be a TableGen attribute on some registers and
29735 // this table could be generated automatically from RegInfo.
29736 Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
29737 const MachineFunction &MF) const {
29738 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
29740 Register Reg = StringSwitch<unsigned>(RegName)
29741 .Case("esp", X86::ESP)
29742 .Case("rsp", X86::RSP)
29743 .Case("ebp", X86::EBP)
29744 .Case("rbp", X86::RBP)
29747 if (Reg == X86::EBP || Reg == X86::RBP) {
29748 if (!TFI.hasFP(MF))
29749 report_fatal_error("register " + StringRef(RegName) +
29750 " is allocatable: function has no frame pointer");
29751 #ifndef NDEBUG
29752 else {
29753 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
29754 Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
29755 assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
29756 "Invalid Frame Register!");
29764 report_fatal_error("Invalid register name global variable");
29767 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
29768 SelectionDAG &DAG) const {
29769 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
29770 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
29773 Register X86TargetLowering::getExceptionPointerRegister(
29774 const Constant *PersonalityFn) const {
29775 if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
29776 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
29778 return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
29781 Register X86TargetLowering::getExceptionSelectorRegister(
29782 const Constant *PersonalityFn) const {
29783 // Funclet personalities don't use selectors (the runtime does the selection).
29784 if (isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)))
29785 return X86::NoRegister;
29786 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
29789 bool X86TargetLowering::needsFixedCatchObjects() const {
29790 return Subtarget.isTargetWin64();
29793 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
29794 SDValue Chain = Op.getOperand(0);
29795 SDValue Offset = Op.getOperand(1);
29796 SDValue Handler = Op.getOperand(2);
29799 EVT PtrVT = getPointerTy(DAG.getDataLayout());
29800 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
29801 Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
29802 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
29803 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
29804 "Invalid Frame Register!");
29805 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
29806 Register StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
29808 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
29809 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
29811 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
29812 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
29813 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
29815 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
29816 DAG.getRegister(StoreAddrReg, PtrVT));
29819 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
29820 SelectionDAG &DAG) const {
29822 // If the subtarget is not 64bit, we may need the global base reg
29823 // after isel expand pseudo, i.e., after CGBR pass ran.
29824 // Therefore, ask for the GlobalBaseReg now, so that the pass
29825 // inserts the code for us in case we need it.
29826 // Otherwise, we will end up in a situation where we will
29827 // reference a virtual register that is not defined!
29828 if (!Subtarget.is64Bit()) {
29829 const X86InstrInfo *TII = Subtarget.getInstrInfo();
29830 (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
29832 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
29833 DAG.getVTList(MVT::i32, MVT::Other),
29834 Op.getOperand(0), Op.getOperand(1));
29837 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
29838 SelectionDAG &DAG) const {
29840 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
29841 Op.getOperand(0), Op.getOperand(1));
29844 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
29845 SelectionDAG &DAG) const {
29847 return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
29851 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
29852 return Op.getOperand(0);
29855 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
29856 SelectionDAG &DAG) const {
29857 SDValue Root = Op.getOperand(0);
29858 SDValue Trmp = Op.getOperand(1); // trampoline
29859 SDValue FPtr = Op.getOperand(2); // nested function
29860 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
29863 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
29864 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
29866 if (Subtarget.is64Bit()) {
29867 SDValue OutChains[6];
29869 // Large code-model.
29870 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
29871 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
29873 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
29874 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
29876 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
29878 // Load the pointer to the nested function into R11.
29879 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
29880 SDValue Addr = Trmp;
29881 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
29882 Addr, MachinePointerInfo(TrmpAddr));
29884 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
29885 DAG.getConstant(2, dl, MVT::i64));
29886 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
29887 MachinePointerInfo(TrmpAddr, 2), Align(2));
29889 // Load the 'nest' parameter value into R10.
29890 // R10 is specified in X86CallingConv.td
29891 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
29892 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
29893 DAG.getConstant(10, dl, MVT::i64));
29894 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
29895 Addr, MachinePointerInfo(TrmpAddr, 10));
29897 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
29898 DAG.getConstant(12, dl, MVT::i64));
29899 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
29900 MachinePointerInfo(TrmpAddr, 12), Align(2));
29902 // Jump to the nested function.
29903 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
29904 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
29905 DAG.getConstant(20, dl, MVT::i64));
29906 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
29907 Addr, MachinePointerInfo(TrmpAddr, 20));
29909 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
29910 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
29911 DAG.getConstant(22, dl, MVT::i64));
29912 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
29913 Addr, MachinePointerInfo(TrmpAddr, 22));
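// Added annotation (a sketch inferred from the opcodes above): the stores
// assemble these trampoline bytes, with the immediates patched at run time:
//   0:  49 BB <8-byte FPtr>   movabsq $<fptr>, %r11
//   10: 49 BA <8-byte Nest>   movabsq $<nest>, %r10
//   20: 49 FF E3              rex.W jmpq *%r11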
29915 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
29916 } else {
29917 const Function *Func =
29918 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
29919 CallingConv::ID CC = Func->getCallingConv();
29924 llvm_unreachable("Unsupported calling convention");
29925 case CallingConv::C:
29926 case CallingConv::X86_StdCall: {
29927 // Pass 'nest' parameter in ECX.
29928 // Must be kept in sync with X86CallingConv.td
29929 NestReg = X86::ECX;
29931 // Check that ECX wasn't needed by an 'inreg' parameter.
29932 FunctionType *FTy = Func->getFunctionType();
29933 const AttributeList &Attrs = Func->getAttributes();
29935 if (!Attrs.isEmpty() && !Func->isVarArg()) {
29936 unsigned InRegCount = 0;
29937 unsigned Idx = 0;
29939 for (FunctionType::param_iterator I = FTy->param_begin(),
29940 E = FTy->param_end(); I != E; ++I, ++Idx)
29941 if (Attrs.hasParamAttr(Idx, Attribute::InReg)) {
29942 const DataLayout &DL = DAG.getDataLayout();
29943 // FIXME: should only count parameters that are lowered to integers.
29944 InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
29945 }
29947 if (InRegCount > 2) {
29948 report_fatal_error("Nest register in use - reduce number of inreg"
29954 case CallingConv::X86_FastCall:
29955 case CallingConv::X86_ThisCall:
29956 case CallingConv::Fast:
29957 case CallingConv::Tail:
29958 case CallingConv::SwiftTail:
29959 // Pass 'nest' parameter in EAX.
29960 // Must be kept in sync with X86CallingConv.td
29961 NestReg = X86::EAX;
29962 break;
29963 }
29965 SDValue OutChains[4];
29966 SDValue Addr, Disp;
29968 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29969 DAG.getConstant(10, dl, MVT::i32));
29970 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
29972 // This is storing the opcode for MOV32ri.
29973 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
29974 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
29975 OutChains[0] =
29976 DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
29977 Trmp, MachinePointerInfo(TrmpAddr));
29979 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29980 DAG.getConstant(1, dl, MVT::i32));
29981 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
29982 MachinePointerInfo(TrmpAddr, 1), Align(1));
29984 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
29985 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29986 DAG.getConstant(5, dl, MVT::i32));
29987 OutChains[2] =
29988 DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8), Addr,
29989 MachinePointerInfo(TrmpAddr, 5), Align(1));
29991 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29992 DAG.getConstant(6, dl, MVT::i32));
29993 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
29994 MachinePointerInfo(TrmpAddr, 6), Align(1));
29996 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
29997 }
29998 }
30000 SDValue X86TargetLowering::LowerGET_ROUNDING(SDValue Op,
30001 SelectionDAG &DAG) const {
30002 /*
30003 The rounding mode is in bits 11:10 of FPSR, and has the following
30004 settings:
30005 00 Round to nearest
30006 01 Round to -inf
30007 10 Round to +inf
30008 11 Round to 0
30010 GET_ROUNDING, on the other hand, expects the following:
30011 -1 Undefined
30012 0 Round to 0
30013 1 Round to nearest
30014 2 Round to +inf
30015 3 Round to -inf
30017 To perform the conversion, we use a packed lookup table of the four 2-bit
30018 values that we can index by FPSR[11:10]
30019 0x2d --> (0b00,10,11,01) --> (0,2,3,1) >> FPSR[11:10]
30021 (0x2d >> ((FPSR & 0xc00) >> 9)) & 3
30022 */
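// Added compile-time sketch (not in the original source): spot-check the
// packed-LUT formula above for FPSR[11:10] = 0b01 (round toward -inf),
// which GET_ROUNDING reports as 3.
static_assert(((0x2d >> ((0x0400 & 0xc00) >> 9)) & 3) == 3,
              "RM field 0b01 (toward -inf) should map to GET_ROUNDING 3");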
30024 MachineFunction &MF = DAG.getMachineFunction();
30025 MVT VT = Op.getSimpleValueType();
30028 // Save FP Control Word to stack slot
30029 int SSFI = MF.getFrameInfo().CreateStackObject(2, Align(2), false);
30030 SDValue StackSlot =
30031 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
30033 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
30035 SDValue Chain = Op.getOperand(0);
30036 SDValue Ops[] = {Chain, StackSlot};
30037 Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
30038 DAG.getVTList(MVT::Other), Ops, MVT::i16, MPI,
30039 Align(2), MachineMemOperand::MOStore);
30041 // Load FP Control Word from stack slot
30042 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI, Align(2));
30043 Chain = CWD.getValue(1);
30045 // Mask and turn the control bits into a shift for the lookup table.
30046 SDValue Shift =
30047 DAG.getNode(ISD::SRL, DL, MVT::i16,
30048 DAG.getNode(ISD::AND, DL, MVT::i16,
30049 CWD, DAG.getConstant(0xc00, DL, MVT::i16)),
30050 DAG.getConstant(9, DL, MVT::i8));
30051 Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Shift);
30053 SDValue LUT = DAG.getConstant(0x2d, DL, MVT::i32);
30054 SDValue RetVal =
30055 DAG.getNode(ISD::AND, DL, MVT::i32,
30056 DAG.getNode(ISD::SRL, DL, MVT::i32, LUT, Shift),
30057 DAG.getConstant(3, DL, MVT::i32));
30059 RetVal = DAG.getZExtOrTrunc(RetVal, DL, VT);
30061 return DAG.getMergeValues({RetVal, Chain}, DL);
30064 SDValue X86TargetLowering::LowerSET_ROUNDING(SDValue Op,
30065 SelectionDAG &DAG) const {
30066 MachineFunction &MF = DAG.getMachineFunction();
30068 SDValue Chain = Op.getNode()->getOperand(0);
30070 // FP control word may be set only from data in memory. So we need to allocate
30071 // stack space to save/load FP control word.
30072 int OldCWFrameIdx = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
30073 SDValue StackSlot =
30074 DAG.getFrameIndex(OldCWFrameIdx, getPointerTy(DAG.getDataLayout()));
30075 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, OldCWFrameIdx);
30076 MachineMemOperand *MMO =
30077 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 2, Align(2));
30079 // Store FP control word into memory.
30080 SDValue Ops[] = {Chain, StackSlot};
30081 Chain = DAG.getMemIntrinsicNode(
30082 X86ISD::FNSTCW16m, DL, DAG.getVTList(MVT::Other), Ops, MVT::i16, MMO);
30084 // Load FP Control Word from stack slot and clear RM field (bits 11:10).
30085 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI);
30086 Chain = CWD.getValue(1);
30087 CWD = DAG.getNode(ISD::AND, DL, MVT::i16, CWD.getValue(0),
30088 DAG.getConstant(0xf3ff, DL, MVT::i16));
30090 // Calculate new rounding mode.
30091 SDValue NewRM = Op.getNode()->getOperand(1);
30092 SDValue RMBits;
30093 if (auto *CVal = dyn_cast<ConstantSDNode>(NewRM)) {
30094 uint64_t RM = CVal->getZExtValue();
30095 unsigned FieldVal;
30096 switch (static_cast<RoundingMode>(RM)) {
30097 case RoundingMode::NearestTiesToEven: FieldVal = X86::rmToNearest; break;
30098 case RoundingMode::TowardNegative: FieldVal = X86::rmDownward; break;
30099 case RoundingMode::TowardPositive: FieldVal = X86::rmUpward; break;
30100 case RoundingMode::TowardZero: FieldVal = X86::rmTowardZero; break;
30102 llvm_unreachable("rounding mode is not supported by X86 hardware");
30104 RMBits = DAG.getConstant(FieldVal, DL, MVT::i16);
30105 } else {
30106 // Need to convert argument into bits of control word:
30107 // 0 Round to 0 -> 11
30108 // 1 Round to nearest -> 00
30109 // 2 Round to +inf -> 10
30110 // 3 Round to -inf -> 01
30111 // The 2-bit value needs then to be shifted so that it occupies bits 11:10.
30112 // To make the conversion, put all these values into a value 0xc9 and shift
30113 // it left depending on the rounding mode:
30114 // (0xc9 << 4) & 0xc00 = X86::rmTowardZero
30115 // (0xc9 << 6) & 0xc00 = X86::rmToNearest
30116 // ...
30117 // (0xc9 << (2 * NewRM + 4)) & 0xc00
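// Added worked example: for NewRM = 2 (round to +inf) the shift amount is
// 2 * 2 + 4 = 8, and (0xc9 << 8) & 0xc00 = 0x800, i.e. bits 11:10 = 10,
// the x87 encoding of round to +inf.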
30118 SDValue ShiftValue =
30119 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
30120 DAG.getNode(ISD::ADD, DL, MVT::i32,
30121 DAG.getNode(ISD::SHL, DL, MVT::i32, NewRM,
30122 DAG.getConstant(1, DL, MVT::i8)),
30123 DAG.getConstant(4, DL, MVT::i32)));
30124 SDValue Shifted =
30125 DAG.getNode(ISD::SHL, DL, MVT::i16, DAG.getConstant(0xc9, DL, MVT::i16),
30126 ShiftValue);
30127 RMBits = DAG.getNode(ISD::AND, DL, MVT::i16, Shifted,
30128 DAG.getConstant(0xc00, DL, MVT::i16));
30131 // Update rounding mode bits and store the new FP Control Word into stack.
30132 CWD = DAG.getNode(ISD::OR, DL, MVT::i16, CWD, RMBits);
30133 Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(2));
30135 // Load FP control word from the slot.
30136 SDValue OpsLD[] = {Chain, StackSlot};
30137 MachineMemOperand *MMOL =
30138 MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 2, Align(2));
30139 Chain = DAG.getMemIntrinsicNode(
30140 X86ISD::FLDCW16m, DL, DAG.getVTList(MVT::Other), OpsLD, MVT::i16, MMOL);
30142 // If target supports SSE, set MXCSR as well. Rounding mode is encoded in the
30143 // same way but in bits 14:13.
30144 if (Subtarget.hasSSE1()) {
30145 // Store MXCSR into memory.
30146 Chain = DAG.getNode(
30147 ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
30148 DAG.getTargetConstant(Intrinsic::x86_sse_stmxcsr, DL, MVT::i32),
30149 StackSlot);
30151 // Load MXCSR from stack slot and clear RM field (bits 14:13).
30152 SDValue CWD = DAG.getLoad(MVT::i32, DL, Chain, StackSlot, MPI);
30153 Chain = CWD.getValue(1);
30154 CWD = DAG.getNode(ISD::AND, DL, MVT::i32, CWD.getValue(0),
30155 DAG.getConstant(0xffff9fff, DL, MVT::i32));
30157 // Shift X87 RM bits from 11:10 to 14:13.
30158 RMBits = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, RMBits);
30159 RMBits = DAG.getNode(ISD::SHL, DL, MVT::i32, RMBits,
30160 DAG.getConstant(3, DL, MVT::i8));
30162 // Update rounding mode bits and store the new FP Control Word into stack.
30163 CWD = DAG.getNode(ISD::OR, DL, MVT::i32, CWD, RMBits);
30164 Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(4));
30166 // Load MXCSR from the slot.
30167 Chain = DAG.getNode(
30168 ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
30169 DAG.getTargetConstant(Intrinsic::x86_sse_ldmxcsr, DL, MVT::i32),
30170 StackSlot);
30171 }
30173 return Chain;
30174 }
30176 const unsigned X87StateSize = 28;
30177 const unsigned FPStateSize = 32;
30178 [[maybe_unused]] const unsigned FPStateSizeInBits = FPStateSize * 8;
30180 SDValue X86TargetLowering::LowerGET_FPENV_MEM(SDValue Op,
30181 SelectionDAG &DAG) const {
30182 MachineFunction &MF = DAG.getMachineFunction();
30184 SDValue Chain = Op->getOperand(0);
30185 SDValue Ptr = Op->getOperand(1);
30186 auto *Node = cast<FPStateAccessSDNode>(Op);
30187 EVT MemVT = Node->getMemoryVT();
30188 assert(MemVT.getSizeInBits() == FPStateSizeInBits);
30189 MachineMemOperand *MMO = cast<FPStateAccessSDNode>(Op)->getMemOperand();
30191 // Get the x87 state, if present.
30192 if (Subtarget.hasX87()) {
30193 Chain =
30194 DAG.getMemIntrinsicNode(X86ISD::FNSTENVm, DL, DAG.getVTList(MVT::Other),
30195 {Chain, Ptr}, MemVT, MMO);
30197 // FNSTENV changes the exception mask, so load back the stored environment.
30198 MachineMemOperand::Flags NewFlags =
30199 MachineMemOperand::MOLoad |
30200 (MMO->getFlags() & ~MachineMemOperand::MOStore);
30201 MMO = MF.getMachineMemOperand(MMO, NewFlags);
30202 Chain =
30203 DAG.getMemIntrinsicNode(X86ISD::FLDENVm, DL, DAG.getVTList(MVT::Other),
30204 {Chain, Ptr}, MemVT, MMO);
30205 }
30207 // If target supports SSE, get MXCSR as well.
30208 if (Subtarget.hasSSE1()) {
30209 // Get pointer to the MXCSR location in memory.
30210 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
30211 SDValue MXCSRAddr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr,
30212 DAG.getConstant(X87StateSize, DL, PtrVT));
30213 // Store MXCSR into memory.
30214 Chain = DAG.getNode(
30215 ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
30216 DAG.getTargetConstant(Intrinsic::x86_sse_stmxcsr, DL, MVT::i32),
30217 MXCSRAddr);
30218 }
30220 return Chain;
30221 }
30223 static SDValue createSetFPEnvNodes(SDValue Ptr, SDValue Chain, SDLoc DL,
30224 EVT MemVT, MachineMemOperand *MMO,
30225 SelectionDAG &DAG,
30226 const X86Subtarget &Subtarget) {
30227 // Set the x87 state, if present.
30228 if (Subtarget.hasX87())
30229 Chain =
30230 DAG.getMemIntrinsicNode(X86ISD::FLDENVm, DL, DAG.getVTList(MVT::Other),
30231 {Chain, Ptr}, MemVT, MMO);
30232 // If target supports SSE, set MXCSR as well.
30233 if (Subtarget.hasSSE1()) {
30234 // Get pointer to the MXCSR location in memory.
30235 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
30236 SDValue MXCSRAddr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr,
30237 DAG.getConstant(X87StateSize, DL, PtrVT));
30238 // Load MXCSR from memory.
30239 Chain = DAG.getNode(
30240 ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
30241 DAG.getTargetConstant(Intrinsic::x86_sse_ldmxcsr, DL, MVT::i32),
30242 MXCSRAddr);
30243 }
30245 return Chain;
30246 }
30247 SDValue X86TargetLowering::LowerSET_FPENV_MEM(SDValue Op,
30248 SelectionDAG &DAG) const {
30250 SDValue Chain = Op->getOperand(0);
30251 SDValue Ptr = Op->getOperand(1);
30252 auto *Node = cast<FPStateAccessSDNode>(Op);
30253 EVT MemVT = Node->getMemoryVT();
30254 assert(MemVT.getSizeInBits() == FPStateSizeInBits);
30255 MachineMemOperand *MMO = cast<FPStateAccessSDNode>(Op)->getMemOperand();
30256 return createSetFPEnvNodes(Ptr, Chain, DL, MemVT, MMO, DAG, Subtarget);
30259 SDValue X86TargetLowering::LowerRESET_FPENV(SDValue Op,
30260 SelectionDAG &DAG) const {
30261 MachineFunction &MF = DAG.getMachineFunction();
30263 SDValue Chain = Op.getNode()->getOperand(0);
30265 IntegerType *ItemTy = Type::getInt32Ty(*DAG.getContext());
30266 ArrayType *FPEnvTy = ArrayType::get(ItemTy, 8);
30267 SmallVector<Constant *, 8> FPEnvVals;
30269 // x87 FPU Control Word: masks all floating-point exceptions and sets
30270 // rounding to nearest. FPU precision is set to 53 bits on Windows and 64
30271 // bits otherwise for compatibility with glibc.
30272 unsigned X87CW = Subtarget.isTargetWindowsMSVC() ? 0x27F : 0x37F;
30273 FPEnvVals.push_back(ConstantInt::get(ItemTy, X87CW));
30274 Constant *Zero = ConstantInt::get(ItemTy, 0);
30275 for (unsigned I = 0; I < 6; ++I)
30276 FPEnvVals.push_back(Zero);
30278 // MXCSR: masks all floating-point exceptions, sets rounding to nearest,
30279 // clears all exception flags, and sets DAZ and FTZ to 0.
30280 FPEnvVals.push_back(ConstantInt::get(ItemTy, 0x1F80));
30281 Constant *FPEnvBits = ConstantArray::get(FPEnvTy, FPEnvVals);
30282 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
30283 SDValue Env = DAG.getConstantPool(FPEnvBits, PtrVT);
30284 MachinePointerInfo MPI =
30285 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
30286 MachineMemOperand *MMO = MF.getMachineMemOperand(
30287 MPI, MachineMemOperand::MOStore, X87StateSize, Align(4));
30289 return createSetFPEnvNodes(Env, Chain, DL, MVT::i32, MMO, DAG, Subtarget);
30292 /// Lower a vector CTLZ using native supported vector CTLZ instruction.
30294 // i8/i16 vectors are implemented using the dword LZCNT vector instruction
30295 // ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal, we
30296 // split the vector, perform the operation on its Lo and Hi parts and
30297 // concatenate the results.
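// Added worked example: for an i16 element x = 0x00FF, zext32(x) has 24
// leading zeros, and subtracting the delta 32 - 16 = 16 yields
// ctlz16(x) = 8.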
30298 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
30299 const X86Subtarget &Subtarget) {
30300 assert(Op.getOpcode() == ISD::CTLZ);
30302 MVT VT = Op.getSimpleValueType();
30303 MVT EltVT = VT.getVectorElementType();
30304 unsigned NumElems = VT.getVectorNumElements();
30306 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
30307 "Unsupported element type");
30309 // Split the vector; its Lo and Hi parts will be handled in the next iteration.
30310 if (NumElems > 16 ||
30311 (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
30312 return splitVectorIntUnary(Op, DAG);
30314 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
30315 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
30316 "Unsupported value type for operation");
30318 // Use native supported vector instruction vplzcntd.
30319 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
30320 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
30321 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
30322 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
30324 return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
30325 }
30327 // Lower CTLZ using a PSHUFB lookup table implementation.
30328 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
30329 const X86Subtarget &Subtarget,
30330 SelectionDAG &DAG) {
30331 MVT VT = Op.getSimpleValueType();
30332 int NumElts = VT.getVectorNumElements();
30333 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
30334 MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
30336 // Per-nibble leading zero PSHUFB lookup table.
30337 const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
30338 /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
30339 /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
30340 /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
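// Added worked example: for the byte 0x1A the hi nibble is 0x1, so the
// lookup yields LUT[1] = 3 and the lo result is masked away: ctlz8 = 3.
// For 0x0A the hi nibble is zero (LUT[0] = 4), so the lo nibble's
// LUT[0xA] = 0 is added on: ctlz8 = 4.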
30342 SmallVector<SDValue, 64> LUTVec;
30343 for (int i = 0; i < NumBytes; ++i)
30344 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
30345 SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
30347 // Begin by bitcasting the input to byte vector, then split those bytes
30348 // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
30349 // If the hi input nibble is zero then we add both results together, otherwise
30350 // we just take the hi result (by masking the lo result to zero before the
30351 // add).
30352 SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
30353 SDValue Zero = DAG.getConstant(0, DL, CurrVT);
30354 SDValue NibbleMask = DAG.getConstant(0xF, DL, CurrVT);
30355 SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
30356 SDValue Lo = DAG.getNode(ISD::AND, DL, CurrVT, Op0, NibbleMask);
30357 SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
30358 SDValue HiZ;
30359 if (CurrVT.is512BitVector()) {
30360 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
30361 HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
30362 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
30363 } else {
30364 HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
30365 }
30367 Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
30368 Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
30369 Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
30370 SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
30372 // Merge result back from vXi8 back to VT, working on the lo/hi halves
30373 // of the current vector width in the same way we did for the nibbles.
30374 // If the upper half of the input element is zero then add the halves'
30375 // leading zero counts together, otherwise just use the upper half's.
30376 // Double the width of the result until we are at target width.
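// Added worked example: an i16 element 0x00FF starts as byte counts
// (8, 0); its upper byte is zero, so the halves are summed: ctlz16 = 8.
// Element 0x1234 has byte counts (3, 2); its upper byte is nonzero, so
// only that count is kept: ctlz16 = 3.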
30377 while (CurrVT != VT) {
30378 int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
30379 int CurrNumElts = CurrVT.getVectorNumElements();
30380 MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
30381 MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
30382 SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
30384 // Check if the upper half of the input element is zero.
30385 if (CurrVT.is512BitVector()) {
30386 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
30387 HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
30388 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
30389 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
30390 } else {
30391 HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
30392 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
30393 }
30394 HiZ = DAG.getBitcast(NextVT, HiZ);
30396 // Move the upper/lower halves to the lower bits as we'll be extending to
30397 // NextVT. Mask the lower result to zero if HiZ is true and add the results
30398 // together.
30399 SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
30400 SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
30401 SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
30402 R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
30403 Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
30405 CurrVT = NextVT;
30406 }
30408 return Res;
30409 }
30410 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
30411 const X86Subtarget &Subtarget,
30412 SelectionDAG &DAG) {
30413 MVT VT = Op.getSimpleValueType();
30415 if (Subtarget.hasCDI() &&
30416 // vXi8 vectors need to be promoted to 512-bits for vXi32.
30417 (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
30418 return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
30420 // Decompose 256-bit ops into smaller 128-bit ops.
30421 if (VT.is256BitVector() && !Subtarget.hasInt256())
30422 return splitVectorIntUnary(Op, DAG);
30424 // Decompose 512-bit ops into smaller 256-bit ops.
30425 if (VT.is512BitVector() && !Subtarget.hasBWI())
30426 return splitVectorIntUnary(Op, DAG);
30428 assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
30429 return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
30432 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
30433 SelectionDAG &DAG) {
30434 MVT VT = Op.getSimpleValueType();
30435 MVT OpVT = VT;
30436 unsigned NumBits = VT.getSizeInBits();
30437 SDLoc dl(Op);
30438 unsigned Opc = Op.getOpcode();
30440 if (VT.isVector())
30441 return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
30443 Op = Op.getOperand(0);
30444 if (VT == MVT::i8) {
30445 // Zero extend to i32 since there is not an i8 bsr.
30446 OpVT = MVT::i32;
30447 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
30448 }
30450 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
30451 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
30452 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
30454 if (Opc == ISD::CTLZ) {
30455 // If src is zero (i.e. bsr sets ZF), returns NumBits.
30456 SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
30457 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
30458 Op.getValue(1)};
30459 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
30460 }
30462 // Finally xor with NumBits-1.
30463 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
30464 DAG.getConstant(NumBits - 1, dl, OpVT));
30466 if (VT == MVT::i8)
30467 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
30468 return Op;
30469 }
30471 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
30472 SelectionDAG &DAG) {
30473 MVT VT = Op.getSimpleValueType();
30474 unsigned NumBits = VT.getScalarSizeInBits();
30475 SDValue N0 = Op.getOperand(0);
30478 assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
30479 "Only scalar CTTZ requires custom lowering");
30481 // Issue a bsf (scan bits forward) which also sets EFLAGS.
30482 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
30483 Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
30485 // If src is known never zero we can skip the CMOV.
30486 if (DAG.isKnownNeverZero(N0))
30487 return Op;
30489 // If src is zero (i.e. bsf sets ZF), returns NumBits.
30490 SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
30491 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
30492 Op.getValue(1)};
30493 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
30494 }
30496 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
30497 const X86Subtarget &Subtarget) {
30498 MVT VT = Op.getSimpleValueType();
30499 if (VT == MVT::i16 || VT == MVT::i32)
30500 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
30502 if (VT == MVT::v32i16 || VT == MVT::v64i8)
30503 return splitVectorIntBinary(Op, DAG);
30505 assert(Op.getSimpleValueType().is256BitVector() &&
30506 Op.getSimpleValueType().isInteger() &&
30507 "Only handle AVX 256-bit vector integer operation");
30508 return splitVectorIntBinary(Op, DAG);

static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
  unsigned Opcode = Op.getOpcode();
  SDLoc DL(Op);

  if (VT == MVT::v32i16 || VT == MVT::v64i8 ||
      (VT.is256BitVector() && !Subtarget.hasInt256())) {
    assert(Op.getSimpleValueType().isInteger() &&
           "Only handle AVX vector integer operation");
    return splitVectorIntBinary(Op, DAG);
  }

  // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT SetCCResultType =
      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  unsigned BitWidth = VT.getScalarSizeInBits();
  if (Opcode == ISD::USUBSAT) {
    if (!TLI.isOperationLegal(ISD::UMAX, VT) || useVPTERNLOG(Subtarget, VT)) {
      // Handle a special-case with a bit-hack instead of cmp+select:
      // usubsat X, SMIN --> (X ^ SMIN) & (X s>> BW-1)
      // If the target can use VPTERNLOG, DAGToDAG will match this as
      // "vpsra + vpternlog" which is better than "vpmax + vpsub" with a
      // "broadcast" constant load.
      ConstantSDNode *C = isConstOrConstSplat(Y, true);
      if (C && C->getAPIntValue().isSignMask()) {
        SDValue SignMask = DAG.getConstant(C->getAPIntValue(), DL, VT);
        SDValue ShiftAmt = DAG.getConstant(BitWidth - 1, DL, VT);
        SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, SignMask);
        SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShiftAmt);
        return DAG.getNode(ISD::AND, DL, VT, Xor, Sra);
      }
    }

    if (!TLI.isOperationLegal(ISD::UMAX, VT)) {
      // usubsat X, Y --> (X >u Y) ? X - Y : 0
      SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
      SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
      // TODO: Move this to DAGCombiner?
      if (SetCCResultType == VT &&
          DAG.ComputeNumSignBits(Cmp) == VT.getScalarSizeInBits())
        return DAG.getNode(ISD::AND, DL, VT, Cmp, Sub);
      return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
    }
  }

  if ((Opcode == ISD::SADDSAT || Opcode == ISD::SSUBSAT) &&
      (!VT.isVector() || VT == MVT::v2i64)) {
    APInt MinVal = APInt::getSignedMinValue(BitWidth);
    APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
    SDValue Zero = DAG.getConstant(0, DL, VT);
    SDValue Result =
        DAG.getNode(Opcode == ISD::SADDSAT ? ISD::SADDO : ISD::SSUBO, DL,
                    DAG.getVTList(VT, SetCCResultType), X, Y);
    SDValue SumDiff = Result.getValue(0);
    SDValue Overflow = Result.getValue(1);
    SDValue SatMin = DAG.getConstant(MinVal, DL, VT);
    SDValue SatMax = DAG.getConstant(MaxVal, DL, VT);
    SDValue SumNeg =
        DAG.getSetCC(DL, SetCCResultType, SumDiff, Zero, ISD::SETLT);
    Result = DAG.getSelect(DL, VT, SumNeg, SatMax, SatMin);
    return DAG.getSelect(DL, VT, Overflow, Result, SumDiff);
  }

  // Use default expansion.
  return SDValue();
}
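
// Worked example for the bit-hack above (illustrative, not from the original
// source): i8 usubsat(0x90, 0x80): (0x90 ^ 0x80) & (0x90 s>> 7)
// == 0x10 & 0xFF == 0x10 == 0x90 - 0x80, while
// i8 usubsat(0x30, 0x80): (0x30 ^ 0x80) & (0x30 s>> 7) == 0xB0 & 0x00 == 0,
// the saturated result.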

static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
                        SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
    // Since X86 does not have CMOV for 8-bit integer, we don't convert
    // 8-bit integer abs to NEG and CMOV.
    SDLoc DL(Op);
    SDValue N0 = Op.getOperand(0);
    SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
                              DAG.getConstant(0, DL, VT), N0);
    SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_NS, DL, MVT::i8),
                     SDValue(Neg.getNode(), 1)};
    return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
  }

  // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
  if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
    SDLoc DL(Op);
    SDValue Src = Op.getOperand(0);
    SDValue Sub =
        DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
    return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
  }

  if (VT.is256BitVector() && !Subtarget.hasInt256()) {
    assert(VT.isInteger() &&
           "Only handle AVX 256-bit vector integer operation");
    return splitVectorIntUnary(Op, DAG);
  }

  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
    return splitVectorIntUnary(Op, DAG);

  // Default to expand.
  return SDValue();
}

static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget,
                        SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // For AVX1 cases, split to use legal ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitVectorIntBinary(Op, DAG);

  if (VT == MVT::v32i16 || VT == MVT::v64i8)
    return splitVectorIntBinary(Op, DAG);

  // Default to expand.
  return SDValue();
}

static SDValue LowerMINMAX(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // For AVX1 cases, split to use legal ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitVectorIntBinary(Op, DAG);

  if (VT == MVT::v32i16 || VT == MVT::v64i8)
    return splitVectorIntBinary(Op, DAG);

  // Default to expand.
  return SDValue();
}

static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  assert((Op.getOpcode() == ISD::FMAXIMUM || Op.getOpcode() == ISD::FMINIMUM) &&
         "Expected FMAXIMUM or FMINIMUM opcode");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);
  SDLoc DL(Op);
  uint64_t SizeInBits = VT.getScalarSizeInBits();
  APInt PreferredZero = APInt::getZero(SizeInBits);
  APInt OppositeZero = PreferredZero;
  EVT IVT = VT.changeTypeToInteger();
  X86ISD::NodeType MinMaxOp;
  if (Op.getOpcode() == ISD::FMAXIMUM) {
    MinMaxOp = X86ISD::FMAX;
    OppositeZero.setSignBit();
  } else {
    PreferredZero.setSignBit();
    MinMaxOp = X86ISD::FMIN;
  }
  EVT SetCCType =
      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // The tables below show the expected result of Max in cases of NaN and
  // signed zeros.
  //
  //                 Y                          Y
  //             Num    xNaN                +0      -0
  //            ---------------            ---------------
  //      Num  |  Max |   Y  |        +0  |  +0  |  +0  |
  //  X         ---------------    X      ---------------
  //     xNaN  |   X  |  X/Y |        -0  |  +0  |  -0  |
  //            ---------------            ---------------
  //
  // It is achieved by means of FMAX/FMIN with preliminary checks and operand
  // reordering.
  //
  // We check if any of the operands is NaN and return NaN. Then we check if
  // any of the operands is zero or negative zero (for fmaximum and fminimum
  // respectively) to ensure the correct zero is returned.
  auto MatchesZero = [](SDValue Op, APInt Zero) {
    Op = peekThroughBitcasts(Op);
    if (auto *CstOp = dyn_cast<ConstantFPSDNode>(Op))
      return CstOp->getValueAPF().bitcastToAPInt() == Zero;
    if (auto *CstOp = dyn_cast<ConstantSDNode>(Op))
      return CstOp->getAPIntValue() == Zero;
    if (Op->getOpcode() == ISD::BUILD_VECTOR ||
        Op->getOpcode() == ISD::SPLAT_VECTOR) {
      for (const SDValue &OpVal : Op->op_values()) {
        if (OpVal.isUndef())
          continue;
        auto *CstOp = dyn_cast<ConstantFPSDNode>(OpVal);
        if (!CstOp)
          return false;
        if (!CstOp->getValueAPF().isZero())
          continue;
        if (CstOp->getValueAPF().bitcastToAPInt() != Zero)
          return false;
      }
      return true;
    }
    return false;
  };

  bool IsXNeverNaN = DAG.isKnownNeverNaN(X);
  bool IsYNeverNaN = DAG.isKnownNeverNaN(Y);
  bool IgnoreSignedZero = DAG.getTarget().Options.NoSignedZerosFPMath ||
                          Op->getFlags().hasNoSignedZeros() ||
                          DAG.isKnownNeverZeroFloat(X) ||
                          DAG.isKnownNeverZeroFloat(Y);
  SDValue NewX, NewY;
  if (IgnoreSignedZero || MatchesZero(Y, PreferredZero) ||
      MatchesZero(X, OppositeZero)) {
    // Operands are already in right order or order does not matter.
    NewX = X;
    NewY = Y;
  } else if (MatchesZero(X, PreferredZero) || MatchesZero(Y, OppositeZero)) {
    NewX = Y;
    NewY = X;
  } else if (!VT.isVector() && (VT == MVT::f16 || Subtarget.hasDQI()) &&
             (Op->getFlags().hasNoNaNs() || IsXNeverNaN || IsYNeverNaN)) {
    if (IsXNeverNaN)
      std::swap(X, Y);
    // VFPCLASSS consumes a vector type. So provide a minimal one corresponding
    // to this type.
    MVT VectorType = MVT::getVectorVT(VT.getSimpleVT(), 128 / SizeInBits);
    SDValue VX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorType, X);
    // Bits of classes:
    // Bits  Imm8[0] Imm8[1] Imm8[2] Imm8[3] Imm8[4]  Imm8[5]  Imm8[6] Imm8[7]
    // Class    QNAN PosZero NegZero  PosINF  NegINF Denormal Negative    SNAN
    SDValue Imm = DAG.getTargetConstant(MinMaxOp == X86ISD::FMAX ? 0b11 : 0b101,
                                        DL, MVT::i32);
    SDValue IsNanZero = DAG.getNode(X86ISD::VFPCLASSS, DL, MVT::v1i1, VX, Imm);
    SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
                              DAG.getConstant(0, DL, MVT::v8i1), IsNanZero,
                              DAG.getIntPtrConstant(0, DL));
    SDValue NeedSwap = DAG.getBitcast(MVT::i8, Ins);
    NewX = DAG.getSelect(DL, VT, NeedSwap, Y, X);
    NewY = DAG.getSelect(DL, VT, NeedSwap, X, Y);
    return DAG.getNode(MinMaxOp, DL, VT, NewX, NewY, Op->getFlags());
  } else {
    SDValue IsXSigned;
    if (Subtarget.is64Bit() || VT != MVT::f64) {
      SDValue XInt = DAG.getNode(ISD::BITCAST, DL, IVT, X);
      SDValue ZeroCst = DAG.getConstant(0, DL, IVT);
      IsXSigned = DAG.getSetCC(DL, SetCCType, XInt, ZeroCst, ISD::SETLT);
    } else {
      assert(VT == MVT::f64);
      SDValue Ins = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v2f64,
                                DAG.getConstantFP(0, DL, MVT::v2f64), X,
                                DAG.getIntPtrConstant(0, DL));
      SDValue VX = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, Ins);
      SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VX,
                               DAG.getIntPtrConstant(1, DL));
      Hi = DAG.getBitcast(MVT::i32, Hi);
      SDValue ZeroCst = DAG.getConstant(0, DL, MVT::i32);
      EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(),
                                             *DAG.getContext(), MVT::i32);
      IsXSigned = DAG.getSetCC(DL, SetCCType, Hi, ZeroCst, ISD::SETLT);
    }
    if (MinMaxOp == X86ISD::FMAX) {
      NewX = DAG.getSelect(DL, VT, IsXSigned, X, Y);
      NewY = DAG.getSelect(DL, VT, IsXSigned, Y, X);
    } else {
      NewX = DAG.getSelect(DL, VT, IsXSigned, Y, X);
      NewY = DAG.getSelect(DL, VT, IsXSigned, X, Y);
    }
  }

  bool IgnoreNaN = DAG.getTarget().Options.NoNaNsFPMath ||
                   Op->getFlags().hasNoNaNs() || (IsXNeverNaN && IsYNeverNaN);

  // If we did not order the operands for signed-zero handling and we need to
  // handle NaN, and we know that the second operand is not NaN, then put it
  // in the first operand so we will not need to post-handle NaN after max/min.
  if (IgnoreSignedZero && !IgnoreNaN && DAG.isKnownNeverNaN(NewY))
    std::swap(NewX, NewY);

  SDValue MinMax = DAG.getNode(MinMaxOp, DL, VT, NewX, NewY, Op->getFlags());

  if (IgnoreNaN || DAG.isKnownNeverNaN(NewX))
    return MinMax;

  SDValue IsNaN = DAG.getSetCC(DL, SetCCType, NewX, NewX, ISD::SETUO);
  return DAG.getSelect(DL, VT, IsNaN, NewX, MinMax);
}
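
// Illustrative note (not from the original source): IEEE-754 maximum/minimum
// differ from the raw MAXPS/MINPS semantics in exactly two cases, which the
// code above patches up: signed zeros (fmaximum(+0.0, -0.0) must be +0.0) and
// NaN propagation (fmaximum(NaN, x) must be NaN). Operand reordering handles
// the zero case; the trailing SETUO/select handles the NaN case.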

static SDValue LowerABD(SDValue Op, const X86Subtarget &Subtarget,
                        SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // For AVX1 cases, split to use legal ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitVectorIntBinary(Op, DAG);

  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.useBWIRegs())
    return splitVectorIntBinary(Op, DAG);

  SDLoc dl(Op);
  bool IsSigned = Op.getOpcode() == ISD::ABDS;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // TODO: Move to TargetLowering expandABD() once we have ABD promotion.
  if (VT.isScalarInteger()) {
    unsigned WideBits = std::max<unsigned>(2 * VT.getScalarSizeInBits(), 32u);
    MVT WideVT = MVT::getIntegerVT(WideBits);
    if (TLI.isTypeLegal(WideVT)) {
      // abds(lhs, rhs) -> trunc(abs(sub(sext(lhs), sext(rhs))))
      // abdu(lhs, rhs) -> trunc(abs(sub(zext(lhs), zext(rhs))))
      unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      SDValue LHS = DAG.getFreeze(Op.getOperand(0));
      SDValue RHS = DAG.getFreeze(Op.getOperand(1));
      LHS = DAG.getNode(ExtOpc, dl, WideVT, LHS);
      RHS = DAG.getNode(ExtOpc, dl, WideVT, RHS);
      SDValue Diff = DAG.getNode(ISD::SUB, dl, WideVT, LHS, RHS);
      SDValue AbsDiff = DAG.getNode(ISD::ABS, dl, WideVT, Diff);
      return DAG.getNode(ISD::TRUNCATE, dl, VT, AbsDiff);
    }
  }

  // TODO: Move to TargetLowering expandABD().
  if (!Subtarget.hasSSE41() &&
      ((IsSigned && VT == MVT::v16i8) || VT == MVT::v4i32)) {
    SDValue LHS = DAG.getFreeze(Op.getOperand(0));
    SDValue RHS = DAG.getFreeze(Op.getOperand(1));
    ISD::CondCode CC = IsSigned ? ISD::CondCode::SETGT : ISD::CondCode::SETUGT;
    SDValue Cmp = DAG.getSetCC(dl, VT, LHS, RHS, CC);
    SDValue Diff0 = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
    SDValue Diff1 = DAG.getNode(ISD::SUB, dl, VT, RHS, LHS);
    return getBitSelect(dl, VT, Diff0, Diff1, Cmp, DAG);
  }

  // Default to expand.
  return SDValue();
}
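
// Worked example for the widening path above (illustrative, not from the
// original source): i8 abds(-100, 80) becomes abs(-100 - 80) == 180 in i32,
// then truncates to the i8 bit pattern 0xB4, exactly the
// trunc(abs(sub(sext, sext))) form named in the comment above.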

static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
                        SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  // Decompose 256-bit ops into 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitVectorIntBinary(Op, DAG);

  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
    return splitVectorIntBinary(Op, DAG);

  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
  // vector pairs, multiply and truncate.
  if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
    unsigned NumElts = VT.getVectorNumElements();

    if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
        (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
      MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
      return DAG.getNode(
          ISD::TRUNCATE, dl, VT,
          DAG.getNode(ISD::MUL, dl, ExVT,
                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
    }

    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);

    // Extract the lo/hi parts to any extend to i16.
    // We're going to mask off the low byte of each result element of the
    // pmullw, so it doesn't matter what's in the high byte of each 16-bit
    // element.
    SDValue Undef = DAG.getUNDEF(VT);
    SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
    SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));

    SDValue BLo, BHi;
    if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
      // If the RHS is a constant, manually unpackl/unpackh.
      SmallVector<SDValue, 16> LoOps, HiOps;
      for (unsigned i = 0; i != NumElts; i += 16) {
        for (unsigned j = 0; j != 8; ++j) {
          LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
                                               MVT::i16));
          HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
                                               MVT::i16));
        }
      }

      BLo = DAG.getBuildVector(ExVT, dl, LoOps);
      BHi = DAG.getBuildVector(ExVT, dl, HiOps);
    } else {
      BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
      BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
    }

    // Multiply, mask the lower 8bits of the lo/hi results and pack.
    SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
    SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
    return getPack(DAG, Subtarget, dl, VT, RLo, RHi);
  }

  // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
  if (VT == MVT::v4i32) {
    assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
           "Should not custom lower when pmulld is available!");

    // Extract the odd parts.
    static const int UnpackMask[] = { 1, -1, 3, -1 };
    SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
    SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);

    // Multiply the even parts.
    SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
                                DAG.getBitcast(MVT::v2i64, A),
                                DAG.getBitcast(MVT::v2i64, B));
    // Now multiply odd parts.
    SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
                               DAG.getBitcast(MVT::v2i64, Aodds),
                               DAG.getBitcast(MVT::v2i64, Bodds));

    Evens = DAG.getBitcast(VT, Evens);
    Odds = DAG.getBitcast(VT, Odds);

    // Merge the two vectors back together with a shuffle. This expands into 2
    // shuffles.
    static const int ShufMask[] = { 0, 4, 2, 6 };
    return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
  }

  assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
         "Only know how to lower V2I64/V4I64/V8I64 multiply");
  assert(!Subtarget.hasDQI() && "DQI should use MULLQ");

  //  Ahi = psrlqi(a, 32);
  //  Bhi = psrlqi(b, 32);
  //
  //  AloBlo = pmuludq(a, b);
  //  AloBhi = pmuludq(a, Bhi);
  //  AhiBlo = pmuludq(Ahi, b);
  //
  //  Hi = psllqi(AloBhi + AhiBlo, 32);
  //  return AloBlo + Hi;
  KnownBits AKnown = DAG.computeKnownBits(A);
  KnownBits BKnown = DAG.computeKnownBits(B);

  APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
  bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
  bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);

  APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
  bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
  bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);

  SDValue Zero = DAG.getConstant(0, dl, VT);

  // Only multiply lo/hi halves that aren't known to be zero.
  SDValue AloBlo = Zero;
  if (!ALoIsZero && !BLoIsZero)
    AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);

  SDValue AloBhi = Zero;
  if (!ALoIsZero && !BHiIsZero) {
    SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
    AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
  }

  SDValue AhiBlo = Zero;
  if (!AHiIsZero && !BLoIsZero) {
    SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
    AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
  }

  SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
  Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);

  return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
}
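
// Illustrative note (not from the original source): the PMULUDQ expansion
// above relies on the identity, with a = 2^32*ah + al and b = 2^32*bh + bl:
//   (a * b) mod 2^64 == (al*bl + ((al*bh + ah*bl) << 32)) mod 2^64
// since the ah*bh term is shifted out entirely; the known-bits checks merely
// drop partial products that are provably zero.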

static SDValue LowervXi8MulWithUNPCK(SDValue A, SDValue B, const SDLoc &dl,
                                     MVT VT, bool IsSigned,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG,
                                     SDValue *Low = nullptr) {
  unsigned NumElts = VT.getVectorNumElements();

  // For vXi8 we will unpack the low and high half of each 128-bit lane to
  // widen to a vXi16 type. Do the multiplies, shift the results and pack the
  // half lane results back together.

  // We'll take different approaches for signed and unsigned.
  // For unsigned we'll use punpcklbw/punpckhbw to zero extend the bytes and
  // use pmullw to calculate the full 16-bit product.
  // For signed we'll use punpcklbw/punpckhbw to extend the bytes to words and
  // shift them left into the upper byte of each word. This allows us to use
  // pmulhw to calculate the full 16-bit product. This trick means we don't
  // need to sign extend the bytes to use pmullw.

  MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
  SDValue Zero = DAG.getConstant(0, dl, VT);

  SDValue ALo, AHi;
  if (IsSigned) {
    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, A));
    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, A));
  } else {
    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Zero));
    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Zero));
  }

  SDValue BLo, BHi;
  if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
    // If the RHS is a constant, manually unpackl/unpackh and extend.
    SmallVector<SDValue, 16> LoOps, HiOps;
    for (unsigned i = 0; i != NumElts; i += 16) {
      for (unsigned j = 0; j != 8; ++j) {
        SDValue LoOp = B.getOperand(i + j);
        SDValue HiOp = B.getOperand(i + j + 8);

        if (IsSigned) {
          LoOp = DAG.getAnyExtOrTrunc(LoOp, dl, MVT::i16);
          HiOp = DAG.getAnyExtOrTrunc(HiOp, dl, MVT::i16);
          LoOp = DAG.getNode(ISD::SHL, dl, MVT::i16, LoOp,
                             DAG.getConstant(8, dl, MVT::i16));
          HiOp = DAG.getNode(ISD::SHL, dl, MVT::i16, HiOp,
                             DAG.getConstant(8, dl, MVT::i16));
        } else {
          LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
          HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
        }

        LoOps.push_back(LoOp);
        HiOps.push_back(HiOp);
      }
    }

    BLo = DAG.getBuildVector(ExVT, dl, LoOps);
    BHi = DAG.getBuildVector(ExVT, dl, HiOps);
  } else if (IsSigned) {
    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, B));
    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, B));
  } else {
    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Zero));
    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Zero));
  }

  // Multiply, lshr the upper 8bits to the lower 8bits of the lo/hi results and
  // pack back to vXi8.
  unsigned MulOpc = IsSigned ? ISD::MULHS : ISD::MUL;
  SDValue RLo = DAG.getNode(MulOpc, dl, ExVT, ALo, BLo);
  SDValue RHi = DAG.getNode(MulOpc, dl, ExVT, AHi, BHi);

  if (Low)
    *Low = getPack(DAG, Subtarget, dl, VT, RLo, RHi);

  return getPack(DAG, Subtarget, dl, VT, RLo, RHi, /*PackHiHalf*/ true);
}
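
// Worked example for the signed trick above (illustrative, not from the
// original source): unpacking byte a into the high byte of a word produces
// a * 2^8, so pmulhw(a << 8, b << 8) == ((a*b) << 16) >> 16, i.e. the full
// signed 16-bit product a*b (|a*b| <= 128*128 always fits), without ever
// sign-extending the bytes.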

static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  bool IsSigned = Op->getOpcode() == ISD::MULHS;
  unsigned NumElts = VT.getVectorNumElements();
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  // Decompose 256-bit ops into 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitVectorIntBinary(Op, DAG);

  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
    return splitVectorIntBinary(Op, DAG);

  if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
    assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
           (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
           (VT == MVT::v16i32 && Subtarget.hasAVX512()));

    // PMULxD operations multiply each even value (starting at 0) of LHS with
    // the related value of RHS and produce a widened result.
    // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
    // => <2 x i64> <ae|cg>
    //
    // In other words, to have all the results, we need to perform two PMULxD:
    // 1. one with the even values.
    // 2. one with the odd values.
    // To achieve #2, we need to place the odd values at an even position.
    //
    // Place the odd value at an even position (basically, shift all values 1
    // step to the left):
    const int Mask[] = {1, -1,  3, -1,  5, -1,  7, -1,
                        9, -1, 11, -1, 13, -1, 15, -1};
    // <a|b|c|d> => <b|undef|d|undef>
    SDValue Odd0 =
        DAG.getVectorShuffle(VT, dl, A, A, ArrayRef(&Mask[0], NumElts));
    // <e|f|g|h> => <f|undef|h|undef>
    SDValue Odd1 =
        DAG.getVectorShuffle(VT, dl, B, B, ArrayRef(&Mask[0], NumElts));

    // Emit two multiplies, one for the lower 2 ints and one for the higher 2
    // ints.
    MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
    unsigned Opcode =
        (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
    // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
    // => <2 x i64> <ae|cg>
    SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
                                                  DAG.getBitcast(MulVT, A),
                                                  DAG.getBitcast(MulVT, B)));
    // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
    // => <2 x i64> <bf|dh>
    SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
                                                  DAG.getBitcast(MulVT, Odd0),
                                                  DAG.getBitcast(MulVT, Odd1)));

    // Shuffle it back into the right order.
    SmallVector<int, 16> ShufMask(NumElts);
    for (int i = 0; i != (int)NumElts; ++i)
      ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;

    SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);

    // If we have a signed multiply but no PMULDQ fix up the result of an
    // unsigned multiply.
    if (IsSigned && !Subtarget.hasSSE41()) {
      SDValue Zero = DAG.getConstant(0, dl, VT);
      SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
                               DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
      SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
                               DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);

      SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
    }

    return Res;
  }

  // Only i8 vectors should need custom lowering after this.
  assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
          (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
         "Unsupported vector type");

  // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
  // logical shift down the upper half and pack back to i8.

  // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
  // and then ashr/lshr the upper bits down to the lower bits before multiply.

  if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
      (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
    unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
    SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
    Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
  }

  return LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG);
}
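
// Illustrative note (not from the original source): the pre-SSE41 fixup above
// uses the per-element identity
//   mulhs(a, b) == mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
// since reinterpreting a negative a as unsigned adds 2^N to it, contributing
// an extra b to the high half of the product (and symmetrically for b).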

// Custom lowering for SMULO/UMULO.
static SDValue LowerMULO(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // Scalars defer to LowerXALUO.
  if (!VT.isVector())
    return LowerXALUO(Op, DAG);

  SDLoc dl(Op);
  bool IsSigned = Op->getOpcode() == ISD::SMULO;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);
  EVT OvfVT = Op->getValueType(1);

  if ((VT == MVT::v32i8 && !Subtarget.hasInt256()) ||
      (VT == MVT::v64i8 && !Subtarget.hasBWI())) {
    // Extract the LHS Lo/Hi vectors
    SDValue LHSLo, LHSHi;
    std::tie(LHSLo, LHSHi) = splitVector(A, DAG, dl);

    // Extract the RHS Lo/Hi vectors
    SDValue RHSLo, RHSHi;
    std::tie(RHSLo, RHSHi) = splitVector(B, DAG, dl);

    EVT LoOvfVT, HiOvfVT;
    std::tie(LoOvfVT, HiOvfVT) = DAG.GetSplitDestVTs(OvfVT);
    SDVTList LoVTs = DAG.getVTList(LHSLo.getValueType(), LoOvfVT);
    SDVTList HiVTs = DAG.getVTList(LHSHi.getValueType(), HiOvfVT);

    // Issue the split operations.
    SDValue Lo = DAG.getNode(Op.getOpcode(), dl, LoVTs, LHSLo, RHSLo);
    SDValue Hi = DAG.getNode(Op.getOpcode(), dl, HiVTs, LHSHi, RHSHi);

    // Join the separate data results and the overflow results.
    SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
    SDValue Ovf = DAG.getNode(ISD::CONCAT_VECTORS, dl, OvfVT, Lo.getValue(1),
                              Hi.getValue(1));

    return DAG.getMergeValues({Res, Ovf}, dl);
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT SetccVT =
      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
      (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
    unsigned NumElts = VT.getVectorNumElements();
    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
    unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
    SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);

    SDValue Low = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);

    SDValue Ovf;
    if (IsSigned) {
      SDValue High, LowSign;
      if (OvfVT.getVectorElementType() == MVT::i1 &&
          (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
        // Rather than truncating, try to do the compare on vXi16 or vXi32.
        // Shift the high down filling with sign bits.
        High = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Mul, 8, DAG);
        // Fill all 16 bits with the sign bit from the low.
        LowSign =
            getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExVT, Mul, 8, DAG);
        LowSign = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, LowSign,
                                             15, DAG);
        SetccVT = OvfVT;
        if (!Subtarget.hasBWI()) {
          // We can't do a vXi16 compare so sign extend to v16i32.
          High = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, High);
          LowSign = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, LowSign);
        }
      } else {
        // Otherwise do the compare at vXi8.
        High = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
        High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
        LowSign =
            DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
      }

      Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
    } else {
      SDValue High =
          getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
      if (OvfVT.getVectorElementType() == MVT::i1 &&
          (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
        // Rather than truncating, try to do the compare on vXi16 or vXi32.
        SetccVT = OvfVT;
        if (!Subtarget.hasBWI()) {
          // We can't do a vXi16 compare so sign extend to v16i32.
          High = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, High);
        }
      } else {
        // Otherwise do the compare at vXi8.
        High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
      }

      Ovf =
          DAG.getSetCC(dl, SetccVT, High,
                       DAG.getConstant(0, dl, High.getValueType()), ISD::SETNE);
    }

    Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);

    return DAG.getMergeValues({Low, Ovf}, dl);
  }

  SDValue Low;
  SDValue High =
      LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG, &Low);

  SDValue Ovf;
  if (IsSigned) {
    // SMULO overflows if the high bits don't match the sign of the low.
    SDValue LowSign =
        DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
    Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
  } else {
    // UMULO overflows if the high bits are non-zero.
    Ovf =
        DAG.getSetCC(dl, SetccVT, High, DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);

  return DAG.getMergeValues({Low, Ovf}, dl);
}
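
// Worked example for the overflow test above (illustrative, not from the
// original source): i8 smulo(-5, 30): the 16-bit product is -150 == 0xFF6A,
// so Low == 0x6A and High == 0xFF. The sign-fill of Low is 0x00, which
// differs from High, flagging overflow, as -150 is not representable in i8.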

SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op,
                                             SelectionDAG &DAG) const {
  assert(Subtarget.isTargetWin64() && "Unexpected target");
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
         "Unexpected return type for lowering");

  if (isa<ConstantSDNode>(Op->getOperand(1))) {
    SmallVector<SDValue> Result;
    if (expandDIVREMByConstant(Op.getNode(), Result, MVT::i64, DAG))
      return DAG.getNode(ISD::BUILD_PAIR, SDLoc(Op), VT, Result[0], Result[1]);
  }

  RTLIB::Libcall LC;
  bool isSigned;
  switch (Op->getOpcode()) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case ISD::SDIV: isSigned = true;  LC = RTLIB::SDIV_I128; break;
  case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
  case ISD::SREM: isSigned = true;  LC = RTLIB::SREM_I128; break;
  case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
  }

  SDLoc dl(Op);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op->getOperand(i).getValueType();
    assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
           "Unexpected argument type for lowering");
    SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
    int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
    MachinePointerInfo MPI =
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
    Entry.Node = StackPtr;
    InChain =
        DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MPI, Align(16));
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Ty = PointerType::get(ArgTy, 0);
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(
          getLibcallCallingConv(LC),
          static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
          std::move(Args))
      .setInRegister()
      .setSExtResult(isSigned)
      .setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return DAG.getBitcast(VT, CallInfo.first);
}
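
// Illustrative note (not from the original source): on Win64 an i128 udiv
// therefore becomes a call such as __udivti3, with both i128 operands spilled
// to 16-byte-aligned stack slots and passed by pointer, and the result coming
// back as v2i64 before being bitcast to i128.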

SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   SDValue &Chain) const {
  assert(Subtarget.isTargetWin64() && "Unexpected target");
  EVT VT = Op.getValueType();
  bool IsStrict = Op->isStrictFPOpcode();

  SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
  EVT ArgVT = Arg.getValueType();

  assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
         "Unexpected return type for lowering");

  RTLIB::Libcall LC;
  if (Op->getOpcode() == ISD::FP_TO_SINT ||
      Op->getOpcode() == ISD::STRICT_FP_TO_SINT)
    LC = RTLIB::getFPTOSINT(ArgVT, VT);
  else
    LC = RTLIB::getFPTOUINT(ArgVT, VT);
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");

  SDLoc dl(Op);
  MakeLibCallOptions CallOptions;
  Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();

  SDValue Result;
  // Expect the i128 argument returned as a v2i64 in xmm0, cast back to the
  // expected VT (i128).
  std::tie(Result, Chain) =
      makeLibCall(DAG, LC, MVT::v2i64, Arg, CallOptions, dl, Chain);
  Result = DAG.getBitcast(VT, Result);
  return Result;
}

SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op,
                                                   SelectionDAG &DAG) const {
  assert(Subtarget.isTargetWin64() && "Unexpected target");
  EVT VT = Op.getValueType();
  bool IsStrict = Op->isStrictFPOpcode();

  SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
  EVT ArgVT = Arg.getValueType();

  assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
         "Unexpected argument type for lowering");

  RTLIB::Libcall LC;
  if (Op->getOpcode() == ISD::SINT_TO_FP ||
      Op->getOpcode() == ISD::STRICT_SINT_TO_FP)
    LC = RTLIB::getSINTTOFP(ArgVT, VT);
  else
    LC = RTLIB::getUINTTOFP(ArgVT, VT);
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");

  SDLoc dl(Op);
  MakeLibCallOptions CallOptions;
  SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();

  // Pass the i128 argument as an indirect argument on the stack.
  SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  MachinePointerInfo MPI =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
  Chain = DAG.getStore(Chain, dl, Arg, StackPtr, MPI, Align(16));

  SDValue Result;
  std::tie(Result, Chain) =
      makeLibCall(DAG, LC, VT, StackPtr, CallOptions, dl, Chain);
  return IsStrict ? DAG.getMergeValues({Result, Chain}, dl) : Result;
}

// Return true if the required (according to Opcode) shift-imm form is natively
// supported by the Subtarget.
static bool supportedVectorShiftWithImm(EVT VT, const X86Subtarget &Subtarget,
                                        unsigned Opcode) {
  if (!VT.isSimple())
    return false;

  if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
    return false;

  if (VT.getScalarSizeInBits() < 16)
    return false;

  if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
      (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
    return true;

  bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
                (VT.is256BitVector() && Subtarget.hasInt256());

  bool AShift = LShift && (Subtarget.hasAVX512() ||
                           (VT != MVT::v2i64 && VT != MVT::v4i64));
  return (Opcode == ISD::SRA) ? AShift : LShift;
}

// The shift amount is a variable, but it is the same for all vector lanes.
// These instructions are defined together with shift-immediate.
static
bool supportedVectorShiftWithBaseAmnt(EVT VT, const X86Subtarget &Subtarget,
                                      unsigned Opcode) {
  return supportedVectorShiftWithImm(VT, Subtarget, Opcode);
}

// Return true if the required (according to Opcode) variable-shift form is
// natively supported by the Subtarget.
static bool supportedVectorVarShift(EVT VT, const X86Subtarget &Subtarget,
                                    unsigned Opcode) {
  if (!VT.isSimple())
    return false;

  if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
    return false;

  if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
    return false;

  // vXi16 supported only on AVX-512, BWI
  if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
    return false;

  if (Subtarget.hasAVX512() &&
      (Subtarget.useAVX512Regs() || !VT.is512BitVector()))
    return true;

  bool LShift = VT.is128BitVector() || VT.is256BitVector();
  bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
  return (Opcode == ISD::SRA) ? AShift : LShift;
}
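
// Illustrative note (not from the original source): these predicates mirror
// the ISA. Uniform-amount shifts (PSLLW/PSLLD/PSLLQ and friends) exist from
// SSE2; per-element variable shifts (VPSLLVD/VPSRLVD/VPSRLVQ etc.) require
// AVX2; variable vXi16 shifts require AVX512BW; and 64-bit arithmetic shifts
// require AVX512.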

static SDValue LowerShiftByScalarImmediate(SDValue Op, SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);

  auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
    assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
    MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
    SDValue Ex = DAG.getBitcast(ExVT, R);

    // ashr(R, 63) === cmp_slt(R, 0)
    if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
      assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
             "Unsupported PCMPGT op");
      return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
    }

    if (ShiftAmt >= 32) {
      // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
      SDValue Upper =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
      SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                                 ShiftAmt - 32, DAG);
      if (VT == MVT::v2i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
      if (VT == MVT::v4i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
                                  {9, 1, 11, 3, 13, 5, 15, 7});
    } else {
      // SRA upper i32, SRL whole i64 and select lower i32.
      SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                                 ShiftAmt, DAG);
      SDValue Lower =
          getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
      Lower = DAG.getBitcast(ExVT, Lower);
      if (VT == MVT::v2i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
      if (VT == MVT::v4i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
                                  {8, 1, 10, 3, 12, 5, 14, 7});
    }
    return DAG.getBitcast(VT, Ex);
  };

  // Optimize shl/srl/sra with constant shift amount.
  APInt APIntShiftAmt;
  if (!X86::isConstantSplat(Amt, APIntShiftAmt))
    return SDValue();

  // If the shift amount is out of range, return undef.
  if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
    return DAG.getUNDEF(VT);

  uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();

  if (supportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) {
    // Hardware support for vector shifts is sparse, which makes us scalarize
    // the vector operations in many cases. Also, on sandybridge ADD is faster
    // than shl: (shl V, 1) -> (add (freeze V), (freeze V))
    if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
      // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
      // must be 0). (add undef, undef) however can be any value. To make this
      // safe, we must freeze R to ensure that register allocation uses the same
      // register for an undefined value. This ensures that the result will
      // still be even and preserves the original semantics.
      R = DAG.getFreeze(R);
      return DAG.getNode(ISD::ADD, dl, VT, R, R);
    }

    return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
  }

  // i64 SRA needs to be performed as partial shifts.
  if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
       (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
      Op.getOpcode() == ISD::SRA)
    return ArithmeticShiftRight64(ShiftAmt);

  if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
      (Subtarget.hasBWI() && VT == MVT::v64i8)) {
    unsigned NumElts = VT.getVectorNumElements();
    MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);

    // Simple i8 add case
    if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
      // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
      // must be 0). (add undef, undef) however can be any value. To make this
      // safe, we must freeze R to ensure that register allocation uses the same
      // register for an undefined value. This ensures that the result will
      // still be even and preserves the original semantics.
      R = DAG.getFreeze(R);
      return DAG.getNode(ISD::ADD, dl, VT, R, R);
    }

    // ashr(R, 7) === cmp_slt(R, 0)
    if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
      SDValue Zeros = DAG.getConstant(0, dl, VT);
      if (VT.is512BitVector()) {
        assert(VT == MVT::v64i8 && "Unexpected element type!");
        SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
        return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
      }
      return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
    }

    // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
    if (VT == MVT::v16i8 && Subtarget.hasXOP())
      return SDValue();

    if (Op.getOpcode() == ISD::SHL) {
      // Make a large shift.
      SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
                                               ShiftAmt, DAG);
      SHL = DAG.getBitcast(VT, SHL);
      // Zero out the rightmost bits.
      APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
      return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
    }
    if (Op.getOpcode() == ISD::SRL) {
      // Make a large shift.
      SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
                                               ShiftAmt, DAG);
      SRL = DAG.getBitcast(VT, SRL);
      // Zero out the leftmost bits.
      APInt Mask = APInt::getLowBitsSet(8, 8 - ShiftAmt);
      return DAG.getNode(ISD::AND, dl, VT, SRL, DAG.getConstant(Mask, dl, VT));
    }
    if (Op.getOpcode() == ISD::SRA) {
      // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
      SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);

      SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
      Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
      return Res;
    }
    llvm_unreachable("Unknown shift opcode.");
  }

  return SDValue();
}
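
// Worked example for the vXi8 SRA trick above (illustrative, not from the
// original source): with Mask == 128 >> ShiftAmt,
//   ashr(x, s) == (lshr(x, s) ^ Mask) - Mask.
// E.g. x == 0xF0 (-16), s == 2: lshr gives 0x3C, Mask == 0x20,
// 0x3C ^ 0x20 == 0x1C, and 0x1C - 0x20 == 0xFC == -4 == ashr(-16, 2).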

static SDValue LowerShiftByScalarVariable(SDValue Op, SelectionDAG &DAG,
                                          const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned Opcode = Op.getOpcode();
  unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);

  int BaseShAmtIdx = -1;
  if (SDValue BaseShAmt = DAG.getSplatSourceVector(Amt, BaseShAmtIdx)) {
    if (supportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode))
      return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, BaseShAmtIdx,
                                 Subtarget, DAG);

    // vXi8 shifts - shift as v8i16 + mask result.
    if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
         (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
         VT == MVT::v64i8) &&
        !Subtarget.hasXOP()) {
      unsigned NumElts = VT.getVectorNumElements();
      MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
      if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
        unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
        unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);

        // Create the mask using vXi16 shifts. For shift-rights we need to move
        // the upper byte down before splatting the vXi8 mask.
        SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
        BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
                                      BaseShAmt, BaseShAmtIdx, Subtarget, DAG);
        if (Opcode != ISD::SHL)
          BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
                                               8, DAG);
        BitMask = DAG.getBitcast(VT, BitMask);
        BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
                                       SmallVector<int, 64>(NumElts, 0));

        SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
                                          DAG.getBitcast(ExtVT, R), BaseShAmt,
                                          BaseShAmtIdx, Subtarget, DAG);
        Res = DAG.getBitcast(VT, Res);
        Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);

        if (Opcode == ISD::SRA) {
          // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
          // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
          SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
          SignMask =
              getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask, BaseShAmt,
                                  BaseShAmtIdx, Subtarget, DAG);
          SignMask = DAG.getBitcast(VT, SignMask);
          Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
          Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
        }
        return Res;
      }
    }
  }

  return SDValue();
}

// Convert a shift/rotate left amount to a multiplication scale factor.
static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  MVT VT = Amt.getSimpleValueType();
  if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
        (Subtarget.hasInt256() && VT == MVT::v16i16) ||
        (Subtarget.hasAVX512() && VT == MVT::v32i16) ||
        (!Subtarget.hasAVX512() && VT == MVT::v16i8) ||
        (Subtarget.hasInt256() && VT == MVT::v32i8) ||
        (Subtarget.hasBWI() && VT == MVT::v64i8)))
    return SDValue();

  MVT SVT = VT.getVectorElementType();
  unsigned SVTBits = SVT.getSizeInBits();
  unsigned NumElems = VT.getVectorNumElements();

  APInt UndefElts;
  SmallVector<APInt> EltBits;
  if (getTargetConstantBitsFromNode(Amt, SVTBits, UndefElts, EltBits)) {
    APInt One(SVTBits, 1);
    SmallVector<SDValue> Elts(NumElems, DAG.getUNDEF(SVT));
    for (unsigned I = 0; I != NumElems; ++I) {
      if (UndefElts[I] || EltBits[I].uge(SVTBits))
        continue;
      uint64_t ShAmt = EltBits[I].getZExtValue();
      Elts[I] = DAG.getConstant(One.shl(ShAmt), dl, SVT);
    }
    return DAG.getBuildVector(VT, dl, Elts);
  }

  // If the target doesn't support variable shifts, use either FP conversion
  // or integer multiplication to avoid shifting each element individually.
  if (VT == MVT::v4i32) {
    Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
                      DAG.getConstant(0x3f800000U, dl, VT));
    Amt = DAG.getBitcast(MVT::v4f32, Amt);
    return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
  }

  // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
  if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
    SDValue Z = DAG.getConstant(0, dl, VT);
    SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
    SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
    Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
    Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
    if (Subtarget.hasSSE41())
      return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
    return getPack(DAG, Subtarget, dl, VT, Lo, Hi);
  }

  return SDValue();
}
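
// Illustrative note (not from the original source): the v4i32 path above
// builds the float 2^Amt directly in its bit pattern: (Amt << 23) places Amt
// in the exponent field and adding 0x3f800000 (1.0f) biases it, so e.g.
// Amt == 5 gives 0x42000000 == 32.0f, and FP_TO_SINT recovers the integer
// scale 32 == 1 << 5.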

static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());

  unsigned Opc = Op.getOpcode();
  unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
  unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);

  assert(VT.isVector() && "Custom lowering only for vector shifts!");
  assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");

  if (SDValue V = LowerShiftByScalarImmediate(Op, DAG, Subtarget))
    return V;

  if (SDValue V = LowerShiftByScalarVariable(Op, DAG, Subtarget))
    return V;

  if (supportedVectorVarShift(VT, Subtarget, Opc))
    return Op;

  // i64 vector arithmetic shift can be emulated with the transform:
  // M = lshr(SIGN_MASK, Amt)
  // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
  if (((VT == MVT::v2i64 && !Subtarget.hasXOP()) ||
       (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
      Opc == ISD::SRA) {
    SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
    SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
    R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
    R = DAG.getNode(ISD::XOR, dl, VT, R, M);
    R = DAG.getNode(ISD::SUB, dl, VT, R, M);
    return R;
  }
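
  // Illustrative note (not from the original source): this is the same
  // xor/sub sign-extension identity as the constant byte-shift case, but with
  // a per-lane variable mask: M == lshr(0x8000000000000000, Amt) marks where
  // the shifted-in sign bits start, and (lshr(R, Amt) ^ M) - M sign-fills
  // them.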

  // XOP has 128-bit variable logical/arithmetic shifts.
  // +ve/-ve Amt = shift left/right.
  if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
                             VT == MVT::v8i16 || VT == MVT::v16i8)) {
    if (Opc == ISD::SRL || Opc == ISD::SRA) {
      SDValue Zero = DAG.getConstant(0, dl, VT);
      Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
    }
    if (Opc == ISD::SHL || Opc == ISD::SRL)
      return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
    if (Opc == ISD::SRA)
      return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
  }

  // 2i64 vector logical shifts can efficiently avoid scalarization - do the
  // shifts per-lane and then shuffle the partial results back together.
  if (VT == MVT::v2i64 && Opc != ISD::SRA) {
    // Splat the shift amounts so the scalar shifts above will catch it.
    SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
    SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
    SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
    SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
    return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
  }

  // If possible, lower this shift as a sequence of two shifts by
  // constant plus a BLENDing shuffle instead of scalarizing it.
  //
  // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
  //
  // Could be rewritten as:
  // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
  //
  // The advantage is that the two shifts from the example would be
  // lowered as X86ISD::VSRLI nodes in parallel before blending.
  if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
                      (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
    SDValue Amt1, Amt2;
    unsigned NumElts = VT.getVectorNumElements();
    SmallVector<int, 8> ShuffleMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      SDValue A = Amt->getOperand(i);
      if (A.isUndef()) {
        ShuffleMask.push_back(SM_SentinelUndef);
        continue;
      }
      if (!Amt1 || Amt1 == A) {
        ShuffleMask.push_back(i);
        Amt1 = A;
        continue;
      }
      if (!Amt2 || Amt2 == A) {
        ShuffleMask.push_back(i + NumElts);
        Amt2 = A;
        continue;
      }
      break;
    }

    // Only perform this blend if we can perform it without loading a mask.
    if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
        (VT != MVT::v16i16 ||
         is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
        (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
         canWidenShuffleElements(ShuffleMask))) {
      auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
      auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
      if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
          Cst2->getAPIntValue().ult(EltSizeInBits)) {
        SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
                                                    Cst1->getZExtValue(), DAG);
        SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
                                                    Cst2->getZExtValue(), DAG);
        return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
      }
    }
  }

  // If possible, lower this packed shift into a vector multiply instead of
  // expanding it into a sequence of scalar shifts.
  // For v32i8 cases, it might be quicker to split/extend to vXi16 shifts.
  if (Opc == ISD::SHL && !(VT == MVT::v32i8 && (Subtarget.hasXOP() ||
                                                Subtarget.canExtendTo512BW())))
    if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
      return DAG.getNode(ISD::MUL, dl, VT, R, Scale);

  // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
  // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
  if (Opc == ISD::SRL && ConstantAmt &&
      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
      SDValue Zero = DAG.getConstant(0, dl, VT);
      SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
      SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
      return DAG.getSelect(dl, VT, ZAmt, R, Res);
    }
  }

  // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
  // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
  // TODO: Special case handling for shift by 0/1, really we can afford either
  // of these cases in pre-SSE41/XOP/AVX512 but not both.
  if (Opc == ISD::SRA && ConstantAmt &&
      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
      ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
        !Subtarget.hasAVX512()) ||
       DAG.isKnownNeverZero(Amt))) {
    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
      SDValue Amt0 =
          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
      SDValue Amt1 =
          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
      SDValue Sra1 =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
      SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
      Res = DAG.getSelect(dl, VT, Amt0, R, Res);
      return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
    }
  }

  // v4i32 Non Uniform Shifts.
  // If the shift amount is constant we can shift each lane using the SSE2
  // immediate shifts, else we need to zero-extend each lane to the lower i64
  // and shift using the SSE2 variable shifts.
  // The separate results can then be blended together.
  if (VT == MVT::v4i32) {
    SDValue Amt0, Amt1, Amt2, Amt3;
    if (ConstantAmt) {
      Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
      Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
      Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
      Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
    } else {
      // The SSE2 shifts use the lower i64 as the same shift amount for
      // all lanes and the upper i64 is ignored. On AVX we're better off
      // just zero-extending, but for SSE just duplicating the top 16-bits is
      // cheaper and has the same effect for out of range values.
      if (Subtarget.hasAVX()) {
        SDValue Z = DAG.getConstant(0, dl, VT);
        Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
        Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
        Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
        Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
      } else {
        SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
        SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
                                             {4, 5, 6, 7, -1, -1, -1, -1});
        SDValue Msk02 = getV4X86ShuffleImm8ForMask({0, 1, 1, 1}, dl, DAG);
        SDValue Msk13 = getV4X86ShuffleImm8ForMask({2, 3, 3, 3}, dl, DAG);
        Amt0 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk02);
        Amt1 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk13);
        Amt2 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk02);
        Amt3 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk13);
      }
    }

    unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
    SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
    SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
    SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
    SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));

    // Merge the shifted lane results optimally with/without PBLENDW.
    // TODO - ideally shuffle combining would handle this.
    if (Subtarget.hasSSE41()) {
      SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
      SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
      return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
    }
    SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
    SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
    return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
  }

  // It's worth extending once and using the vXi16/vXi32 shifts for smaller
  // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
  // make the existing SSE solution better.
  // NOTE: We honor preferred vector width before promoting to 512-bits.
  if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
      (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
      (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
    assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
           "Unexpected vector type");
    MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
    MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
    unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    R = DAG.getNode(ExtOpc, dl, ExtVT, R);
    Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
    return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Opc, dl, ExtVT, R, Amt));
  }

  // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
  // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
  if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
      (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
       (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
      !Subtarget.hasXOP()) {
    int NumElts = VT.getVectorNumElements();
    SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);

    // Extend constant shift amount to vXi16 (it doesn't matter if the type
    // isn't legal).
    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
    Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
    Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
    Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
    assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
           "Constant build vector expected");

    if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
      bool IsSigned = Opc == ISD::SRA;
      R = DAG.getExtOrTrunc(IsSigned, R, dl, ExVT);
      R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
      R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
      return DAG.getZExtOrTrunc(R, dl, VT);
    }

    SmallVector<SDValue, 16> LoAmt, HiAmt;
    for (int i = 0; i != NumElts; i += 16) {
      for (int j = 0; j != 8; ++j) {
        LoAmt.push_back(Amt.getOperand(i + j));
        HiAmt.push_back(Amt.getOperand(i + j + 8));
      }
    }

    MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
    SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
    SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);

    SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
    SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
    LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
    HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
    LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
    HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
    LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
    HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
    return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
  }
32048 if (VT == MVT::v16i8 ||
32049 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
32050 (VT == MVT::v64i8 && Subtarget.hasBWI())) {
32051 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
32053 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
32054 if (VT.is512BitVector()) {
32055 // On AVX512BW targets we make use of the fact that VSELECT lowers
32056 // to a masked blend which selects bytes based just on the sign bit
32057 // extracted to a mask.
32058 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
32059 V0 = DAG.getBitcast(VT, V0);
32060 V1 = DAG.getBitcast(VT, V1);
32061 Sel = DAG.getBitcast(VT, Sel);
32062 Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
32064 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
32065 } else if (Subtarget.hasSSE41()) {
32066 // On SSE41 targets we can use PBLENDVB which selects bytes based just
32067 // on the sign bit.
32068 V0 = DAG.getBitcast(VT, V0);
32069 V1 = DAG.getBitcast(VT, V1);
32070 Sel = DAG.getBitcast(VT, Sel);
32071 return DAG.getBitcast(SelVT,
32072 DAG.getNode(X86ISD::BLENDV, dl, VT, Sel, V0, V1));
32074 // On pre-SSE41 targets we test for the sign bit by comparing to
32075 // zero - a negative value will set all bits of the lanes to true
32076 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
32077 SDValue Z = DAG.getConstant(0, dl, SelVT);
32078 SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
32079 return DAG.getSelect(dl, SelVT, C, V0, V1);
32082 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
32083 // We can safely do this using i16 shifts as we're only interested in
32084 // the 3 lower bits of each byte.
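    // Worked example (illustrative only): amt = 6 = 0b110. After 'a << 5'
    // the sign bit holds amt bit 2, so the first select applies 'shift by 4'.
    // Each 'a += a' then exposes the next lower amt bit: bit 1 selects
    // 'shift by 2' (applied for 6), bit 0 selects 'shift by 1' (skipped),
    // giving a total shift of 4 + 2 = 6.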
    Amt = DAG.getBitcast(ExtVT, Amt);
    Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
    Amt = DAG.getBitcast(VT, Amt);

    if (Opc == ISD::SHL || Opc == ISD::SRL) {
      // r = VSELECT(r, shift(r, 4), a);
      SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
      R = SignBitSelect(VT, Amt, M, R);

      // a += a
      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

      // r = VSELECT(r, shift(r, 2), a);
      M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
      R = SignBitSelect(VT, Amt, M, R);

      // a += a
      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

      // return VSELECT(r, shift(r, 1), a);
      M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
      R = SignBitSelect(VT, Amt, M, R);
      return R;
    }

    if (Opc == ISD::SRA) {
      // For SRA we need to unpack each byte to the higher byte of a i16 vector
      // so we can correctly sign extend. We don't care what happens to the
      // lower byte.
      SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
      SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
      SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
      SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
      ALo = DAG.getBitcast(ExtVT, ALo);
      AHi = DAG.getBitcast(ExtVT, AHi);
      RLo = DAG.getBitcast(ExtVT, RLo);
      RHi = DAG.getBitcast(ExtVT, RHi);

      // r = VSELECT(r, shift(r, 4), a);
      SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
      SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);

      // a += a
      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);

      // r = VSELECT(r, shift(r, 2), a);
      MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
      MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);

      // a += a
      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);

      // r = VSELECT(r, shift(r, 1), a);
      MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
      MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);

      // Logical shift the result back to the lower byte, leaving a zero upper
      // byte meaning that we can safely pack with PACKUSWB.
      RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
      RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
      return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
    }
  }

  if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
    MVT ExtVT = MVT::v8i32;
    SDValue Z = DAG.getConstant(0, dl, VT);
    SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
    SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
    SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
    SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
    ALo = DAG.getBitcast(ExtVT, ALo);
    AHi = DAG.getBitcast(ExtVT, AHi);
    RLo = DAG.getBitcast(ExtVT, RLo);
    RHi = DAG.getBitcast(ExtVT, RHi);
    SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
    SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
    Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
    Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
    return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
  }

  if (VT == MVT::v8i16) {
    // If we have a constant shift amount, the non-SSE41 path is best as
    // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
    bool UseSSE41 = Subtarget.hasSSE41() &&
                    !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());

    auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
      // On SSE41 targets we can use PBLENDVB which selects bytes based just on
      // the sign bit.
      if (UseSSE41) {
        MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
        V0 = DAG.getBitcast(ExtVT, V0);
        V1 = DAG.getBitcast(ExtVT, V1);
        Sel = DAG.getBitcast(ExtVT, Sel);
        return DAG.getBitcast(
            VT, DAG.getNode(X86ISD::BLENDV, dl, ExtVT, Sel, V0, V1));
      }
      // On pre-SSE41 targets we splat the sign bit - a negative value will
      // set all bits of the lanes to true and VSELECT uses that in
      // its OR(AND(V0,C),AND(V1,~C)) lowering.
      SDValue C =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
      return DAG.getSelect(dl, VT, C, V0, V1);
    };

    // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
    if (UseSSE41) {
      // On SSE41 targets we need to replicate the shift mask in both
      // bytes for PBLENDVB.
      Amt = DAG.getNode(
          ISD::OR, dl, VT,
          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
    } else {
      Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
    }

    // r = VSELECT(r, shift(r, 8), a);
    SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
    R = SignBitSelect(Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

    // r = VSELECT(r, shift(r, 4), a);
    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
    R = SignBitSelect(Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

    // r = VSELECT(r, shift(r, 2), a);
    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
    R = SignBitSelect(Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);

    // return VSELECT(r, shift(r, 1), a);
    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
    R = SignBitSelect(Amt, M, R);
    return R;
  }

  // Decompose 256-bit shifts into 128-bit shifts.
  if (VT.is256BitVector())
    return splitVectorIntBinary(Op, DAG);

  if (VT == MVT::v32i16 || VT == MVT::v64i8)
    return splitVectorIntBinary(Op, DAG);

  return SDValue();
}

static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
         "Unexpected funnel shift opcode!");

  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  bool IsFSHR = Op.getOpcode() == ISD::FSHR;

  if (VT.isVector()) {
    APInt APIntShiftAmt;
    bool IsCstSplat = X86::isConstantSplat(Amt, APIntShiftAmt);

    if (Subtarget.hasVBMI2() && EltSizeInBits > 8) {
      if (IsFSHR)
        std::swap(Op0, Op1);

      if (IsCstSplat) {
        uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
        SDValue Imm = DAG.getTargetConstant(ShiftAmt, DL, MVT::i8);
        return getAVX512Node(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
                             {Op0, Op1, Imm}, DAG, Subtarget);
      }
      return getAVX512Node(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
                           {Op0, Op1, Amt}, DAG, Subtarget);
    }

    assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8 ||
            VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16 ||
            VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) &&
           "Unexpected funnel shift type!");

    // fshl(x,y,z) -> (unpack(y,x) << (z & (bw-1))) >> bw.
    // fshr(x,y,z) -> unpack(y,x) >> (z & (bw-1)).
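    // For example (illustrative only): with 16-bit elements, interleaving
    // y and x gives 32-bit lanes holding (x << 16) | y; shifting such a lane
    // left by z and taking the upper 16 bits yields
    // (x << z) | (y >> (16 - z)), which is exactly fshl(x,y,z).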
    if (IsCstSplat) {
      // TODO: Can't use generic expansion as UNDEF amt elements can be
      // converted to other values when folded to shift amounts, losing the
      // splat.
      uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
      uint64_t ShXAmt = IsFSHR ? (EltSizeInBits - ShiftAmt) : ShiftAmt;
      uint64_t ShYAmt = IsFSHR ? ShiftAmt : (EltSizeInBits - ShiftAmt);
      SDValue ShX = DAG.getNode(ISD::SHL, DL, VT, Op0,
                                DAG.getShiftAmountConstant(ShXAmt, VT, DL));
      SDValue ShY = DAG.getNode(ISD::SRL, DL, VT, Op1,
                                DAG.getShiftAmountConstant(ShYAmt, VT, DL));
      return DAG.getNode(ISD::OR, DL, VT, ShX, ShY);
    }

    SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
    SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
    bool IsCst = ISD::isBuildVectorOfConstantSDNodes(AmtMod.getNode());

    // Constant vXi16 funnel shifts can be efficiently handled by default.
    if (IsCst && EltSizeInBits == 16)
      return SDValue();

    unsigned ShiftOpc = IsFSHR ? ISD::SRL : ISD::SHL;
    unsigned NumElts = VT.getVectorNumElements();
    MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
    MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);

    // Split 256-bit integers on XOP/pre-AVX2 targets.
    // Split 512-bit integers on non 512-bit BWI targets.
    if ((VT.is256BitVector() && ((Subtarget.hasXOP() && EltSizeInBits < 16) ||
                                 !Subtarget.hasAVX2())) ||
        (VT.is512BitVector() && !Subtarget.useBWIRegs() &&
         EltSizeInBits < 32)) {
      // Pre-mask the amount modulo using the wider vector.
      Op = DAG.getNode(Op.getOpcode(), DL, VT, Op0, Op1, AmtMod);
      return splitVectorOp(Op, DAG);
    }

    // Attempt to fold scalar shift as unpack(y,x) << zext(splat(z))
    if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, ShiftOpc)) {
      int ScalarAmtIdx = -1;
      if (SDValue ScalarAmt = DAG.getSplatSourceVector(AmtMod, ScalarAmtIdx)) {
        // Uniform vXi16 funnel shifts can be efficiently handled by default.
        if (EltSizeInBits == 16)
          return SDValue();

        SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
        SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
        Lo = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Lo, ScalarAmt,
                                 ScalarAmtIdx, Subtarget, DAG);
        Hi = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Hi, ScalarAmt,
                                 ScalarAmtIdx, Subtarget, DAG);
        return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
      }
    }

    MVT WideSVT = MVT::getIntegerVT(
        std::min<unsigned>(EltSizeInBits * 2, Subtarget.hasBWI() ? 16 : 32));
    MVT WideVT = MVT::getVectorVT(WideSVT, NumElts);

    // If per-element shifts are legal, fallback to generic expansion.
    if (supportedVectorVarShift(VT, Subtarget, ShiftOpc) || Subtarget.hasXOP())
      return SDValue();

    // Attempt to fold as:
    // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
    // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
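    // For example (illustrative only): for vXi8 elements widened to vXi16,
    // fshl(x,y,z) forms (x << 8) | y, shifts it left by z & 7, then keeps
    // bits [15:8]; fshr instead shifts right and keeps bits [7:0].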
    if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
        supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
      Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Op0);
      Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Op1);
      AmtMod = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
      Op0 = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, Op0,
                                       EltSizeInBits, DAG);
      SDValue Res = DAG.getNode(ISD::OR, DL, WideVT, Op0, Op1);
      Res = DAG.getNode(ShiftOpc, DL, WideVT, Res, AmtMod);
      if (!IsFSHR)
        Res = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, Res,
                                         EltSizeInBits, DAG);
      return DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
    }

    // Attempt to fold per-element (ExtVT) shift as unpack(y,x) << zext(z)
    if (((IsCst || !Subtarget.hasAVX512()) && !IsFSHR && EltSizeInBits <= 16) ||
        supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
      SDValue Z = DAG.getConstant(0, DL, VT);
      SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
      SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
      SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
      SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
      SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
      SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
      return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
    }

    // Fallback to generic expansion.
    return SDValue();
  }

  assert(
      (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
      "Unexpected funnel shift type!");

  // Expand slow SHLD/SHRD cases if we are not optimizing for size.
  bool OptForSize = DAG.shouldOptForSize();
  bool ExpandFunnel = !OptForSize && Subtarget.isSHLDSlow();

  // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
  // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
  if ((VT == MVT::i8 || (ExpandFunnel && VT == MVT::i16)) &&
      !isa<ConstantSDNode>(Amt)) {
    SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, Amt.getValueType());
    SDValue HiShift = DAG.getConstant(EltSizeInBits, DL, Amt.getValueType());
    Op0 = DAG.getAnyExtOrTrunc(Op0, DL, MVT::i32);
    Op1 = DAG.getZExtOrTrunc(Op1, DL, MVT::i32);
    Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt, Mask);
    SDValue Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Op0, HiShift);
    Res = DAG.getNode(ISD::OR, DL, MVT::i32, Res, Op1);
    if (IsFSHR) {
      Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, Amt);
    } else {
      Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Res, Amt);
      Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, HiShift);
    }
    return DAG.getZExtOrTrunc(Res, DL, VT);
  }

  if (VT == MVT::i8 || ExpandFunnel)
    return SDValue();

  // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
  if (VT == MVT::i16) {
    Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
                      DAG.getConstant(15, DL, Amt.getValueType()));
    unsigned FSHOp = (IsFSHR ? X86ISD::FSHR : X86ISD::FSHL);
    return DAG.getNode(FSHOp, DL, VT, Op0, Op1, Amt);
  }

  return Op;
}

static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isVector() && "Custom lowering only for vector rotates!");

  SDLoc DL(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned Opcode = Op.getOpcode();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  int NumElts = VT.getVectorNumElements();
  bool IsROTL = Opcode == ISD::ROTL;

  // Check for constant splat rotation amount.
  APInt CstSplatValue;
  bool IsCstSplat = X86::isConstantSplat(Amt, CstSplatValue);

  // Check for splat rotate by zero.
  if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0)
    return R;

  // AVX512 implicitly uses modulo rotation amounts.
  if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
    // Attempt to rotate by immediate.
    if (IsCstSplat) {
      unsigned RotOpc = IsROTL ? X86ISD::VROTLI : X86ISD::VROTRI;
      uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
      return DAG.getNode(RotOpc, DL, VT, R,
                         DAG.getTargetConstant(RotAmt, DL, MVT::i8));
    }

    // Else, fall-back on VPROLV/VPRORV.
    return Op;
  }

  // AVX512 VBMI2 vXi16 - lower to funnel shifts.
  if (Subtarget.hasVBMI2() && 16 == EltSizeInBits) {
    unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
    return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
  }

  SDValue Z = DAG.getConstant(0, DL, VT);

  if (!IsROTL) {
    // If the ISD::ROTR amount is constant, we're always better converting to
    // ISD::ROTL.
    if (SDValue NegAmt = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {Z, Amt}))
      return DAG.getNode(ISD::ROTL, DL, VT, R, NegAmt);

    // XOP targets always prefer ISD::ROTL.
    if (Subtarget.hasXOP())
      return DAG.getNode(ISD::ROTL, DL, VT, R,
                         DAG.getNode(ISD::SUB, DL, VT, Z, Amt));
  }

  // Split 256-bit integers on XOP/pre-AVX2 targets.
  if (VT.is256BitVector() && (Subtarget.hasXOP() || !Subtarget.hasAVX2()))
    return splitVectorIntBinary(Op, DAG);

  // XOP has 128-bit vector variable + immediate rotates.
  // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
  // XOP implicitly uses modulo rotation amounts.
  if (Subtarget.hasXOP()) {
    assert(IsROTL && "Only ROTL expected");
    assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");

    // Attempt to rotate by immediate.
    if (IsCstSplat) {
      uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
      return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
                         DAG.getTargetConstant(RotAmt, DL, MVT::i8));
    }

    // Use general rotate by variable (per-element).
    return Op;
  }

  // Rotate by a uniform constant - expand back to shifts.
  // TODO: Can't use generic expansion as UNDEF amt elements can be converted
  // to other values when folded to shift amounts, losing the splat.
  if (IsCstSplat) {
    uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
    uint64_t ShlAmt = IsROTL ? RotAmt : (EltSizeInBits - RotAmt);
    uint64_t SrlAmt = IsROTL ? (EltSizeInBits - RotAmt) : RotAmt;
    SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, R,
                              DAG.getShiftAmountConstant(ShlAmt, VT, DL));
    SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, R,
                              DAG.getShiftAmountConstant(SrlAmt, VT, DL));
    return DAG.getNode(ISD::OR, DL, VT, Shl, Srl);
  }

  // Split 512-bit integers on non 512-bit BWI targets.
  if (VT.is512BitVector() && !Subtarget.useBWIRegs())
    return splitVectorIntBinary(Op, DAG);

  assert(
      (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
       ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
        Subtarget.hasAVX2()) ||
       ((VT == MVT::v32i16 || VT == MVT::v64i8) && Subtarget.useBWIRegs())) &&
      "Only vXi32/vXi16/vXi8 vector rotates supported");

  MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
  MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);

  SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
  SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);

  // Attempt to fold as unpack(x,x) << zext(splat(y)):
  // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
  // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
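  // For example (illustrative only): for bytes, unpack(x,x) forms 16-bit
  // lanes holding (x << 8) | x; shifting left by y & 7 and repacking the
  // high bytes yields (x << y) | (x >> (8 - y)) == rotl(x,y).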
  if (EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) {
    int BaseRotAmtIdx = -1;
    if (SDValue BaseRotAmt = DAG.getSplatSourceVector(AmtMod, BaseRotAmtIdx)) {
      if (EltSizeInBits == 16 && Subtarget.hasSSE41()) {
        unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
        return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
      }
      unsigned ShiftX86Opc = IsROTL ? X86ISD::VSHLI : X86ISD::VSRLI;
      SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
      SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
      Lo = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Lo, BaseRotAmt,
                               BaseRotAmtIdx, Subtarget, DAG);
      Hi = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Hi, BaseRotAmt,
                               BaseRotAmtIdx, Subtarget, DAG);
      return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
    }
  }

  bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
  unsigned ShiftOpc = IsROTL ? ISD::SHL : ISD::SRL;

  // Attempt to fold as unpack(x,x) << zext(y):
  // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
  // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
  // Const vXi16/vXi32 are excluded in favor of MUL-based lowering.
  if (!(ConstantAmt && EltSizeInBits != 8) &&
      !supportedVectorVarShift(VT, Subtarget, ShiftOpc) &&
      (ConstantAmt || supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc))) {
    SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
    SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
    SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
    SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
    SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
    SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
    return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
  }

  // v16i8/v32i8/v64i8: Split rotation into rot4/rot2/rot1 stages and select by
  // the amount bit.
  // TODO: We're doing nothing here that we couldn't do for funnel shifts.
  if (EltSizeInBits == 8) {
    MVT WideVT =
        MVT::getVectorVT(Subtarget.hasBWI() ? MVT::i16 : MVT::i32, NumElts);

    // Attempt to fold as:
    // rotl(x,y) -> (((aext(x) << bw) | zext(x)) << (y & (bw-1))) >> bw.
    // rotr(x,y) -> (((aext(x) << bw) | zext(x)) >> (y & (bw-1))).
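    // For example (illustrative only): rotl on a byte x with amount y first
    // forms the 16-bit value (x << 8) | x, shifts it left by y & 7, then
    // keeps bits [15:8]. With x = 0b10000001 and y = 1 this gives
    // 0b00000011, i.e. the top bit wrapped around to bit 0.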
    if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
        supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
      // If we're rotating by constant, just use default promotion.
      if (ConstantAmt)
        return SDValue();
      // See if we can perform this by widening to vXi16 or vXi32.
      R = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, R);
      R = DAG.getNode(
          ISD::OR, DL, WideVT, R,
          getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, R, 8, DAG));
      Amt = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
      R = DAG.getNode(ShiftOpc, DL, WideVT, R, Amt);
      if (IsROTL)
        R = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, R, 8, DAG);
      return DAG.getNode(ISD::TRUNCATE, DL, VT, R);
    }

    // We don't need ModuloAmt here as we just peek at individual bits.
    auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
      if (Subtarget.hasSSE41()) {
        // On SSE41 targets we can use PBLENDVB which selects bytes based just
        // on the sign bit.
        V0 = DAG.getBitcast(VT, V0);
        V1 = DAG.getBitcast(VT, V1);
        Sel = DAG.getBitcast(VT, Sel);
        return DAG.getBitcast(SelVT,
                              DAG.getNode(X86ISD::BLENDV, DL, VT, Sel, V0, V1));
      }
      // On pre-SSE41 targets we test for the sign bit by comparing to
      // zero - a negative value will set all bits of the lanes to true
      // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
      SDValue Z = DAG.getConstant(0, DL, SelVT);
      SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
      return DAG.getSelect(DL, SelVT, C, V0, V1);
    };

    // ISD::ROTR is currently only profitable on AVX512 targets with VPTERNLOG.
    if (!IsROTL && !useVPTERNLOG(Subtarget, VT)) {
      Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
      IsROTL = true;
    }

    unsigned ShiftLHS = IsROTL ? ISD::SHL : ISD::SRL;
    unsigned ShiftRHS = IsROTL ? ISD::SRL : ISD::SHL;

    // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
    // We can safely do this using i16 shifts as we're only interested in
    // the 3 lower bits of each byte.
    Amt = DAG.getBitcast(ExtVT, Amt);
    Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
    Amt = DAG.getBitcast(VT, Amt);

    // r = VSELECT(r, rot(r, 4), a);
    SDValue M;
    M = DAG.getNode(
        ISD::OR, DL, VT,
        DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(4, DL, VT)),
        DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(4, DL, VT)));
    R = SignBitSelect(VT, Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);

    // r = VSELECT(r, rot(r, 2), a);
    M = DAG.getNode(
        ISD::OR, DL, VT,
        DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(2, DL, VT)),
        DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(6, DL, VT)));
    R = SignBitSelect(VT, Amt, M, R);

    // a += a
    Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);

    // return VSELECT(r, rot(r, 1), a);
    M = DAG.getNode(
        ISD::OR, DL, VT,
        DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(1, DL, VT)),
        DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(7, DL, VT)));
    return SignBitSelect(VT, Amt, M, R);
  }

  bool IsSplatAmt = DAG.isSplatValue(Amt);
  bool LegalVarShifts = supportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
                        supportedVectorVarShift(VT, Subtarget, ISD::SRL);

  // Fallback for splats + all supported variable shifts.
  // Fallback for non-constants AVX2 vXi16 as well.
  if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
    Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
    SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
    AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
    SDValue SHL = DAG.getNode(IsROTL ? ISD::SHL : ISD::SRL, DL, VT, R, Amt);
    SDValue SRL = DAG.getNode(IsROTL ? ISD::SRL : ISD::SHL, DL, VT, R, AmtR);
    return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
  }

  // Everything below assumes ISD::ROTL.
  if (!IsROTL) {
    Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
    IsROTL = true;
  }

  // ISD::ROT* uses modulo rotate amounts.
  Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);

  assert(IsROTL && "Only ROTL supported");

  // As with shifts, attempt to convert the rotation amount to a multiplication
  // factor, fallback to general expansion.
  SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
  if (!Scale)
    return SDValue();

  // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
  if (EltSizeInBits == 16) {
    SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
    SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
    return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
  }

  // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
  // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
  // that can then be OR'd with the lower 32-bits.
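  // For example (illustrative only): rotl32(x,k) via PMULUDQ computes the
  // 64-bit product x * 2^k; its low 32 bits are x << k and its high 32 bits
  // are x >> (32 - k), so OR'ing the two halves gives the rotate.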
  assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
  static const int OddMask[] = {1, -1, 3, -1};
  SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
  SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);

  SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
                              DAG.getBitcast(MVT::v2i64, R),
                              DAG.getBitcast(MVT::v2i64, Scale));
  SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
                              DAG.getBitcast(MVT::v2i64, R13),
                              DAG.getBitcast(MVT::v2i64, Scale13));
  Res02 = DAG.getBitcast(VT, Res02);
  Res13 = DAG.getBitcast(VT, Res13);

  return DAG.getNode(ISD::OR, DL, VT,
                     DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
                     DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
}

/// Returns true if the operand type is exactly twice the native width, and
/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
  unsigned OpWidth = MemType->getPrimitiveSizeInBits();

  if (OpWidth == 64)
    return Subtarget.canUseCMPXCHG8B() && !Subtarget.is64Bit();
  if (OpWidth == 128)
    return Subtarget.canUseCMPXCHG16B();

  return false;
}

TargetLoweringBase::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  Type *MemType = SI->getValueOperand()->getType();

  bool NoImplicitFloatOps =
      SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
  if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
      !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
      (Subtarget.hasSSE1() || Subtarget.hasX87()))
    return AtomicExpansionKind::None;

  return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand
                                 : AtomicExpansionKind::None;
}

// Note: this turns large loads into lock cmpxchg8b/16b.
// TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  Type *MemType = LI->getType();

  // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
  // can use movq to do the load. If we have X87 we can load into an 80-bit
  // X87 register and store it to a stack temporary.
  bool NoImplicitFloatOps =
      LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
  if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
      !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
      (Subtarget.hasSSE1() || Subtarget.hasX87()))
    return AtomicExpansionKind::None;

  return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
                                 : AtomicExpansionKind::None;
}

enum BitTestKind : unsigned {
  UndefBit,
  ConstantBit,
  NotConstantBit,
  ShiftBit,
  NotShiftBit
};

static std::pair<Value *, BitTestKind> FindSingleBitChange(Value *V) {
  using namespace llvm::PatternMatch;
  BitTestKind BTK = UndefBit;
  auto *C = dyn_cast<ConstantInt>(V);
  if (C) {
    // Check if V is a power of 2 or NOT power of 2.
    if (isPowerOf2_64(C->getZExtValue()))
      BTK = ConstantBit;
    else if (isPowerOf2_64((~C->getValue()).getZExtValue()))
      BTK = NotConstantBit;
    return {V, BTK};
  }

  // Check if V is some power of 2 pattern known to be non-zero
  auto *I = dyn_cast<Instruction>(V);
  if (I) {
    bool Not = false;
    // Check if we have a NOT
    Value *PeekI;
    if (match(I, m_c_Xor(m_Value(PeekI), m_AllOnes())) ||
        match(I, m_Sub(m_AllOnes(), m_Value(PeekI)))) {
      Not = true;
      I = dyn_cast<Instruction>(PeekI);

      // If I is constant, it will fold and we can evaluate later. If it's an
      // argument or something of that nature, we can't analyze.
      if (I == nullptr)
        return {nullptr, UndefBit};
    }
    // We can only use 1 << X without more sophisticated analysis. C << X where
    // C is a power of 2 but not 1 can result in zero which cannot be translated
    // to bittest. Likewise any C >> X (either arith or logical) can be zero.
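    // For example (illustrative only): on i32, '2 << 31' wraps to 0, so an
    // 'atomicrmw' of 'C << X' with C != 1 may set no bit at all and cannot
    // be expressed as a single bit-test instruction.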
    if (I->getOpcode() == Instruction::Shl) {
      // Todo(1): The cmpxchg case is pretty costly so matching `BLSI(X)`, `X &
      // -X` and some other provable power of 2 patterns that we can use CTZ on
      // may be profitable.
      // Todo(2): It may be possible in some cases to prove that Shl(C, X) is
      // non-zero even where C != 1. Likewise LShr(C, X) and AShr(C, X) may also
      // be provably a non-zero power of 2.
      // Todo(3): ROTL and ROTR patterns on a power of 2 C should also be
      // transformable to bittest.
      auto *ShiftVal = dyn_cast<ConstantInt>(I->getOperand(0));
      if (!ShiftVal)
        return {nullptr, UndefBit};
      if (ShiftVal->equalsInt(1))
        BTK = Not ? NotShiftBit : ShiftBit;

      if (BTK == UndefBit)
        return {nullptr, UndefBit};

      Value *BitV = I->getOperand(1);

      Value *AndOp;
      const APInt *AndC;
      if (match(BitV, m_c_And(m_Value(AndOp), m_APInt(AndC)))) {
        // Read past a shiftmask instruction to find count
        if (*AndC == (I->getType()->getPrimitiveSizeInBits() - 1))
          BitV = AndOp;
      }
      return {BitV, BTK};
    }
  }
  return {nullptr, UndefBit};
}

TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const {
  using namespace llvm::PatternMatch;
  // If the atomicrmw's result isn't actually used, we can just add a "lock"
  // prefix to a normal instruction for these operations.
  if (AI->use_empty())
    return AtomicExpansionKind::None;

  if (AI->getOperation() == AtomicRMWInst::Xor) {
    // A ^ SignBit -> A + SignBit. This allows us to use `xadd` which is
    // preferable to both `cmpxchg` and `btc`.
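    // Illustrative justification: adding the sign bit cannot carry into any
    // other bit (the carry out of the top bit is simply discarded), so for
    // i8, 'x ^ 0x80' and 'x + 0x80' are the same value modulo 256.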
    if (match(AI->getOperand(1), m_SignMask()))
      return AtomicExpansionKind::None;
  }

  // If the atomicrmw's result is used by a single bit AND, we may use
  // bts/btr/btc instruction for these operations.
  // Note: InstCombinePass can cause a de-optimization here. It replaces the
  // SETCC(And(AtomicRMW(P, power_of_2), power_of_2)) with LShr and Xor
  // (depending on CC). This pattern can only use bts/btr/btc but we don't
  // detect it.
  Instruction *I = AI->user_back();
  auto BitChange = FindSingleBitChange(AI->getValOperand());
  if (BitChange.second == UndefBit || !AI->hasOneUse() ||
      I->getOpcode() != Instruction::And ||
      AI->getType()->getPrimitiveSizeInBits() == 8 ||
      AI->getParent() != I->getParent())
    return AtomicExpansionKind::CmpXChg;

  unsigned OtherIdx = I->getOperand(0) == AI ? 1 : 0;

  // This is a redundant AND, it should get cleaned up elsewhere.
  if (AI == I->getOperand(OtherIdx))
    return AtomicExpansionKind::CmpXChg;

  // The following instruction must be an AND of a single bit.
  if (BitChange.second == ConstantBit || BitChange.second == NotConstantBit) {
    auto *C1 = cast<ConstantInt>(AI->getValOperand());
    auto *C2 = dyn_cast<ConstantInt>(I->getOperand(OtherIdx));
    if (!C2 || !isPowerOf2_64(C2->getZExtValue())) {
      return AtomicExpansionKind::CmpXChg;
    }
    if (AI->getOperation() == AtomicRMWInst::And) {
      return ~C1->getValue() == C2->getValue()
                 ? AtomicExpansionKind::BitTestIntrinsic
                 : AtomicExpansionKind::CmpXChg;
    }
    return C1 == C2 ? AtomicExpansionKind::BitTestIntrinsic
                    : AtomicExpansionKind::CmpXChg;
  }

  assert(BitChange.second == ShiftBit || BitChange.second == NotShiftBit);

  auto BitTested = FindSingleBitChange(I->getOperand(OtherIdx));
  if (BitTested.second != ShiftBit && BitTested.second != NotShiftBit)
    return AtomicExpansionKind::CmpXChg;

  assert(BitChange.first != nullptr && BitTested.first != nullptr);

  // If shift amounts are not the same we can't use BitTestIntrinsic.
  if (BitChange.first != BitTested.first)
    return AtomicExpansionKind::CmpXChg;

  // For atomic AND, the mask must clear exactly one bit and the AND must be
  // testing the one bit that the mask leaves unset.
  if (AI->getOperation() == AtomicRMWInst::And)
    return (BitChange.second == NotShiftBit && BitTested.second == ShiftBit)
               ? AtomicExpansionKind::BitTestIntrinsic
               : AtomicExpansionKind::CmpXChg;

  // For atomic XOR/OR, we must be setting and testing the same bit.
  return (BitChange.second == ShiftBit && BitTested.second == ShiftBit)
             ? AtomicExpansionKind::BitTestIntrinsic
             : AtomicExpansionKind::CmpXChg;
}

void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
  IRBuilder<> Builder(AI);
  Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
  Intrinsic::ID IID_C = Intrinsic::not_intrinsic;
  Intrinsic::ID IID_I = Intrinsic::not_intrinsic;
  switch (AI->getOperation()) {
  default:
    llvm_unreachable("Unknown atomic operation");
  case AtomicRMWInst::Or:
    IID_C = Intrinsic::x86_atomic_bts;
    IID_I = Intrinsic::x86_atomic_bts_rm;
    break;
  case AtomicRMWInst::Xor:
    IID_C = Intrinsic::x86_atomic_btc;
    IID_I = Intrinsic::x86_atomic_btc_rm;
    break;
  case AtomicRMWInst::And:
    IID_C = Intrinsic::x86_atomic_btr;
    IID_I = Intrinsic::x86_atomic_btr_rm;
    break;
  }
  Instruction *I = AI->user_back();
  LLVMContext &Ctx = AI->getContext();
  Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
                                          Type::getInt8PtrTy(Ctx));
  Function *BitTest = nullptr;
  Value *Result = nullptr;
  auto BitTested = FindSingleBitChange(AI->getValOperand());
  assert(BitTested.first != nullptr);

  if (BitTested.second == ConstantBit || BitTested.second == NotConstantBit) {
    auto *C = cast<ConstantInt>(I->getOperand(I->getOperand(0) == AI ? 1 : 0));

    BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_C, AI->getType());

    unsigned Imm = llvm::countr_zero(C->getZExtValue());
    Result = Builder.CreateCall(BitTest, {Addr, Builder.getInt8(Imm)});
  } else {
    BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_I, AI->getType());

    assert(BitTested.second == ShiftBit || BitTested.second == NotShiftBit);

    Value *SI = BitTested.first;
    assert(SI != nullptr);

    // BT{S|R|C} on memory operand don't modulo bit position so we need to
    // mask it.
    unsigned ShiftBits = SI->getType()->getPrimitiveSizeInBits();
    Value *BitPos =
        Builder.CreateAnd(SI, Builder.getIntN(ShiftBits, ShiftBits - 1));
    // Todo(1): In many cases it may be provable that SI is less than
    // ShiftBits in which case this mask is unnecessary
    // Todo(2): In the fairly idiomatic case of P[X / sizeof_bits(X)] OP 1
    // << (X % sizeof_bits(X)) we can drop the shift mask and AGEN in
    // favor of just a raw BT{S|R|C}.
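    // For example (illustrative only): with a 32-bit operand and SI == 37,
    // the IR computes 1 << (37 & 31) == 1 << 5, but a memory-operand
    // 'bts dword ptr [mem], 37' would touch bit 5 of the next dword, so the
    // explicit mask keeps the instruction within the addressed word.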
    Result = Builder.CreateCall(BitTest, {Addr, BitPos});
    Result = Builder.CreateZExtOrTrunc(Result, AI->getType());

    // If the result is only used for zero/non-zero status then we don't need
    // to shift value back. Otherwise do so.
    for (auto It = I->user_begin(); It != I->user_end(); ++It) {
      if (auto *ICmp = dyn_cast<ICmpInst>(*It)) {
        if (ICmp->isEquality()) {
          auto *C0 = dyn_cast<ConstantInt>(ICmp->getOperand(0));
          auto *C1 = dyn_cast<ConstantInt>(ICmp->getOperand(1));
          if (C0 || C1) {
            assert(C0 == nullptr || C1 == nullptr);
            if ((C0 ? C0 : C1)->isZero())
              continue;
          }
        }
      }
      Result = Builder.CreateShl(Result, BitPos);
      break;
    }
  }

  I->replaceAllUsesWith(Result);
  I->eraseFromParent();
  AI->eraseFromParent();
}
static bool shouldExpandCmpArithRMWInIR(AtomicRMWInst *AI) {
  using namespace llvm::PatternMatch;
  if (!AI->hasOneUse())
    return false;

  Value *Op = AI->getOperand(1);
  ICmpInst::Predicate Pred;
  Instruction *I = AI->user_back();
  AtomicRMWInst::BinOp Opc = AI->getOperation();
  if (Opc == AtomicRMWInst::Add) {
    if (match(I, m_c_ICmp(Pred, m_Sub(m_ZeroInt(), m_Specific(Op)), m_Value())))
      return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
    if (match(I, m_OneUse(m_c_Add(m_Specific(Op), m_Value())))) {
      if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
        return Pred == CmpInst::ICMP_SLT;
      if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
        return Pred == CmpInst::ICMP_SGT;
    }
    return false;
  }
  if (Opc == AtomicRMWInst::Sub) {
    if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
      return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
    if (match(I, m_OneUse(m_Sub(m_Value(), m_Specific(Op))))) {
      if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
        return Pred == CmpInst::ICMP_SLT;
      if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
        return Pred == CmpInst::ICMP_SGT;
    }
    return false;
  }
  if ((Opc == AtomicRMWInst::Or &&
       match(I, m_OneUse(m_c_Or(m_Specific(Op), m_Value())))) ||
      (Opc == AtomicRMWInst::And &&
       match(I, m_OneUse(m_c_And(m_Specific(Op), m_Value()))))) {
    if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
      return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE ||
             Pred == CmpInst::ICMP_SLT;
    if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
      return Pred == CmpInst::ICMP_SGT;
  }
  if (Opc == AtomicRMWInst::Xor) {
    if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
      return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
    if (match(I, m_OneUse(m_c_Xor(m_Specific(Op), m_Value())))) {
      if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
        return Pred == CmpInst::ICMP_SLT;
      if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
        return Pred == CmpInst::ICMP_SGT;
    }
    return false;
  }

  return false;
}
void X86TargetLowering::emitCmpArithAtomicRMWIntrinsic(
    AtomicRMWInst *AI) const {
  IRBuilder<> Builder(AI);
  Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
  Instruction *TempI = nullptr;
  LLVMContext &Ctx = AI->getContext();
  ICmpInst *ICI = dyn_cast<ICmpInst>(AI->user_back());
  if (!ICI) {
    TempI = AI->user_back();
    assert(TempI->hasOneUse() && "Must have one use");
    ICI = cast<ICmpInst>(TempI->user_back());
  }
  X86::CondCode CC = X86::COND_INVALID;
  ICmpInst::Predicate Pred = ICI->getPredicate();
  switch (Pred) {
  default:
    llvm_unreachable("Not supported Pred");
  case CmpInst::ICMP_EQ:
    CC = X86::COND_E;
    break;
  case CmpInst::ICMP_NE:
    CC = X86::COND_NE;
    break;
  case CmpInst::ICMP_SLT:
    CC = X86::COND_S;
    break;
  case CmpInst::ICMP_SGT:
    CC = X86::COND_NS;
    break;
  }
  Intrinsic::ID IID = Intrinsic::not_intrinsic;
  switch (AI->getOperation()) {
  default:
    llvm_unreachable("Unknown atomic operation");
  case AtomicRMWInst::Add:
    IID = Intrinsic::x86_atomic_add_cc;
    break;
  case AtomicRMWInst::Sub:
    IID = Intrinsic::x86_atomic_sub_cc;
    break;
  case AtomicRMWInst::Or:
    IID = Intrinsic::x86_atomic_or_cc;
    break;
  case AtomicRMWInst::And:
    IID = Intrinsic::x86_atomic_and_cc;
    break;
  case AtomicRMWInst::Xor:
    IID = Intrinsic::x86_atomic_xor_cc;
    break;
  }
  Function *CmpArith =
      Intrinsic::getDeclaration(AI->getModule(), IID, AI->getType());
  Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
                                          Type::getInt8PtrTy(Ctx));
  Value *Call = Builder.CreateCall(
      CmpArith, {Addr, AI->getValOperand(), Builder.getInt32((unsigned)CC)});
  Value *Result = Builder.CreateTrunc(Call, Type::getInt1Ty(Ctx));
  ICI->replaceAllUsesWith(Result);
  ICI->eraseFromParent();
  if (TempI)
    TempI->eraseFromParent();
  AI->eraseFromParent();
}
TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
  Type *MemType = AI->getType();

  // If the operand is too big, we must see if cmpxchg8/16b is available
  // and default to library calls otherwise.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
    return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
                                   : AtomicExpansionKind::None;
  }

  AtomicRMWInst::BinOp Op = AI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
    if (shouldExpandCmpArithRMWInIR(AI))
      return AtomicExpansionKind::CmpArithIntrinsic;
    // It's better to use xadd, xsub or xchg for these in other cases.
    return AtomicExpansionKind::None;
  case AtomicRMWInst::Or:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Xor:
    if (shouldExpandCmpArithRMWInIR(AI))
      return AtomicExpansionKind::CmpArithIntrinsic;
    return shouldExpandLogicAtomicRMWInIR(AI);
  case AtomicRMWInst::Nand:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::FMax:
  case AtomicRMWInst::FMin:
  case AtomicRMWInst::UIncWrap:
  case AtomicRMWInst::UDecWrap:
  default:
    // These always require a non-trivial set of data operations on x86. We
    // must use a cmpxchg loop.
    return AtomicExpansionKind::CmpXChg;
  }
}
LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
  Type *MemType = AI->getType();
  // Accesses larger than the native width are turned into cmpxchg/libcalls, so
  // there is no benefit in turning such RMWs into loads, and it is actually
  // harmful as it introduces a mfence.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
    return nullptr;

  // If this is a canonical idempotent atomicrmw w/no uses, we have a better
  // lowering available in lowerAtomicArith.
  // TODO: push more cases through this path.
  if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
    if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
        AI->use_empty())
      return nullptr;

  IRBuilder<> Builder(AI);
  Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  auto SSID = AI->getSyncScopeID();
  // We must restrict the ordering to avoid generating loads with Release or
  // ReleaseAcquire orderings.
  auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());

  // Before the load we need a fence. Here is an example lifted from
  // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
  // is required:
  // Thread 0:
  //  x.store(1, relaxed);
  //  r1 = y.fetch_add(0, release);
  // Thread 1:
  //  y.fetch_add(42, acquire);
  //  r2 = x.load(relaxed);
  // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
  // lowered to just a load without a fence. A mfence flushes the store buffer,
  // making the optimization clearly correct.
  // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
  // otherwise, we might be able to be more aggressive on relaxed idempotent
  // rmw. In practice, they do not look useful, so we don't try to be
  // especially clever.
  if (SSID == SyncScope::SingleThread)
    // FIXME: we could just insert an ISD::MEMBARRIER here, except we are at
    // the IR level, so we must wrap it in an intrinsic.
    return nullptr;

  if (!Subtarget.hasMFence())
    // FIXME: it might make sense to use a locked operation here but on a
    // different cache-line to prevent cache-line bouncing. In practice it
    // is probably a small win, and x86 processors without mfence are rare
    // enough that we do not bother.
    return nullptr;

  Function *MFence =
      llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
  Builder.CreateCall(MFence, {});

  // Finally we can emit the atomic load.
  LoadInst *Loaded = Builder.CreateAlignedLoad(
      AI->getType(), AI->getPointerOperand(), AI->getAlign());
  Loaded->setAtomic(Order, SSID);
  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return Loaded;
}
bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
  if (!SI.isUnordered())
    return false;
  return ExperimentalUnorderedISEL;
}

bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
  if (!LI.isUnordered())
    return false;
  return ExperimentalUnorderedISEL;
}
/// Emit a locked operation on a stack location which does not change any
/// memory location, but does involve a lock prefix. Location is chosen to be
/// a) very likely accessed only by a single thread to minimize cache traffic,
/// and b) definitely dereferenceable. Returns the new Chain result.
static SDValue emitLockedStackOp(SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget, SDValue Chain,
                                 const SDLoc &DL) {
  // Implementation notes:
  // 1) LOCK prefix creates a full read/write reordering barrier for memory
  // operations issued by the current processor. As such, the location
  // referenced is not relevant for the ordering properties of the instruction.
  // See: Intel® 64 and IA-32 Architectures Software Developer's Manual,
  // 8.2.3.9 Loads and Stores Are Not Reordered with Locked Instructions
  // 2) Using an immediate operand appears to be the best encoding choice
  // here since it doesn't require an extra register.
  // 3) OR appears to be very slightly faster than ADD. (Though, the difference
  // is small enough it might just be measurement noise.)
  // 4) When choosing offsets, there are several contributing factors:
  //   a) If there's no redzone, we default to TOS. (We could allocate a cache
  //      line aligned stack object to improve this case.)
  //   b) To minimize our chances of introducing a false dependence, we prefer
  //      to offset the stack usage from TOS slightly.
  //   c) To minimize concerns about cross thread stack usage - in particular,
  //      the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
  //      captures state in the TOS frame and accesses it from many threads -
  //      we want to use an offset such that the offset is in a distinct cache
  //      line from the TOS frame.
  //
  // For a general discussion of the tradeoffs and benchmark results, see:
  // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
  auto &MF = DAG.getMachineFunction();
  auto &TFL = *Subtarget.getFrameLowering();
  const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;

  if (Subtarget.is64Bit()) {
    SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
    SDValue Ops[] = {
        DAG.getRegister(X86::RSP, MVT::i64),           // Base
        DAG.getTargetConstant(1, DL, MVT::i8),         // Scale
        DAG.getRegister(0, MVT::i64),                  // Index
        DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
        DAG.getRegister(0, MVT::i16),                  // Segment.
        Zero,
        Chain};
    SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
                                     MVT::Other, Ops);
    return SDValue(Res, 1);
  }

  SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
  SDValue Ops[] = {
      DAG.getRegister(X86::ESP, MVT::i32),           // Base
      DAG.getTargetConstant(1, DL, MVT::i8),         // Scale
      DAG.getRegister(0, MVT::i32),                  // Index
      DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
      DAG.getRegister(0, MVT::i16),                  // Segment.
      Zero,
      Chain};
  SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
                                   MVT::Other, Ops);
  return SDValue(Res, 1);
}
static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  SDLoc dl(Op);
  AtomicOrdering FenceOrdering =
      static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
  SyncScope::ID FenceSSID =
      static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
      FenceSSID == SyncScope::System) {
    if (Subtarget.hasMFence())
      return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));

    SDValue Chain = Op.getOperand(0);
    return emitLockedStackOp(DAG, Subtarget, Chain, dl);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
}
static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
                             SelectionDAG &DAG) {
  MVT T = Op.getSimpleValueType();
  SDLoc DL(Op);
  unsigned Reg = 0;
  unsigned size = 0;
  switch(T.SimpleTy) {
  default: llvm_unreachable("Invalid value type!");
  case MVT::i8:  Reg = X86::AL;  size = 1; break;
  case MVT::i16: Reg = X86::AX;  size = 2; break;
  case MVT::i32: Reg = X86::EAX; size = 4; break;
  case MVT::i64:
    assert(Subtarget.is64Bit() && "Node not type legal!");
    Reg = X86::RAX; size = 8;
    break;
  }
  SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
                                  Op.getOperand(2), SDValue());
  SDValue Ops[] = { cpIn.getValue(0),
                    Op.getOperand(1),
                    Op.getOperand(3),
                    DAG.getTargetConstant(size, DL, MVT::i8),
                    cpIn.getValue(1) };
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
  SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
                                           Ops, T, MMO);
  SDValue cpOut =
      DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
  SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
                                      MVT::i32, cpOut.getValue(2));
  SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);

  return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
                     cpOut, Success, EFLAGS.getValue(1));
}
// Create MOVMSKB, taking into account whether we need to split for AVX1.
static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
                           const X86Subtarget &Subtarget) {
  MVT InVT = V.getSimpleValueType();

  if (InVT == MVT::v64i8) {
    SDValue Lo, Hi;
    std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
    Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
    Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
    Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
    Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
    Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
                     DAG.getConstant(32, DL, MVT::i8));
    return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
  }
  if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
    SDValue Lo, Hi;
    std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
    Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
    Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
    Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
                     DAG.getConstant(16, DL, MVT::i8));
    return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
  }
  return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
}
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
                            SelectionDAG &DAG) {
  SDValue Src = Op.getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();
  MVT DstVT = Op.getSimpleValueType();

  // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
  // half to v32i1 and concatenating the result.
  if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
    assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
    assert(Subtarget.hasBWI() && "Expected BWI target");
    SDLoc dl(Op);
    SDValue Lo, Hi;
    std::tie(Lo, Hi) = DAG.SplitScalar(Src, dl, MVT::i32, MVT::i32);
    Lo = DAG.getBitcast(MVT::v32i1, Lo);
    Hi = DAG.getBitcast(MVT::v32i1, Hi);
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
  }

  // Use MOVMSK for vector to scalar conversion to prevent scalarization.
  if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
    assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
    MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
    SDLoc DL(Op);
    SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
    V = getPMOVMSKB(DL, V, DAG, Subtarget);
    return DAG.getZExtOrTrunc(V, DL, DstVT);
  }

  assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
          SrcVT == MVT::i64) && "Unexpected VT!");

  assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
  if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
      !(DstVT == MVT::x86mmx && SrcVT.isVector()))
    // This conversion needs to be expanded.
    return SDValue();

  SDLoc dl(Op);
  if (SrcVT.isVector()) {
    // Widen the input vector in the case of MVT::v2i32.
    // Example: from MVT::v2i32 to MVT::v4i32.
    MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
                                 SrcVT.getVectorNumElements() * 2);
    Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
                      DAG.getUNDEF(SrcVT));
  } else {
    assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
           "Unexpected source type in LowerBITCAST");
    Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
  }

  MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
  Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);

  if (DstVT == MVT::x86mmx)
    return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
                     DAG.getIntPtrConstant(0, dl));
}
33471 /// Compute the horizontal sum of bytes in V for the elements of VT.
33473 /// Requires V to be a byte vector and VT to be an integer vector type with
33474 /// wider elements than V's type. The width of the elements of VT determines
33475 /// how many bytes of V are summed horizontally to produce each element of the
33476 /// result.
33477 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
33478 const X86Subtarget &Subtarget,
33479 SelectionDAG &DAG) {
33480 SDLoc DL(V);
33481 MVT ByteVecVT = V.getSimpleValueType();
33482 MVT EltVT = VT.getVectorElementType();
33483 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
33484 "Expected value to have byte element type.");
33485 assert(EltVT != MVT::i8 &&
33486 "Horizontal byte sum only makes sense for wider elements!");
33487 unsigned VecSize = VT.getSizeInBits();
33488 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
33490 // The PSADBW instruction horizontally adds all bytes and leaves the result
33491 // in i64 chunks, so it directly computes the pop count for v2i64 and v4i64.
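// (PSADBW against a zero vector sums |b - 0| = b over each group of eight
// bytes, leaving each group's byte total in the low 16 bits of an i64 lane.)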
33492 if (EltVT == MVT::i64) {
33493 SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
33494 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
33495 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
33496 return DAG.getBitcast(VT, V);
33497 }
33499 if (EltVT == MVT::i32) {
33500 // We unpack the low half and high half into i32s interleaved with zeros so
33501 // that we can use PSADBW to horizontally sum them. The most useful part of
33502 // this is that it lines up the results of two PSADBW instructions to be
33503 // two v2i64 vectors which concatenated are the 4 population counts. We can
33504 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
33505 SDValue Zeros = DAG.getConstant(0, DL, VT);
33506 SDValue V32 = DAG.getBitcast(VT, V);
33507 SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
33508 SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
33510 // Do the horizontal sums into two v2i64s.
33511 Zeros = DAG.getConstant(0, DL, ByteVecVT);
33512 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
33513 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
33514 DAG.getBitcast(ByteVecVT, Low), Zeros);
33515 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
33516 DAG.getBitcast(ByteVecVT, High), Zeros);
33518 // Merge them together.
33519 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
33520 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
33521 DAG.getBitcast(ShortVecVT, Low),
33522 DAG.getBitcast(ShortVecVT, High));
33524 return DAG.getBitcast(VT, V);
33525 }
33527 // The only element type left is i16.
33528 assert(EltVT == MVT::i16 && "Unknown how to handle type");
33530 // To obtain pop count for each i16 element starting from the pop count for
33531 // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
33532 // right by 8. It is important to shift as i16s as i8 vector shift isn't
33533 // directly supported.
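// Worked example for one i16 lane holding byte pop counts [Lo, Hi]: SHL-by-8
// gives [0, Lo]; the i8 ADD gives [Lo, Hi+Lo]; the final SRL-by-8 leaves
// Hi+Lo, the pop count of the whole i16 element.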
33534 SDValue ShifterV = DAG.getConstant(8, DL, VT);
33535 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
33536 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
33537 DAG.getBitcast(ByteVecVT, V));
33538 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
33541 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
33542 const X86Subtarget &Subtarget,
33543 SelectionDAG &DAG) {
33544 MVT VT = Op.getSimpleValueType();
33545 MVT EltVT = VT.getVectorElementType();
33546 int NumElts = VT.getVectorNumElements();
33548 assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
33550 // Implement a lookup table in register by using an algorithm based on:
33551 // http://wm.ite.pl/articles/sse-popcount.html
33553 // The general idea is that every lower byte nibble in the input vector is an
33554 // index into an in-register, pre-computed pop count table. We then split up
33555 // the input vector into two new ones: (1) a vector with only the shifted-right
33556 // higher nibbles for each byte and (2) a vector with the lower nibbles (and
33557 // masked-out higher ones) for each byte. PSHUFB is used separately with both
33558 // to index the in-register table. Next, both are added and the result is an
33559 // i8 vector where each element contains the pop count for its input byte.
33560 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
33561 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
33562 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
33563 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
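// E.g. the byte 0xB5 = 0b10110101 splits into nibbles 0xB and 0x5;
// LUT[0xB] = 3 and LUT[0x5] = 2, so the two PSHUFB lookups sum to 5.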
33565 SmallVector<SDValue, 64> LUTVec;
33566 for (int i = 0; i < NumElts; ++i)
33567 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
33568 SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
33569 SDValue M0F = DAG.getConstant(0x0F, DL, VT);
33572 SDValue FourV = DAG.getConstant(4, DL, VT);
33573 SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
33576 SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
33578 // The nibble vectors are used as shuffle masks that index elements into the
33579 // LUT. After counting low and high nibbles, add the two results to obtain
33580 // the final pop count per i8 element.
33581 SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
33582 SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
33583 return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
33586 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
33587 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
33588 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
33589 SelectionDAG &DAG) {
33590 MVT VT = Op.getSimpleValueType();
33591 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
33592 "Unknown CTPOP type to handle");
33593 SDLoc DL(Op.getNode());
33594 SDValue Op0 = Op.getOperand(0);
33596 // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
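// i.e. zero-extend vXi8/vXi16 elements to i32, count them with VPOPCNTD,
// and truncate the counts back to the original element type.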
33597 if (Subtarget.hasVPOPCNTDQ()) {
33598 unsigned NumElems = VT.getVectorNumElements();
33599 assert((VT.getVectorElementType() == MVT::i8 ||
33600 VT.getVectorElementType() == MVT::i16) && "Unexpected type");
33601 if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
33602 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
33603 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
33604 Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
33605 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
33606 }
33607 }
33609 // Decompose 256-bit ops into smaller 128-bit ops.
33610 if (VT.is256BitVector() && !Subtarget.hasInt256())
33611 return splitVectorIntUnary(Op, DAG);
33613 // Decompose 512-bit ops into smaller 256-bit ops.
33614 if (VT.is512BitVector() && !Subtarget.hasBWI())
33615 return splitVectorIntUnary(Op, DAG);
33617 // For element types greater than i8, do vXi8 pop counts and a bytesum.
33618 if (VT.getScalarType() != MVT::i8) {
33619 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
33620 SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
33621 SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
33622 return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
33623 }
33625 // We can't use the fast LUT approach, so fall back on LegalizeDAG.
33626 if (!Subtarget.hasSSSE3())
33627 return SDValue();
33629 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
33630 }
33632 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
33633 SelectionDAG &DAG) {
33634 assert(Op.getSimpleValueType().isVector() &&
33635 "We only do custom lowering for vector population count.");
33636 return LowerVectorCTPOP(Op, Subtarget, DAG);
33639 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
33640 MVT VT = Op.getSimpleValueType();
33641 SDValue In = Op.getOperand(0);
33642 SDLoc DL(Op);
33644 // For scalars, it's still beneficial to transfer to/from the SIMD unit to
33645 // perform the BITREVERSE.
33646 if (!VT.isVector()) {
33647 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
33648 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
33649 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
33650 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
33651 DAG.getIntPtrConstant(0, DL));
33652 }
33654 int NumElts = VT.getVectorNumElements();
33655 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
33657 // Decompose 256-bit ops into smaller 128-bit ops.
33658 if (VT.is256BitVector())
33659 return splitVectorIntUnary(Op, DAG);
33661 assert(VT.is128BitVector() &&
33662 "Only 128-bit vector bitreverse lowering supported.");
33664 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
33665 // perform the BSWAP in the shuffle.
33666 // It's best to shuffle using the second operand as this will implicitly
33667 // allow memory folding for multiple vectors.
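// Each VPPERM selector byte holds a source byte index in bits [4:0] (values
// 16+ select from the second source) and an operation in bits [7:5];
// operation 2 bit-reverses the selected byte, hence the (2 << 5) below.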
33668 SmallVector<SDValue, 16> MaskElts;
33669 for (int i = 0; i != NumElts; ++i) {
33670 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
33671 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
33672 int PermuteByte = SourceByte | (2 << 5);
33673 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
33674 }
33675 }
33677 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
33678 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
33679 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
33680 Res, Mask);
33681 return DAG.getBitcast(VT, Res);
33684 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
33685 SelectionDAG &DAG) {
33686 MVT VT = Op.getSimpleValueType();
33688 if (Subtarget.hasXOP() && !VT.is512BitVector())
33689 return LowerBITREVERSE_XOP(Op, DAG);
33691 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
33693 SDValue In = Op.getOperand(0);
33694 SDLoc DL(Op);
33696 assert(VT.getScalarType() == MVT::i8 &&
33697 "Only byte vector BITREVERSE supported");
33699 // Split v64i8 without BWI so that we can still use the PSHUFB lowering.
33700 if (VT == MVT::v64i8 && !Subtarget.hasBWI())
33701 return splitVectorIntUnary(Op, DAG);
33703 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
33704 if (VT == MVT::v32i8 && !Subtarget.hasInt256())
33705 return splitVectorIntUnary(Op, DAG);
33707 unsigned NumElts = VT.getVectorNumElements();
33709 // If we have GFNI, we can use GF2P8AFFINEQB to reverse the bits.
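// GF2P8AFFINEQB multiplies each byte, viewed as a GF(2) bit vector, by the
// 8x8 bit matrix packed into the i64 constant; 0x8040201008040201 is the
// anti-diagonal matrix that reverses bit order, and the zero immediate is
// the XOR term of the affine transform.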
33710 if (Subtarget.hasGFNI()) {
33711 MVT MatrixVT = MVT::getVectorVT(MVT::i64, NumElts / 8);
33712 SDValue Matrix = DAG.getConstant(0x8040201008040201ULL, DL, MatrixVT);
33713 Matrix = DAG.getBitcast(VT, Matrix);
33714 return DAG.getNode(X86ISD::GF2P8AFFINEQB, DL, VT, In, Matrix,
33715 DAG.getTargetConstant(0, DL, MVT::i8));
33716 }
33718 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into two
33719 // nibbles, and a PSHUFB lookup finds the bitreverse of each 0-15 value
33720 // (moved to the other nibble).
33721 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
33722 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
33723 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
33725 const int LoLUT[16] = {
33726 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
33727 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
33728 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
33729 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
33730 const int HiLUT[16] = {
33731 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
33732 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
33733 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
33734 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
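// E.g. for the byte 0x1E: LoLUT[0xE] = 0x70 (low nibble reversed into the
// high nibble), HiLUT[0x1] = 0x08 (high nibble reversed into the low
// nibble), and OR-ing them yields 0x78, the bit-reversal of 0x1E.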
33736 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
33737 for (unsigned i = 0; i < NumElts; ++i) {
33738 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
33739 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
33740 }
33742 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
33743 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
33744 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
33745 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
33746 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
33749 static SDValue LowerPARITY(SDValue Op, const X86Subtarget &Subtarget,
33750 SelectionDAG &DAG) {
33751 SDLoc DL(Op);
33752 SDValue X = Op.getOperand(0);
33753 MVT VT = Op.getSimpleValueType();
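// Parity is preserved by XOR-folding: xor-ing the two halves of a value
// keeps the overall bit parity, so wider inputs are folded down to 8 bits,
// where x86 sets PF from the low byte of the last result.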
33755 // Special case: if the input fits in 8 bits, we can use a single 8-bit TEST.
33756 if (VT == MVT::i8 ||
33757 DAG.MaskedValueIsZero(X, APInt::getBitsSetFrom(VT.getSizeInBits(), 8))) {
33758 X = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
33759 SDValue Flags = DAG.getNode(X86ISD::CMP, DL, MVT::i32, X,
33760 DAG.getConstant(0, DL, MVT::i8));
33761 // Copy the inverse of the parity flag into a register with setcc.
33762 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
33763 // Extend to the original type.
33764 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
33765 }
33767 // If we have POPCNT, use the default expansion.
33768 if (Subtarget.hasPOPCNT())
33769 return SDValue();
33771 if (VT == MVT::i64) {
33772 // Xor the high and low 32 bits together using a 32-bit operation.
33773 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
33774 DAG.getNode(ISD::SRL, DL, MVT::i64, X,
33775 DAG.getConstant(32, DL, MVT::i8)));
33776 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
33777 X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
33778 }
33780 if (VT != MVT::i16) {
33781 // Xor the high and low 16-bits together using a 32-bit operation.
33782 SDValue Hi16 = DAG.getNode(ISD::SRL, DL, MVT::i32, X,
33783 DAG.getConstant(16, DL, MVT::i8));
33784 X = DAG.getNode(ISD::XOR, DL, MVT::i32, X, Hi16);
33785 } else {
33786 // If the input is 16-bits, we need to extend to use an i32 shift below.
33787 X = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, X);
33788 }
33790 // Finally, xor the low 2 bytes together and use an 8-bit flag-setting xor.
33791 // This should allow an h-reg to be used to save a shift.
33792 SDValue Hi = DAG.getNode(
33793 ISD::TRUNCATE, DL, MVT::i8,
33794 DAG.getNode(ISD::SRL, DL, MVT::i32, X, DAG.getConstant(8, DL, MVT::i8)));
33795 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
33796 SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
33797 SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
33799 // Copy the inverse of the parity flag into a register with setcc.
33800 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
33801 // Extend to the original type.
33802 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
33805 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
33806 const X86Subtarget &Subtarget) {
33807 unsigned NewOpc = 0;
33808 switch (N->getOpcode()) {
33809 case ISD::ATOMIC_LOAD_ADD:
33810 NewOpc = X86ISD::LADD;
33811 break;
33812 case ISD::ATOMIC_LOAD_SUB:
33813 NewOpc = X86ISD::LSUB;
33814 break;
33815 case ISD::ATOMIC_LOAD_OR:
33816 NewOpc = X86ISD::LOR;
33817 break;
33818 case ISD::ATOMIC_LOAD_XOR:
33819 NewOpc = X86ISD::LXOR;
33820 break;
33821 case ISD::ATOMIC_LOAD_AND:
33822 NewOpc = X86ISD::LAND;
33823 break;
33824 default:
33825 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
33826 }
33828 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
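// The result list is (i32, Other): LOCK-prefixed RMW nodes produce EFLAGS
// (modeled as i32) and a chain, but not the loaded value itself.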
33830 return DAG.getMemIntrinsicNode(
33831 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
33832 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
33833 /*MemVT=*/N->getSimpleValueType(0), MMO);
33836 /// Lower atomic_load_ops into LOCK-prefixed operations.
33837 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
33838 const X86Subtarget &Subtarget) {
33839 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
33840 SDValue Chain = N->getOperand(0);
33841 SDValue LHS = N->getOperand(1);
33842 SDValue RHS = N->getOperand(2);
33843 unsigned Opc = N->getOpcode();
33844 MVT VT = N->getSimpleValueType(0);
33845 SDLoc DL(N);
33847 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
33848 // can only be lowered when the result is unused. They should have already
33849 // been transformed into a cmpxchg loop in AtomicExpand.
33850 if (N->hasAnyUseOfValue(0)) {
33851 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
33852 // select LXADD if LOCK_SUB can't be selected.
33853 // Handle (atomic_load_xor p, SignBit) as (atomic_load_add p, SignBit) so we
33854 // can use LXADD as opposed to cmpxchg.
33855 if (Opc == ISD::ATOMIC_LOAD_SUB ||
33856 (Opc == ISD::ATOMIC_LOAD_XOR && isMinSignedConstant(RHS))) {
33857 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
33858 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS, RHS,
33859 AN->getMemOperand());
33860 }
33861 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
33862 "Used AtomicRMW ops other than Add should have been expanded!");
33863 return SDValue();
33864 }
33866 // Specialized lowering for the canonical form of an idempotent atomicrmw.
33867 // The core idea here is that since the memory location isn't actually
33868 // changing, all we need is a lowering for the *ordering* impacts of the
33869 // atomicrmw. As such, we can choose a different operation and memory
33870 // location to minimize impact on other code.
33871 // The above holds unless the node is marked volatile, in which case it
33872 // needs to be preserved according to the LangRef.
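// E.g. an "atomicrmw or ptr %p, 0 seq_cst" used purely for its ordering can
// be served by a locked op against the stack, or dropped to a compiler
// barrier when no hardware ordering is required.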
33873 if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS) && !AN->isVolatile()) {
33874 // On X86, the only ordering which actually requires an instruction is
33875 // seq_cst which isn't SingleThread; everything else just needs to be
33876 // preserved during codegen and then dropped. Note that we expect (but don't
33877 // assume) that orderings other than seq_cst and acq_rel have been
33878 // canonicalized to a store or load.
33879 if (AN->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent &&
33880 AN->getSyncScopeID() == SyncScope::System) {
33881 // Prefer a locked operation against a stack location to minimize cache
33882 // traffic. This assumes that stack locations are very likely to be
33883 // accessed only by the owning thread.
33884 SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
33885 assert(!N->hasAnyUseOfValue(0));
33886 // NOTE: The getUNDEF is needed to give something for the unused result 0.
33887 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
33888 DAG.getUNDEF(VT), NewChain);
33889 }
33890 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
33891 SDValue NewChain = DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Chain);
33892 assert(!N->hasAnyUseOfValue(0));
33893 // NOTE: The getUNDEF is needed to give something for the unused result 0.
33894 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
33895 DAG.getUNDEF(VT), NewChain);
33896 }
33898 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
33899 // RAUW the chain, but don't worry about the result, as it's unused.
33900 assert(!N->hasAnyUseOfValue(0));
33901 // NOTE: The getUNDEF is needed to give something for the unused result 0.
33902 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
33903 DAG.getUNDEF(VT), LockOp.getValue(1));
33906 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
33907 const X86Subtarget &Subtarget) {
33908 auto *Node = cast<AtomicSDNode>(Op.getNode());
33909 SDLoc dl(Node);
33910 EVT VT = Node->getMemoryVT();
33912 bool IsSeqCst =
33913 Node->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent;
33914 bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
33916 // If this store is not sequentially consistent and the type is legal
33917 // we can just keep it.
33918 if (!IsSeqCst && IsTypeLegal)
33919 return Op;
33921 if (VT == MVT::i64 && !IsTypeLegal) {
33922 // For illegal i64 atomic_stores, we can try to use MOVQ or MOVLPS if SSE
33923 // is enabled.
33924 bool NoImplicitFloatOps =
33925 DAG.getMachineFunction().getFunction().hasFnAttribute(
33926 Attribute::NoImplicitFloat);
33927 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
33928 SDValue Chain;
33929 if (Subtarget.hasSSE1()) {
33930 SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
33931 Node->getOperand(2));
33932 MVT StVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
33933 SclToVec = DAG.getBitcast(StVT, SclToVec);
33934 SDVTList Tys = DAG.getVTList(MVT::Other);
33935 SDValue Ops[] = {Node->getChain(), SclToVec, Node->getBasePtr()};
33936 Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops,
33937 MVT::i64, Node->getMemOperand());
33938 } else if (Subtarget.hasX87()) {
33939 // First load this into an 80-bit X87 register using a stack temporary.
33940 // This will put the whole integer into the significand.
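// (The f80 format has a 64-bit significand, so an i64 round-trips through
// FILD/FIST exactly, and each transfer is a single 8-byte memory access.)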
33941 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
33942 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
33943 MachinePointerInfo MPI =
33944 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
33945 Chain =
33946 DAG.getStore(Node->getChain(), dl, Node->getOperand(2), StackPtr,
33947 MPI, MaybeAlign(), MachineMemOperand::MOStore);
33948 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
33949 SDValue LdOps[] = {Chain, StackPtr};
33950 SDValue Value = DAG.getMemIntrinsicNode(
33951 X86ISD::FILD, dl, Tys, LdOps, MVT::i64, MPI,
33952 /*Align*/ std::nullopt, MachineMemOperand::MOLoad);
33953 Chain = Value.getValue(1);
33955 // Now use an FIST to do the atomic store.
33956 SDValue StoreOps[] = {Chain, Value, Node->getBasePtr()};
33957 Chain =
33958 DAG.getMemIntrinsicNode(X86ISD::FIST, dl, DAG.getVTList(MVT::Other),
33959 StoreOps, MVT::i64, Node->getMemOperand());
33960 }
33963 // If this is a sequentially consistent store, also emit an appropriate
33964 // barrier.
33965 if (IsSeqCst)
33966 Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
33968 return Chain;
33969 }
33970 }
33973 // Convert seq_cst store -> xchg
33974 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
33975 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
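// XCHG with a memory operand is implicitly LOCK-prefixed and acts as a full
// fence, so no separate MFENCE is needed for a seq_cst store.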
33976 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
33977 Node->getMemoryVT(),
33978 Node->getOperand(0),
33979 Node->getOperand(1), Node->getOperand(2),
33980 Node->getMemOperand());
33981 return Swap.getValue(1);
33984 static SDValue LowerADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) {
33985 SDNode *N = Op.getNode();
33986 MVT VT = N->getSimpleValueType(0);
33987 unsigned Opc = Op.getOpcode();
33989 // Let legalize expand this if it isn't a legal type yet.
33990 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
33991 return SDValue();
33993 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
33994 SDLoc DL(N);
33996 // Set the carry flag.
33997 SDValue Carry = Op.getOperand(2);
33998 EVT CarryVT = Carry.getValueType();
33999 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
34000 Carry, DAG.getAllOnesConstant(DL, CarryVT));
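// Adding all-ones (-1) sets the hardware carry flag exactly when the
// incoming carry value is nonzero, materializing the boolean into
// EFLAGS.CF for the ADC/SBB below.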
34002 bool IsAdd = Opc == ISD::UADDO_CARRY || Opc == ISD::SADDO_CARRY;
34003 SDValue Sum = DAG.getNode(IsAdd ? X86ISD::ADC : X86ISD::SBB, DL, VTs,
34004 Op.getOperand(0), Op.getOperand(1),
34005 Carry.getValue(1));
34007 bool IsSigned = Opc == ISD::SADDO_CARRY || Opc == ISD::SSUBO_CARRY;
34008 SDValue SetCC = getSETCC(IsSigned ? X86::COND_O : X86::COND_B,
34009 Sum.getValue(1), DL, DAG);
34010 if (N->getValueType(1) == MVT::i1)
34011 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
34013 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
34016 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
34017 SelectionDAG &DAG) {
34018 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
34020 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
34021 // which returns the values as { float, float } (in XMM0) or
34022 // { double, double } (which is returned in XMM0, XMM1).
34023 SDLoc dl(Op);
34024 SDValue Arg = Op.getOperand(0);
34025 EVT ArgVT = Arg.getValueType();
34026 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
34028 TargetLowering::ArgListTy Args;
34029 TargetLowering::ArgListEntry Entry;
34031 Entry.Node = Arg;
34032 Entry.Ty = ArgTy;
34033 Entry.IsSExt = false;
34034 Entry.IsZExt = false;
34035 Args.push_back(Entry);
34037 bool isF64 = ArgVT == MVT::f64;
34038 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
34039 // the small struct {f32, f32} is returned in (eax, edx). For f64,
34040 // the results are returned via SRet in memory.
34041 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
34042 RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
34043 const char *LibcallName = TLI.getLibcallName(LC);
34044 SDValue Callee =
34045 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
34047 Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
34048 : (Type *)FixedVectorType::get(ArgTy, 4);
34050 TargetLowering::CallLoweringInfo CLI(DAG);
34051 CLI.setDebugLoc(dl)
34052 .setChain(DAG.getEntryNode())
34053 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
34055 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
34057 if (isF64)
34058 // Returned in xmm0 and xmm1.
34059 return CallResult.first;
34061 // Returned in bits 0:31 and 32:63 of xmm0.
34062 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
34063 CallResult.first, DAG.getIntPtrConstant(0, dl));
34064 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
34065 CallResult.first, DAG.getIntPtrConstant(1, dl));
34066 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
34067 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
34070 /// Widen a vector input to a vector of NVT. The
34071 /// input vector must have the same element type as NVT.
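/// Extra lanes are filled with undef by default, or with zeroes when
/// FillWithZeroes is set (as required when widening mask vectors).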
34072 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
34073 bool FillWithZeroes = false) {
34074 // Check if InOp already has the right width.
34075 MVT InVT = InOp.getSimpleValueType();
34076 if (InVT == NVT)
34077 return InOp;
34079 if (InOp.isUndef())
34080 return DAG.getUNDEF(NVT);
34082 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
34083 "input and widen element type must match");
34085 unsigned InNumElts = InVT.getVectorNumElements();
34086 unsigned WidenNumElts = NVT.getVectorNumElements();
34087 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
34088 "Unexpected request for vector widening");
34091 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
34092 InOp.getNumOperands() == 2) {
34093 SDValue N1 = InOp.getOperand(1);
34094 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
34095 N1.isUndef()) {
34096 InOp = InOp.getOperand(0);
34097 InVT = InOp.getSimpleValueType();
34098 InNumElts = InVT.getVectorNumElements();
34099 }
34100 }
34101 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
34102 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
34103 SmallVector<SDValue, 16> Ops;
34104 for (unsigned i = 0; i < InNumElts; ++i)
34105 Ops.push_back(InOp.getOperand(i));
34107 EVT EltVT = InOp.getOperand(0).getValueType();
34109 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
34110 DAG.getUNDEF(EltVT);
34111 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
34112 Ops.push_back(FillVal);
34113 return DAG.getBuildVector(NVT, dl, Ops);
34114 }
34115 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
34116 DAG.getUNDEF(NVT);
34117 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
34118 InOp, DAG.getIntPtrConstant(0, dl));
34121 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
34122 SelectionDAG &DAG) {
34123 assert(Subtarget.hasAVX512() &&
34124 "MGATHER/MSCATTER are supported on AVX-512 arch only");
34126 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
34127 SDValue Src = N->getValue();
34128 MVT VT = Src.getSimpleValueType();
34129 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
34130 SDLoc dl(Op);
34132 SDValue Scale = N->getScale();
34133 SDValue Index = N->getIndex();
34134 SDValue Mask = N->getMask();
34135 SDValue Chain = N->getChain();
34136 SDValue BasePtr = N->getBasePtr();
34138 if (VT == MVT::v2f32 || VT == MVT::v2i32) {
34139 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
34140 // If the index is v2i64 and we have VLX we can use xmm for data and index.
34141 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
34142 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
34143 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
34144 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
34145 SDVTList VTs = DAG.getVTList(MVT::Other);
34146 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
34147 return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
34148 N->getMemoryVT(), N->getMemOperand());
34149 }
34150 }
34153 MVT IndexVT = Index.getSimpleValueType();
34155 // If the index is v2i32, we're being called by type legalization and we
34156 // should just let the default handling take care of it.
34157 if (IndexVT == MVT::v2i32)
34158 return SDValue();
34160 // If we don't have VLX and neither the passthru nor the index is 512-bits,
34161 // we need to widen until one is.
34162 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
34163 !Index.getSimpleValueType().is512BitVector()) {
34164 // Determine how much we need to widen by to get a 512-bit type.
34165 unsigned Factor = std::min(512/VT.getSizeInBits(),
34166 512/IndexVT.getSizeInBits());
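// E.g. v2f32 data with a v2i64 index: Factor = min(512/64, 512/128) = 4, so
// the operation is widened to v8f32 data with a 512-bit v8i64 index.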
34167 unsigned NumElts = VT.getVectorNumElements() * Factor;
34169 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
34170 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
34171 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
34173 Src = ExtendToType(Src, VT, DAG);
34174 Index = ExtendToType(Index, IndexVT, DAG);
34175 Mask = ExtendToType(Mask, MaskVT, DAG, true);
34176 }
34178 SDVTList VTs = DAG.getVTList(MVT::Other);
34179 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
34180 return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
34181 N->getMemoryVT(), N->getMemOperand());
34184 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
34185 SelectionDAG &DAG) {
34187 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
34188 MVT VT = Op.getSimpleValueType();
34189 MVT ScalarVT = VT.getScalarType();
34190 SDValue Mask = N->getMask();
34191 MVT MaskVT = Mask.getSimpleValueType();
34192 SDValue PassThru = N->getPassThru();
34193 SDLoc dl(Op);
34195 // Handle AVX masked loads which don't support passthru other than 0.
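// AVX VMASKMOV zeroes the masked-off lanes, so a non-zero passthru is
// emulated by loading with a zero passthru and blending the passthru back
// in with a VSELECT afterwards.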
34196 if (MaskVT.getVectorElementType() != MVT::i1) {
34197 // We also allow undef in the isel pattern.
34198 if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
34199 return Op;
34201 SDValue NewLoad = DAG.getMaskedLoad(
34202 VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
34203 getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
34204 N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
34205 N->isExpandingLoad());
34207 SDValue Select = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
34208 return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
34209 }
34211 assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
34212 "Expanding masked load is supported on AVX-512 target only!");
34214 assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
34215 "Expanding masked load is supported for 32 and 64-bit types only!");
34217 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
34218 "Cannot lower masked load op.");
34220 assert((ScalarVT.getSizeInBits() >= 32 ||
34221 (Subtarget.hasBWI() &&
34222 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
34223 "Unsupported masked load op.");
34225 // This operation is legal for targets with VLX, but without
34226 // VLX the vector should be widened to 512 bits.
34227 unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
34228 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
34229 PassThru = ExtendToType(PassThru, WideDataVT, DAG);
34231 // Mask element has to be i1.
34232 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
34233 "Unexpected mask type");
34235 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
34237 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
34238 SDValue NewLoad = DAG.getMaskedLoad(
34239 WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
34240 PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
34241 N->getExtensionType(), N->isExpandingLoad());
34243 SDValue Extract =
34244 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, NewLoad.getValue(0),
34245 DAG.getIntPtrConstant(0, dl));
34246 SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
34247 return DAG.getMergeValues(RetOps, dl);
34250 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
34251 SelectionDAG &DAG) {
34252 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
34253 SDValue DataToStore = N->getValue();
34254 MVT VT = DataToStore.getSimpleValueType();
34255 MVT ScalarVT = VT.getScalarType();
34256 SDValue Mask = N->getMask();
34257 SDLoc dl(Op);
34259 assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
34260 "Compressing masked store is supported on AVX-512 targets only!");
34262 assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
34263 "Compressing masked store is supported for 32 and 64-bit types only!");
34265 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
34266 "Cannot lower masked store op.");
34268 assert((ScalarVT.getSizeInBits() >= 32 ||
34269 (Subtarget.hasBWI() &&
34270 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
34271 "Unsupported masked store op.");
34273 // This operation is legal for targets with VLX, but without
34274 // VLX the vector should be widened to 512 bits.
34275 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
34276 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
34278 // Mask element has to be i1.
34279 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
34280 "Unexpected mask type");
34282 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
34284 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
34285 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
34286 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
34287 N->getOffset(), Mask, N->getMemoryVT(),
34288 N->getMemOperand(), N->getAddressingMode(),
34289 N->isTruncatingStore(), N->isCompressingStore());
34292 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
34293 SelectionDAG &DAG) {
34294 assert(Subtarget.hasAVX2() &&
34295 "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
34297 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
34298 SDLoc dl(Op);
34299 MVT VT = Op.getSimpleValueType();
34300 SDValue Index = N->getIndex();
34301 SDValue Mask = N->getMask();
34302 SDValue PassThru = N->getPassThru();
34303 MVT IndexVT = Index.getSimpleValueType();
34305 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
34307 // If the index is v2i32, we're being called by type legalization.
34308 if (IndexVT == MVT::v2i32)
34309 return SDValue();
34311 // If we don't have VLX and neither the passthru nor the index is 512-bits,
34312 // we need to widen until one is.
34313 MVT OrigVT = VT;
34314 if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
34315 !IndexVT.is512BitVector()) {
34316 // Determine how much we need to widen by to get a 512-bit type.
34317 unsigned Factor = std::min(512/VT.getSizeInBits(),
34318 512/IndexVT.getSizeInBits());
34320 unsigned NumElts = VT.getVectorNumElements() * Factor;
34322 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
34323 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
34324 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
34326 PassThru = ExtendToType(PassThru, VT, DAG);
34327 Index = ExtendToType(Index, IndexVT, DAG);
34328 Mask = ExtendToType(Mask, MaskVT, DAG, true);
34329 }
34331 // Break dependency on the data register.
34332 if (PassThru.isUndef())
34333 PassThru = getZeroVector(VT, Subtarget, DAG, dl);
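// (Hardware gathers merge into the destination register, so an undef
// passthru would carry a false dependency on its previous contents.)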
34335 SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
34336 N->getScale() };
34337 SDValue NewGather = DAG.getMemIntrinsicNode(
34338 X86ISD::MGATHER, dl, DAG.getVTList(VT, MVT::Other), Ops, N->getMemoryVT(),
34339 N->getMemOperand());
34340 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
34341 NewGather, DAG.getIntPtrConstant(0, dl));
34342 return DAG.getMergeValues({Extract, NewGather.getValue(1)}, dl);
34345 static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
34346 SDLoc dl(Op);
34347 SDValue Src = Op.getOperand(0);
34348 MVT DstVT = Op.getSimpleValueType();
34350 AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
34351 unsigned SrcAS = N->getSrcAddressSpace();
34353 assert(SrcAS != N->getDestAddressSpace() &&
34354 "addrspacecast must be between different address spaces");
34356 if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
34357 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
34358 } else if (DstVT == MVT::i64) {
34359 Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
34360 } else if (DstVT == MVT::i32) {
34361 Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
34362 } else {
34363 report_fatal_error("Bad address space in addrspacecast");
34364 }
34366 return Op;
34367 }
34368 SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
34369 SelectionDAG &DAG) const {
34370 // TODO: Eventually, the lowering of these nodes should be informed by or
34371 // deferred to the GC strategy for the function in which they appear. For
34372 // now, however, they must be lowered to something. Since they are logically
34373 // no-ops in the case of a null GC strategy (or a GC strategy which does not
34374 // require special handling for these nodes), lower them as literal NOOPs for
34375 // the time being.
34376 SmallVector<SDValue, 2> Ops;
34377 Ops.push_back(Op.getOperand(0));
34378 if (Op->getGluedNode())
34379 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
34381 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
34382 return SDValue(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
34385 // Custom split CVTPS2PH with wide types.
34386 static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) {
34387 SDLoc dl(Op);
34388 EVT VT = Op.getValueType();
34389 SDValue Lo, Hi;
34390 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
34391 EVT LoVT, HiVT;
34392 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
34393 SDValue RC = Op.getOperand(1);
34394 Lo = DAG.getNode(X86ISD::CVTPS2PH, dl, LoVT, Lo, RC);
34395 Hi = DAG.getNode(X86ISD::CVTPS2PH, dl, HiVT, Hi, RC);
34396 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
34399 static SDValue LowerPREFETCH(SDValue Op, const X86Subtarget &Subtarget,
34400 SelectionDAG &DAG) {
34401 unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
34403 // We don't support non-data prefetch without PREFETCHI.
34404 // Just preserve the chain.
34405 if (!IsData && !Subtarget.hasPREFETCHI())
34406 return Op.getOperand(0);
34408 return Op;
34409 }
34411 static StringRef getInstrStrFromOpNo(const SmallVectorImpl<StringRef> &AsmStrs,
34412 unsigned OpNo) {
34413 const APInt Operand(32, OpNo);
34414 std::string OpNoStr = llvm::toString(Operand, 10, false);
34415 std::string Str(" $");
34417 std::string OpNoStr1(Str + OpNoStr); // e.g. " $1" (OpNo=1)
34418 std::string OpNoStr2(Str + "{" + OpNoStr + ":"); // With modifier, e.g. ${1:P}
34420 auto I = StringRef::npos;
34421 for (auto &AsmStr : AsmStrs) {
34422 // Match the OpNo string. We must match exactly so that we don't match a
34423 // sub-string; e.g. "$12" contains "$1".
34424 if (AsmStr.endswith(OpNoStr1))
34425 I = AsmStr.size() - OpNoStr1.size();
34427 // Get the index of operand in AsmStr.
34428 if (I == StringRef::npos)
34429 I = AsmStr.find(OpNoStr1 + ",");
34430 if (I == StringRef::npos)
34431 I = AsmStr.find(OpNoStr2);
34433 if (I == StringRef::npos)
34434 continue;
34436 assert(I > 0 && "Unexpected inline asm string!");
34437 // Remove the operand string and label (if it exists).
34438 // For example:
34439 // ".L__MSASMLABEL_.${:uid}__l:call dword ptr ${0:P}"
34440 // ==>
34441 // ".L__MSASMLABEL_.${:uid}__l:call dword ptr "
34442 // ==>
34443 // "call dword ptr "
34444 auto TmpStr = AsmStr.substr(0, I);
34445 I = TmpStr.rfind(':');
34446 if (I != StringRef::npos)
34447 TmpStr = TmpStr.substr(I + 1);
34448 return TmpStr.take_while(llvm::isAlpha);
34449 }
34451 return StringRef();
34454 bool X86TargetLowering::isInlineAsmTargetBranch(
34455 const SmallVectorImpl<StringRef> &AsmStrs, unsigned OpNo) const {
34456 // In a __asm block, __asm inst foo where inst is CALL or JMP should be
34457 // changed from indirect TargetLowering::C_Memory to direct
34458 // TargetLowering::C_Address.
34459 // We don't need to special case LOOP* and Jcc, which cannot target a memory
34460 // operand.
34461 StringRef Inst = getInstrStrFromOpNo(AsmStrs, OpNo);
34462 return Inst.equals_insensitive("call") || Inst.equals_insensitive("jmp");
34465 /// Provide custom lowering hooks for some operations.
34466 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
34467 switch (Op.getOpcode()) {
34468 default: llvm_unreachable("Should not custom lower this!");
34469 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
34470 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
34471 return LowerCMP_SWAP(Op, Subtarget, DAG);
34472 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
34473 case ISD::ATOMIC_LOAD_ADD:
34474 case ISD::ATOMIC_LOAD_SUB:
34475 case ISD::ATOMIC_LOAD_OR:
34476 case ISD::ATOMIC_LOAD_XOR:
34477 case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
34478 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG, Subtarget);
34479 case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
34480 case ISD::PARITY: return LowerPARITY(Op, Subtarget, DAG);
34481 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
34482 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
34483 case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
34484 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
34485 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
34486 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
34487 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
34488 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
34489 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
34490 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
34491 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
34492 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
34493 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
34494 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
34495 case ISD::SHL_PARTS:
34496 case ISD::SRA_PARTS:
34497 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
34498 case ISD::FSHL:
34499 case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
34500 case ISD::STRICT_SINT_TO_FP:
34501 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
34502 case ISD::STRICT_UINT_TO_FP:
34503 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
34504 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
34505 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
34506 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
34507 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
34508 case ISD::ZERO_EXTEND_VECTOR_INREG:
34509 case ISD::SIGN_EXTEND_VECTOR_INREG:
34510 return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
34511 case ISD::FP_TO_SINT:
34512 case ISD::STRICT_FP_TO_SINT:
34513 case ISD::FP_TO_UINT:
34514 case ISD::STRICT_FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
34515 case ISD::FP_TO_SINT_SAT:
34516 case ISD::FP_TO_UINT_SAT: return LowerFP_TO_INT_SAT(Op, DAG);
34517 case ISD::FP_EXTEND:
34518 case ISD::STRICT_FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
34519 case ISD::FP_ROUND:
34520 case ISD::STRICT_FP_ROUND: return LowerFP_ROUND(Op, DAG);
34521 case ISD::FP16_TO_FP:
34522 case ISD::STRICT_FP16_TO_FP: return LowerFP16_TO_FP(Op, DAG);
34523 case ISD::FP_TO_FP16:
34524 case ISD::STRICT_FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
34525 case ISD::FP_TO_BF16: return LowerFP_TO_BF16(Op, DAG);
34526 case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
34527 case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
34528 case ISD::FADD:
34529 case ISD::FSUB: return lowerFaddFsub(Op, DAG);
34530 case ISD::FROUND: return LowerFROUND(Op, DAG);
34531 case ISD::FABS:
34532 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
34533 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
34534 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
34535 case ISD::LRINT:
34536 case ISD::LLRINT: return LowerLRINT_LLRINT(Op, DAG);
34537 case ISD::SETCC:
34538 case ISD::STRICT_FSETCC:
34539 case ISD::STRICT_FSETCCS: return LowerSETCC(Op, DAG);
34540 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
34541 case ISD::SELECT: return LowerSELECT(Op, DAG);
34542 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
34543 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
34544 case ISD::VASTART: return LowerVASTART(Op, DAG);
34545 case ISD::VAARG: return LowerVAARG(Op, DAG);
34546 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
34547 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
34548 case ISD::INTRINSIC_VOID:
34549 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
34550 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
34551 case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
34552 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
34553 case ISD::FRAME_TO_ARGS_OFFSET:
34554 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
34555 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
34556 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
34557 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
34558 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
34559 case ISD::EH_SJLJ_SETUP_DISPATCH:
34560 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
34561 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
34562 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
34563 case ISD::GET_ROUNDING: return LowerGET_ROUNDING(Op, DAG);
34564 case ISD::SET_ROUNDING: return LowerSET_ROUNDING(Op, DAG);
34565 case ISD::GET_FPENV_MEM: return LowerGET_FPENV_MEM(Op, DAG);
34566 case ISD::SET_FPENV_MEM: return LowerSET_FPENV_MEM(Op, DAG);
34567 case ISD::RESET_FPENV: return LowerRESET_FPENV(Op, DAG);
34568 case ISD::CTLZ:
34569 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
34570 case ISD::CTTZ:
34571 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
34572 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
34573 case ISD::MULHS:
34574 case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
34575 case ISD::ROTL:
34576 case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
34577 case ISD::SRA:
34578 case ISD::SRL:
34579 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
34580 case ISD::SADDO:
34581 case ISD::UADDO:
34582 case ISD::SSUBO:
34583 case ISD::USUBO: return LowerXALUO(Op, DAG);
34584 case ISD::SMULO:
34585 case ISD::UMULO: return LowerMULO(Op, Subtarget, DAG);
34586 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
34587 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
34588 case ISD::SADDO_CARRY:
34589 case ISD::SSUBO_CARRY:
34590 case ISD::UADDO_CARRY:
34591 case ISD::USUBO_CARRY: return LowerADDSUBO_CARRY(Op, DAG);
34592 case ISD::ADD:
34593 case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
34594 case ISD::UADDSAT:
34595 case ISD::SADDSAT:
34596 case ISD::USUBSAT:
34597 case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
34598 case ISD::SMAX:
34599 case ISD::SMIN:
34600 case ISD::UMAX:
34601 case ISD::UMIN: return LowerMINMAX(Op, Subtarget, DAG);
34602 case ISD::FMINIMUM:
34603 case ISD::FMAXIMUM:
34604 return LowerFMINIMUM_FMAXIMUM(Op, Subtarget, DAG);
34605 case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
34606 case ISD::ABDS:
34607 case ISD::ABDU: return LowerABD(Op, Subtarget, DAG);
34608 case ISD::AVGCEILU: return LowerAVG(Op, Subtarget, DAG);
34609 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
34610 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
34611 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
34612 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
34613 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
34614 case ISD::GC_TRANSITION_START:
34615 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION(Op, DAG);
34616 case ISD::ADDRSPACECAST: return LowerADDRSPACECAST(Op, DAG);
34617 case X86ISD::CVTPS2PH: return LowerCVTPS2PH(Op, DAG);
34618 case ISD::PREFETCH: return LowerPREFETCH(Op, Subtarget, DAG);
34619 }
34620 }
34622 /// Replace a node with an illegal result type with a new node built out of
34623 /// custom code.
34624 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
34625 SmallVectorImpl<SDValue>&Results,
34626 SelectionDAG &DAG) const {
34627 SDLoc dl(N);
34628 switch (N->getOpcode()) {
34631 dbgs() << "ReplaceNodeResults: ";
34634 llvm_unreachable("Do not know how to custom type legalize this operation!");
34635 case X86ISD::CVTPH2PS: {
34636 EVT VT = N->getValueType(0);
34637 SDValue Lo, Hi;
34638 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
34639 EVT LoVT, HiVT;
34640 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
34641 Lo = DAG.getNode(X86ISD::CVTPH2PS, dl, LoVT, Lo);
34642 Hi = DAG.getNode(X86ISD::CVTPH2PS, dl, HiVT, Hi);
34643 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
34644 Results.push_back(Res);
34645 return;
34646 }
34647 case X86ISD::STRICT_CVTPH2PS: {
34648 EVT VT = N->getValueType(0);
34649 SDValue Lo, Hi;
34650 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 1);
34651 EVT LoVT, HiVT;
34652 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
34653 Lo = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {LoVT, MVT::Other},
34654 {N->getOperand(0), Lo});
34655 Hi = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {HiVT, MVT::Other},
34656 {N->getOperand(0), Hi});
34657 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
34658 Lo.getValue(1), Hi.getValue(1));
34659 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
34660 Results.push_back(Res);
34661 Results.push_back(Chain);
34662 return;
34663 }
34664 case X86ISD::CVTPS2PH:
34665 Results.push_back(LowerCVTPS2PH(SDValue(N, 0), DAG));
34666 return;
34667 case ISD::CTPOP: {
34668 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
34669 // Use a v2i64 if possible.
34670 bool NoImplicitFloatOps =
34671 DAG.getMachineFunction().getFunction().hasFnAttribute(
34672 Attribute::NoImplicitFloat);
34673 if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
34674 SDValue Wide =
34675 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
34676 Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
34677 // Bit count should fit in 32-bits, extract it as that and then zero
34678 // extend to i64. Otherwise we end up extracting bits 63:32 separately.
34679 Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
34680 Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
34681 DAG.getIntPtrConstant(0, dl));
34682 Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
34683 Results.push_back(Wide);
34684 }
34685 return;
34686 }
34687 case ISD::MUL: {
34688 EVT VT = N->getValueType(0);
34689 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
34690 VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
34691 // Pre-promote these to vXi16 to avoid op legalization thinking all 16
34692 // elements are needed.
34693 MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
34694 SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
34695 SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
34696 SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
34697 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
34698 unsigned NumConcats = 16 / VT.getVectorNumElements();
34699 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
34700 ConcatOps[0] = Res;
34701 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
34702 Results.push_back(Res);
34703 return;
34704 }
34705 case ISD::SMULO:
34706 case ISD::UMULO: {
34707 EVT VT = N->getValueType(0);
34708 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
34709 VT == MVT::v2i32 && "Unexpected VT!");
34710 bool IsSigned = N->getOpcode() == ISD::SMULO;
34711 unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
34712 SDValue Op0 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(0));
34713 SDValue Op1 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(1));
34714 SDValue Res = DAG.getNode(ISD::MUL, dl, MVT::v2i64, Op0, Op1);
34715 // Extract the high 32 bits from each result using PSHUFD.
34716 // TODO: Could use SRL+TRUNCATE but that doesn't become a PSHUFD.
34717 SDValue Hi = DAG.getBitcast(MVT::v4i32, Res);
34718 Hi = DAG.getVectorShuffle(MVT::v4i32, dl, Hi, Hi, {1, 3, -1, -1});
34719 Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Hi,
34720 DAG.getIntPtrConstant(0, dl));
34722 // Truncate the low bits of the result. This will become PSHUFD.
34723 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
34725 SDValue HiCmp;
34726 if (IsSigned) {
34727 // SMULO overflows if the high bits don't match the sign of the low.
34728 HiCmp = DAG.getNode(ISD::SRA, dl, VT, Res, DAG.getConstant(31, dl, VT));
34729 } else {
34730 // UMULO overflows if the high bits are non-zero.
34731 HiCmp = DAG.getConstant(0, dl, VT);
34732 }
34733 SDValue Ovf = DAG.getSetCC(dl, N->getValueType(1), Hi, HiCmp, ISD::SETNE);
34735 // Widen the result by padding with undef.
34736 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
34737 DAG.getUNDEF(VT));
34738 Results.push_back(Res);
34739 Results.push_back(Ovf);
34740 return;
34741 }
34742 case X86ISD::VPMADDWD: {
34743 // Legalize types for X86ISD::VPMADDWD by widening.
34744 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
34746 EVT VT = N->getValueType(0);
34747 EVT InVT = N->getOperand(0).getValueType();
34748 assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
34749 "Expected a VT that divides into 128 bits.");
34750 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
34751 "Unexpected type action!");
34752 unsigned NumConcat = 128 / InVT.getSizeInBits();
34754 EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
34755 InVT.getVectorElementType(),
34756 NumConcat * InVT.getVectorNumElements());
34757 EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
34758 VT.getVectorElementType(),
34759 NumConcat * VT.getVectorNumElements());
34761 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
34762 Ops[0] = N->getOperand(0);
34763 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
34764 Ops[0] = N->getOperand(1);
34765 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
34767 SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
34768 Results.push_back(Res);
34769 return;
34770 }
34771 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
34772 case X86ISD::FMINC:
34773 case X86ISD::FMIN:
34774 case X86ISD::FMAXC:
34775 case X86ISD::FMAX: {
34776 EVT VT = N->getValueType(0);
34777 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
34778 SDValue UNDEF = DAG.getUNDEF(VT);
34779 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
34780 N->getOperand(0), UNDEF);
34781 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
34782 N->getOperand(1), UNDEF);
34783 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
34784 return;
34785 }
34786 case ISD::SDIV:
34787 case ISD::UDIV:
34788 case ISD::SREM:
34789 case ISD::UREM: {
34790 EVT VT = N->getValueType(0);
34791 if (VT.isVector()) {
34792 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
34793 "Unexpected type action!");
34794 // If this RHS is a constant splat vector we can widen this and let
34795 // division/remainder by constant optimize it.
34796 // TODO: Can we do something for non-splat?
34797 APInt SplatVal;
34798 if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
34799 unsigned NumConcats = 128 / VT.getSizeInBits();
34800 SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
34801 Ops0[0] = N->getOperand(0);
34802 EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
34803 SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
34804 SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
34805 SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
34806 Results.push_back(Res);
34807 }
34808 return;
34809 }
34811 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
34812 Results.push_back(V);
34813 return;
34814 }
34815 case ISD::TRUNCATE: {
34816 MVT VT = N->getSimpleValueType(0);
34817 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
34818 return;
34820 // The generic legalizer will try to widen the input type to the same
34821 // number of elements as the widened result type, but this isn't always
34822 // the best thing to do, so perform some custom legalization here.
34823 MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
34824 SDValue In = N->getOperand(0);
34825 EVT InVT = In.getValueType();
34826 EVT InEltVT = InVT.getVectorElementType();
34827 EVT EltVT = VT.getVectorElementType();
34828 unsigned WidenNumElts = WidenVT.getVectorNumElements();
34830 unsigned InBits = InVT.getSizeInBits();
34831 if (128 % InBits == 0) {
34832 // 128-bit and smaller inputs should avoid truncate altogether and just
34833 // use a build_vector that will become a shuffle.
34834 // TODO: Widen and use a shuffle directly?
34835 SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
34836 // Use the original element count so we don't do more scalar opts than
34837 // necessary.
34838 unsigned MinElts = VT.getVectorNumElements();
34839 for (unsigned i=0; i < MinElts; ++i) {
34840 SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
34841 DAG.getIntPtrConstant(i, dl));
34842 Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
34843 }
34844 Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
34845 return;
34846 }
34847 // With AVX512 there are some cases that can use a target specific
34848 // truncate node to go from 256/512 to less than 128 with zeros in the
34849 // upper elements of the 128 bit result.
34850 if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
34851 // We can use VTRUNC directly if for 256 bits with VLX or for any 512.
34852 if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
34853 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
34854 return;
34855 }
34856 // There's one case we can widen to 512 bits and use VTRUNC.
34857 if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
34858 In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
34859 DAG.getUNDEF(MVT::v4i64));
34860 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
34864 if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
34865 getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
34866 isTypeLegal(MVT::v4i64)) {
// Input needs to be split and output needs to be widened. Let's use two
// VTRUNCs, and shuffle their results together into the wider type.
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
34872 Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
34873 Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
34874 SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
34875 { 0, 1, 2, 3, 16, 17, 18, 19,
34876 -1, -1, -1, -1, -1, -1, -1, -1 });
34877 Results.push_back(Res);
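// Lane picture (illustrative): each VTRUNC leaves 4 valid i8 lanes at the
// bottom of a v16i8, so the mask {0,1,2,3,16,17,18,19} interleaves
// <a0..a3, x..> and <b0..b3, x..> into <a0..a3, b0..b3, undef x 8>, the
// widened form of the v8i8 result.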
34881 // Attempt to widen the truncation input vector to let LowerTRUNCATE handle
34882 // this via type legalization.
34883 if ((InEltVT == MVT::i16 || InEltVT == MVT::i32 || InEltVT == MVT::i64) &&
34884 (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32) &&
34885 (!Subtarget.hasSSSE3() || (InVT == MVT::v8i64 && VT == MVT::v8i8) ||
34886 (InVT == MVT::v4i64 && VT == MVT::v4i16 && !Subtarget.hasAVX()))) {
34887 SDValue WidenIn = widenSubVector(In, false, Subtarget, DAG, dl,
34888 InEltVT.getSizeInBits() * WidenNumElts);
34889 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, WidenVT, WidenIn));
34895 case ISD::ANY_EXTEND:
34896 // Right now, only MVT::v8i8 has Custom action for an illegal type.
34897 // It's intended to custom handle the input type.
34898 assert(N->getValueType(0) == MVT::v8i8 &&
34899 "Do not know how to legalize this Node");
34901 case ISD::SIGN_EXTEND:
34902 case ISD::ZERO_EXTEND: {
34903 EVT VT = N->getValueType(0);
34904 SDValue In = N->getOperand(0);
34905 EVT InVT = In.getValueType();
34906 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
34907 (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
34908 assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
34909 "Unexpected type action!");
34910 assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
// Custom split this so we can extend i8/i16->i32 invec. This is better
// since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
// sra, followed by an extend from i32 to i64 using pcmpgt. By custom
// splitting, we allow the sra from the extend to i32 to be shared by the
// split.
34915 In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
34917 // Fill a vector with sign bits for each element.
34918 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
34919 SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
// Create an unpackl and unpackh to interleave the sign bits then bitcast
// the results to v2i64.
SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
                                  {0, 4, 1, 5});
Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
                                  {2, 6, 3, 7});
Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
34930 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
34931 Results.push_back(Res);
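// Worked example (values for exposition): with In = <a, b, c, d> after the
// sext to v4i32 and SignBits = <sa, sb, sc, sd> (each 0 or -1), the
// unpackl pattern yields <a, sa, b, sb>, which on little-endian bitcasts
// to the v2i64 <sext_i64(a), sext_i64(b)>; unpackh forms the high half.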
34935 if (VT == MVT::v16i32 || VT == MVT::v8i64) {
34936 if (!InVT.is128BitVector()) {
// Not a 128 bit vector, but maybe type legalization will promote
// it to 128 bits.
if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
  return;
InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
if (!InVT.is128BitVector())
  return;
34945 // Promote the input to 128 bits. Type legalization will turn this into
34946 // zext_inreg/sext_inreg.
34947 In = DAG.getNode(N->getOpcode(), dl, InVT, In);
}

// Perform custom splitting instead of the two stage extend we would get
// by default.
EVT LoVT, HiVT;
std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
34954 assert(isTypeLegal(LoVT) && "Split VT not legal?");
34956 SDValue Lo = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, LoVT, In, DAG);
34958 // We need to shift the input over by half the number of elements.
34959 unsigned NumElts = InVT.getVectorNumElements();
34960 unsigned HalfNumElts = NumElts / 2;
34961 SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
34962 for (unsigned i = 0; i != HalfNumElts; ++i)
34963 ShufMask[i] = i + HalfNumElts;
34965 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
34966 Hi = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, HiVT, Hi, DAG);
34968 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
34969 Results.push_back(Res);
34973 case ISD::FP_TO_SINT:
34974 case ISD::STRICT_FP_TO_SINT:
34975 case ISD::FP_TO_UINT:
34976 case ISD::STRICT_FP_TO_UINT: {
34977 bool IsStrict = N->isStrictFPOpcode();
34978 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
34979 N->getOpcode() == ISD::STRICT_FP_TO_SINT;
34980 EVT VT = N->getValueType(0);
34981 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
34982 SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
34983 EVT SrcVT = Src.getValueType();
34986 if (isSoftFP16(SrcVT)) {
34987 EVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
SDValue Res;
if (IsStrict) {
  Res =
      DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
                  {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
                                      {NVT, MVT::Other}, {Chain, Src})});
  Chain = Res.getValue(1);
} else {
  Res = DAG.getNode(N->getOpcode(), dl, VT,
                    DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
}
Results.push_back(Res);
if (IsStrict)
  Results.push_back(Chain);
return;
}
35005 if (VT.isVector() && Subtarget.hasFP16() &&
35006 SrcVT.getVectorElementType() == MVT::f16) {
35007 EVT EleVT = VT.getVectorElementType();
35008 EVT ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
35010 if (SrcVT != MVT::v8f16) {
SDValue Tmp =
    IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
35013 SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
Ops[0] = Src;
Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
}

SDValue Res, Chain;
if (IsStrict) {
  unsigned Opc =
      IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
  Res =
      DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {N->getOperand(0), Src});
  Chain = Res.getValue(1);
} else {
  unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
  Res = DAG.getNode(Opc, dl, ResVT, Src);
}
35029 // TODO: Need to add exception check code for strict FP.
35030 if (EleVT.getSizeInBits() < 16) {
35031 MVT TmpVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8);
35032 Res = DAG.getNode(ISD::TRUNCATE, dl, TmpVT, Res);
35034 // Now widen to 128 bits.
35035 unsigned NumConcats = 128 / TmpVT.getSizeInBits();
35036 MVT ConcatVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8 * NumConcats);
35037 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(TmpVT));
35038 ConcatOps[0] = Res;
35039 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
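// Size arithmetic (illustrative): for EleVT == i8 the truncate above
// produced a 64-bit v8i8, so NumConcats = 128 / 64 = 2 and the concat
// widens it to a legal v16i8 with the payload in the low 8 lanes.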
35042 Results.push_back(Res);
if (IsStrict)
  Results.push_back(Chain);
return;
}
35049 if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
35050 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
35051 "Unexpected type action!");
35053 // Try to create a 128 bit vector, but don't exceed a 32 bit element.
35054 unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
35055 MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
35056 VT.getVectorNumElements());
SDValue Res;
if (IsStrict) {
  Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
                    {N->getOperand(0), Src});
  Chain = Res.getValue(1);
} else {
  Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
}
35066 // Preserve what we know about the size of the original result. If the
35067 // result is v2i32, we have to manually widen the assert.
35068 if (PromoteVT == MVT::v2i32)
35069 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
35070 DAG.getUNDEF(MVT::v2i32));
35072 Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext, dl,
35073 Res.getValueType(), Res,
35074 DAG.getValueType(VT.getVectorElementType()));
35076 if (PromoteVT == MVT::v2i32)
35077 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
35078 DAG.getIntPtrConstant(0, dl));
35080 // Truncate back to the original width.
35081 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
35083 // Now widen to 128 bits.
35084 unsigned NumConcats = 128 / VT.getSizeInBits();
35085 MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
35086 VT.getVectorNumElements() * NumConcats);
35087 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
35088 ConcatOps[0] = Res;
35089 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
35090 Results.push_back(Res);
if (IsStrict)
  Results.push_back(Chain);
return;
}
35097 if (VT == MVT::v2i32) {
35098 assert((!IsStrict || IsSigned || Subtarget.hasAVX512()) &&
35099 "Strict unsigned conversion requires AVX512");
35100 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
35101 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
35102 "Unexpected type action!");
35103 if (Src.getValueType() == MVT::v2f64) {
35104 if (!IsSigned && !Subtarget.hasAVX512()) {
SDValue Res =
    expandFP_TO_UINT_SSE(MVT::v4i32, Src, dl, DAG, Subtarget);
Results.push_back(Res);
return;
}
unsigned Opc;
if (IsStrict)
  Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
else
  Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
// If we have VLX we can emit a target specific FP_TO_UINT node.
35118 if (!IsSigned && !Subtarget.hasVLX()) {
35119 // Otherwise we can defer to the generic legalizer which will widen
35120 // the input as well. This will be further widened during op
35121 // legalization to v8i32<-v8f64.
35122 // For strict nodes we'll need to widen ourselves.
35123 // FIXME: Fix the type legalizer to safely widen strict nodes?
if (!IsStrict)
  return;
Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
35127 DAG.getConstantFP(0.0, dl, MVT::v2f64));
35128 Opc = N->getOpcode();
}

SDValue Res;
if (IsStrict) {
  Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
                    {N->getOperand(0), Src});
  Chain = Res.getValue(1);
} else {
  Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
}
35139 Results.push_back(Res);
if (IsStrict)
  Results.push_back(Chain);
return;
}
35145 // Custom widen strict v2f32->v2i32 by padding with zeros.
35146 // FIXME: Should generic type legalizer do this?
35147 if (Src.getValueType() == MVT::v2f32 && IsStrict) {
35148 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
35149 DAG.getConstantFP(0.0, dl, MVT::v2f32));
35150 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
35151 {N->getOperand(0), Src});
35152 Results.push_back(Res);
35153 Results.push_back(Res.getValue(1));
35157 // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
35158 // so early out here.
35162 assert(!VT.isVector() && "Vectors should have been handled above!");
35164 if ((Subtarget.hasDQI() && VT == MVT::i64 &&
35165 (SrcVT == MVT::f32 || SrcVT == MVT::f64)) ||
35166 (Subtarget.hasFP16() && SrcVT == MVT::f16)) {
35167 assert(!Subtarget.is64Bit() && "i64 should be legal");
35168 unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
35169 // If we use a 128-bit result we might need to use a target specific node.
unsigned SrcElts =
    std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
35172 MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
35173 MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
35174 unsigned Opc = N->getOpcode();
35175 if (NumElts != SrcElts) {
  if (IsStrict)
    Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
  else
    Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
}
35182 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
                          DAG.getConstantFP(0.0, dl, VecInVT), Src,
                          ZeroIdx);
if (IsStrict) {
  SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
  Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
  Chain = Res.getValue(1);
} else {
  Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
}
35193 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
35194 Results.push_back(Res);
if (IsStrict)
  Results.push_back(Chain);
return;
}
35200 if (VT == MVT::i128 && Subtarget.isTargetWin64()) {
35202 SDValue V = LowerWin64_FP_TO_INT128(SDValue(N, 0), DAG, Chain);
35203 Results.push_back(V);
if (IsStrict)
  Results.push_back(Chain);
return;
}
35209 if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
35210 Results.push_back(V);
if (IsStrict)
  Results.push_back(Chain);
return;
}
case ISD::LRINT:
case ISD::LLRINT: {
35218 if (SDValue V = LRINT_LLRINTHelper(N, DAG))
35219 Results.push_back(V);
35223 case ISD::SINT_TO_FP:
35224 case ISD::STRICT_SINT_TO_FP:
35225 case ISD::UINT_TO_FP:
35226 case ISD::STRICT_UINT_TO_FP: {
35227 bool IsStrict = N->isStrictFPOpcode();
35228 bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
35229 N->getOpcode() == ISD::STRICT_SINT_TO_FP;
35230 EVT VT = N->getValueType(0);
35231 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
35232 if (VT.getVectorElementType() == MVT::f16 && Subtarget.hasFP16() &&
35233 Subtarget.hasVLX()) {
35234 if (Src.getValueType().getVectorElementType() == MVT::i16)
35237 if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2i32)
35238 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
35239 IsStrict ? DAG.getConstant(0, dl, MVT::v2i32)
35240 : DAG.getUNDEF(MVT::v2i32));
if (IsStrict) {
  unsigned Opc =
      IsSigned ? X86ISD::STRICT_CVTSI2P : X86ISD::STRICT_CVTUI2P;
  SDValue Res = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
                            {N->getOperand(0), Src});
  Results.push_back(Res);
  Results.push_back(Res.getValue(1));
} else {
  unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
  Results.push_back(DAG.getNode(Opc, dl, MVT::v8f16, Src));
}
return;
}
if (VT != MVT::v2f32)
  return;
35256 EVT SrcVT = Src.getValueType();
35257 if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
35259 unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
35260 : X86ISD::STRICT_CVTUI2P;
35261 SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
35262 {N->getOperand(0), Src});
35263 Results.push_back(Res);
35264 Results.push_back(Res.getValue(1));
35266 unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
35267 Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
35271 if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
35272 Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
35273 SDValue Zero = DAG.getConstant(0, dl, SrcVT);
35274 SDValue One = DAG.getConstant(1, dl, SrcVT);
35275 SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
35276 DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
35277 DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
35278 SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
35279 SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
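// Sketch of the trick above (informal reasoning): when the u64 has its top
// bit set, (Src >> 1) | (Src & 1) halves it while folding the lost low bit
// back in so float rounding still sees it; the halved value converts
// exactly as a signed i64, and the later fadd doubles it back. Values that
// already fit in a signed i64 keep the direct conversion via the select.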
35280 SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
35281 for (int i = 0; i != 2; ++i) {
35282 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
35283 SignSrc, DAG.getIntPtrConstant(i, dl));
  if (IsStrict)
    SignCvts[i] =
        DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
                    {N->getOperand(0), Elt});
  else
    SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Elt);
}
35291 SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
35292 SDValue Slow, Chain;
if (IsStrict) {
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                      SignCvts[0].getValue(1), SignCvts[1].getValue(1));
  Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
                     {Chain, SignCvt, SignCvt});
  Chain = Slow.getValue(1);
} else {
  Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
}
35302 IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
35304 DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
35305 SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
35306 Results.push_back(Cvt);
if (IsStrict)
  Results.push_back(Chain);
return;
}
if (SrcVT != MVT::v2i32)
  return;
if (IsSigned || Subtarget.hasAVX512()) {
  if (!IsStrict)
    return;
35319 // Custom widen strict v2i32->v2f32 to avoid scalarization.
35320 // FIXME: Should generic type legalizer do this?
35321 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
35322 DAG.getConstant(0, dl, MVT::v2i32));
35323 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
35324 {N->getOperand(0), Src});
35325 Results.push_back(Res);
Results.push_back(Res.getValue(1));
return;
}
35330 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
35331 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
35332 SDValue VBias = DAG.getConstantFP(
35333 llvm::bit_cast<double>(0x4330000000000000ULL), dl, MVT::v2f64);
35334 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
35335 DAG.getBitcast(MVT::v2i64, VBias));
35336 Or = DAG.getBitcast(MVT::v2f64, Or);
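// Magic-constant note: 0x4330000000000000 is the IEEE-754 bit pattern of
// the double 2^52, so OR'ing a zero-extended u32 into the low mantissa
// bits produces exactly 2^52 + x; subtracting VBias below leaves x as an
// exact double, which VFPROUND then narrows to f32.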
if (IsStrict) {
  SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
                            {N->getOperand(0), Or, VBias});
35340 SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
35341 {MVT::v4f32, MVT::Other},
35342 {Sub.getValue(1), Sub});
35343 Results.push_back(Res);
35344 Results.push_back(Res.getValue(1));
} else {
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
  Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
}
return;
}
35352 case ISD::STRICT_FP_ROUND:
35353 case ISD::FP_ROUND: {
35354 bool IsStrict = N->isStrictFPOpcode();
35355 SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
35356 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
35357 SDValue Rnd = N->getOperand(IsStrict ? 2 : 1);
35358 EVT SrcVT = Src.getValueType();
35359 EVT VT = N->getValueType(0);
35361 if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2f32) {
35362 SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f32)
35363 : DAG.getUNDEF(MVT::v2f32);
35364 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src, Ext);
35366 if (!Subtarget.hasFP16() && VT.getVectorElementType() == MVT::f16) {
35367 assert(Subtarget.hasF16C() && "Cannot widen f16 without F16C");
if (SrcVT.getVectorElementType() != MVT::f32)
  return;
SDValue V;
if (IsStrict)
  V = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
                  {Chain, Src, Rnd});
else
  V = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Src, Rnd);
35377 Results.push_back(DAG.getBitcast(MVT::v8f16, V));
if (IsStrict)
  Results.push_back(V.getValue(1));
return;
}
if (!isTypeLegal(Src.getValueType()))
  return;
35384 EVT NewVT = VT.getVectorElementType() == MVT::f16 ? MVT::v8f16 : MVT::v4f32;
SDValue V;
if (IsStrict)
  V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {NewVT, MVT::Other},
                  {Chain, Src});
else
  V = DAG.getNode(X86ISD::VFPROUND, dl, NewVT, Src);
35390 Results.push_back(V);
if (IsStrict)
  Results.push_back(V.getValue(1));
return;
}
35395 case ISD::FP_EXTEND:
35396 case ISD::STRICT_FP_EXTEND: {
35397 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
35398 // No other ValueType for FP_EXTEND should reach this point.
35399 assert(N->getValueType(0) == MVT::v2f32 &&
35400 "Do not know how to legalize this Node");
if (!Subtarget.hasFP16() || !Subtarget.hasVLX())
  return;
35403 bool IsStrict = N->isStrictFPOpcode();
35404 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
35405 SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f16)
35406 : DAG.getUNDEF(MVT::v2f16);
35407 SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f16, Src, Ext);
if (IsStrict)
  V = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::v4f32, MVT::Other},
                  {N->getOperand(0), V});
else
  V = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, V);
35413 Results.push_back(V);
if (IsStrict)
  Results.push_back(V.getValue(1));
return;
}
35418 case ISD::INTRINSIC_W_CHAIN: {
35419 unsigned IntNo = N->getConstantOperandVal(1);
default:
  llvm_unreachable("Do not know how to custom type "
                   "legalize this intrinsic operation!");
35423 case Intrinsic::x86_rdtsc:
return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
                               Results);
35426 case Intrinsic::x86_rdtscp:
return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
                               Results);
35429 case Intrinsic::x86_rdpmc:
expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
                            Results);
return;
35433 case Intrinsic::x86_rdpru:
expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPRU, X86::ECX, Subtarget,
                            Results);
return;
35437 case Intrinsic::x86_xgetbv:
expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
                            Results);
return;
}
}
35443 case ISD::READCYCLECOUNTER: {
35444 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
35446 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
35447 EVT T = N->getValueType(0);
35448 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
35449 bool Regs64bit = T == MVT::i128;
35450 assert((!Regs64bit || Subtarget.canUseCMPXCHG16B()) &&
35451 "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
35452 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
35453 SDValue cpInL, cpInH;
35454 std::tie(cpInL, cpInH) =
35455 DAG.SplitScalar(N->getOperand(2), dl, HalfT, HalfT);
35456 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
35457 Regs64bit ? X86::RAX : X86::EAX, cpInL, SDValue());
35459 DAG.getCopyToReg(cpInL.getValue(0), dl, Regs64bit ? X86::RDX : X86::EDX,
35460 cpInH, cpInL.getValue(1));
35461 SDValue swapInL, swapInH;
35462 std::tie(swapInL, swapInH) =
35463 DAG.SplitScalar(N->getOperand(3), dl, HalfT, HalfT);
35465 DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
35466 swapInH, cpInH.getValue(1));
// In 64-bit mode we might need the base pointer in RBX, but we can't know
// until later. So we keep the RBX input in a vreg and use a custom
// inserter.
// Since RBX will be a reserved register, the register allocator will not
// make sure its value is properly saved and restored around this
// live-range; the custom inserter takes care of that.
35475 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
35476 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
SDValue Result;
if (Regs64bit) {
  SDValue Ops[] = {swapInH.getValue(0), N->getOperand(1), swapInL,
                   swapInH.getValue(1)};
  Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG16_SAVE_RBX_DAG, dl,
                                   Tys, Ops, T, MMO);
} else {
  swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl, X86::EBX, swapInL,
                             swapInH.getValue(1));
  SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
                   swapInL.getValue(1)};
  Result =
      DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, Ops, T, MMO);
}
35491 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
35492 Regs64bit ? X86::RAX : X86::EAX,
35493 HalfT, Result.getValue(1));
35494 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
35495 Regs64bit ? X86::RDX : X86::EDX,
35496 HalfT, cpOutL.getValue(2));
35497 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
35499 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
35500 MVT::i32, cpOutH.getValue(2));
35501 SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
35502 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
35504 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
35505 Results.push_back(Success);
35506 Results.push_back(EFLAGS.getValue(1));
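// Register map for reference: CMPXCHG8B/16B compares EDX:EAX (RDX:RAX)
// with memory, stores ECX:EBX (RCX:RBX) on match, returns the old value
// in EDX:EAX (RDX:RAX), and sets ZF on success, hence the COND_E setcc on
// the copied-out EFLAGS and the BUILD_PAIR of the two result halves.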
35509 case ISD::ATOMIC_LOAD: {
35510 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
35511 bool NoImplicitFloatOps =
35512 DAG.getMachineFunction().getFunction().hasFnAttribute(
35513 Attribute::NoImplicitFloat);
35514 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
35515 auto *Node = cast<AtomicSDNode>(N);
35516 if (Subtarget.hasSSE1()) {
35517 // Use a VZEXT_LOAD which will be selected as MOVQ or XORPS+MOVLPS.
35518 // Then extract the lower 64-bits.
35519 MVT LdVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
35520 SDVTList Tys = DAG.getVTList(LdVT, MVT::Other);
35521 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
35522 SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
35523 MVT::i64, Node->getMemOperand());
35524 if (Subtarget.hasSSE2()) {
35525 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
35526 DAG.getIntPtrConstant(0, dl));
35527 Results.push_back(Res);
35528 Results.push_back(Ld.getValue(1));
35531 // We use an alternative sequence for SSE1 that extracts as v2f32 and
35532 // then casts to i64. This avoids a 128-bit stack temporary being
35533 // created by type legalization if we were to cast v4f32->v2i64.
35534 SDValue Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Ld,
35535 DAG.getIntPtrConstant(0, dl));
35536 Res = DAG.getBitcast(MVT::i64, Res);
35537 Results.push_back(Res);
35538 Results.push_back(Ld.getValue(1));
35541 if (Subtarget.hasX87()) {
35542 // First load this into an 80-bit X87 register. This will put the whole
35543 // integer into the significand.
35544 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
35545 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
35546 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD,
35547 dl, Tys, Ops, MVT::i64,
35548 Node->getMemOperand());
35549 SDValue Chain = Result.getValue(1);
35551 // Now store the X87 register to a stack temporary and convert to i64.
35552 // This store is not atomic and doesn't need to be.
35553 // FIXME: We don't need a stack temporary if the result of the load
35554 // is already being stored. We could just directly store there.
35555 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
35556 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
35557 MachinePointerInfo MPI =
35558 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
35559 SDValue StoreOps[] = { Chain, Result, StackPtr };
35560 Chain = DAG.getMemIntrinsicNode(
35561 X86ISD::FIST, dl, DAG.getVTList(MVT::Other), StoreOps, MVT::i64,
35562 MPI, std::nullopt /*Align*/, MachineMemOperand::MOStore);
35564 // Finally load the value back from the stack temporary and return it.
35565 // This load is not atomic and doesn't need to be.
35566 // This load will be further type legalized.
35567 Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
35568 Results.push_back(Result);
35569 Results.push_back(Result.getValue(1));
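// Why the x87 round-trip is lossless (informal): the f80 format carries a
// 64-bit explicit significand, so FILD of any i64 is exact and FIST
// recovers it bit-for-bit; only the single FILD touches the atomic
// location, and the stack temporary needs no atomicity.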
35573 // TODO: Use MOVLPS when SSE1 is available?
35574 // Delegate to generic TypeLegalization. Situations we can really handle
35575 // should have already been dealt with by AtomicExpandPass.cpp.
35578 case ISD::ATOMIC_SWAP:
35579 case ISD::ATOMIC_LOAD_ADD:
35580 case ISD::ATOMIC_LOAD_SUB:
35581 case ISD::ATOMIC_LOAD_AND:
35582 case ISD::ATOMIC_LOAD_OR:
35583 case ISD::ATOMIC_LOAD_XOR:
35584 case ISD::ATOMIC_LOAD_NAND:
35585 case ISD::ATOMIC_LOAD_MIN:
35586 case ISD::ATOMIC_LOAD_MAX:
35587 case ISD::ATOMIC_LOAD_UMIN:
35588 case ISD::ATOMIC_LOAD_UMAX:
35589 // Delegate to generic TypeLegalization. Situations we can really handle
35590 // should have already been dealt with by AtomicExpandPass.cpp.
35593 case ISD::BITCAST: {
35594 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
35595 EVT DstVT = N->getValueType(0);
35596 EVT SrcVT = N->getOperand(0).getValueType();
// If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit target
// we can split using the k-register rather than memory.
35600 if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
35601 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
35604 Lo = DAG.getBitcast(MVT::i32, Lo);
35605 Hi = DAG.getBitcast(MVT::i32, Hi);
35606 SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
35607 Results.push_back(Res);
35611 if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
35612 // FIXME: Use v4f32 for SSE1?
35613 assert(Subtarget.hasSSE2() && "Requires SSE2");
35614 assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
35615 "Unexpected type action!");
35616 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64,
                          N->getOperand(0));
35619 Res = DAG.getBitcast(WideVT, Res);
35620 Results.push_back(Res);
35626 case ISD::MGATHER: {
35627 EVT VT = N->getValueType(0);
35628 if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
35629 (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
35630 auto *Gather = cast<MaskedGatherSDNode>(N);
35631 SDValue Index = Gather->getIndex();
if (Index.getValueType() != MVT::v2i64)
  return;
35634 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
35635 "Unexpected type action!");
35636 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
35637 SDValue Mask = Gather->getMask();
35638 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
35639 SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
Gather->getPassThru(),
DAG.getUNDEF(VT));
35642 if (!Subtarget.hasVLX()) {
35643 // We need to widen the mask, but the instruction will only use 2
35644 // of its elements. So we can use undef.
35645 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
35646 DAG.getUNDEF(MVT::v2i1));
35647 Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
35649 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
35650 Gather->getBasePtr(), Index, Gather->getScale() };
35651 SDValue Res = DAG.getMemIntrinsicNode(
35652 X86ISD::MGATHER, dl, DAG.getVTList(WideVT, MVT::Other), Ops,
35653 Gather->getMemoryVT(), Gather->getMemOperand());
35654 Results.push_back(Res);
35655 Results.push_back(Res.getValue(1));
case ISD::LOAD: {
  // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
  // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
  // cast since type legalization will try to use an i64 load.
35664 MVT VT = N->getSimpleValueType(0);
35665 assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
35666 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
35667 "Unexpected type action!");
if (!ISD::isNON_EXTLoad(N))
  return;
35670 auto *Ld = cast<LoadSDNode>(N);
35671 if (Subtarget.hasSSE2()) {
35672 MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
35673 SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
35674 Ld->getPointerInfo(), Ld->getOriginalAlign(),
35675 Ld->getMemOperand()->getFlags());
35676 SDValue Chain = Res.getValue(1);
35677 MVT VecVT = MVT::getVectorVT(LdVT, 2);
35678 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
35679 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
35680 Res = DAG.getBitcast(WideVT, Res);
35681 Results.push_back(Res);
35682 Results.push_back(Chain);
35685 assert(Subtarget.hasSSE1() && "Expected SSE");
35686 SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
35687 SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
35688 SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
35689 MVT::i64, Ld->getMemOperand());
35690 Results.push_back(Res);
35691 Results.push_back(Res.getValue(1));
35694 case ISD::ADDRSPACECAST: {
35695 SDValue V = LowerADDRSPACECAST(SDValue(N,0), DAG);
35696 Results.push_back(V);
35699 case ISD::BITREVERSE: {
35700 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
35701 assert(Subtarget.hasXOP() && "Expected XOP");
35702 // We can use VPPERM by copying to a vector register and back. We'll need
35703 // to move the scalar in two i32 pieces.
35704 Results.push_back(LowerBITREVERSE(SDValue(N, 0), Subtarget, DAG));
35707 case ISD::EXTRACT_VECTOR_ELT: {
35708 // f16 = extract vXf16 %vec, i64 %idx
35709 assert(N->getSimpleValueType(0) == MVT::f16 &&
35710 "Unexpected Value type of EXTRACT_VECTOR_ELT!");
35711 assert(Subtarget.hasFP16() && "Expected FP16");
35712 SDValue VecOp = N->getOperand(0);
35713 EVT ExtVT = VecOp.getValueType().changeVectorElementTypeToInteger();
35714 SDValue Split = DAG.getBitcast(ExtVT, N->getOperand(0));
Split = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Split,
                    N->getOperand(1));
35717 Split = DAG.getBitcast(MVT::f16, Split);
35718 Results.push_back(Split);
35724 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
35725 switch ((X86ISD::NodeType)Opcode) {
35726 case X86ISD::FIRST_NUMBER: break;
35727 #define NODE_NAME_CASE(NODE) case X86ISD::NODE: return "X86ISD::" #NODE;
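// For example, NODE_NAME_CASE(BSF) below expands to:
//   case X86ISD::BSF: return "X86ISD::BSF";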
35728 NODE_NAME_CASE(BSF)
35729 NODE_NAME_CASE(BSR)
35730 NODE_NAME_CASE(FSHL)
35731 NODE_NAME_CASE(FSHR)
35732 NODE_NAME_CASE(FAND)
35733 NODE_NAME_CASE(FANDN)
35734 NODE_NAME_CASE(FOR)
35735 NODE_NAME_CASE(FXOR)
35736 NODE_NAME_CASE(FILD)
35737 NODE_NAME_CASE(FIST)
35738 NODE_NAME_CASE(FP_TO_INT_IN_MEM)
35739 NODE_NAME_CASE(FLD)
35740 NODE_NAME_CASE(FST)
35741 NODE_NAME_CASE(CALL)
35742 NODE_NAME_CASE(CALL_RVMARKER)
35744 NODE_NAME_CASE(CMP)
35745 NODE_NAME_CASE(FCMP)
35746 NODE_NAME_CASE(STRICT_FCMP)
35747 NODE_NAME_CASE(STRICT_FCMPS)
35748 NODE_NAME_CASE(COMI)
35749 NODE_NAME_CASE(UCOMI)
35750 NODE_NAME_CASE(CMPM)
35751 NODE_NAME_CASE(CMPMM)
35752 NODE_NAME_CASE(STRICT_CMPM)
35753 NODE_NAME_CASE(CMPMM_SAE)
35754 NODE_NAME_CASE(SETCC)
35755 NODE_NAME_CASE(SETCC_CARRY)
35756 NODE_NAME_CASE(FSETCC)
35757 NODE_NAME_CASE(FSETCCM)
35758 NODE_NAME_CASE(FSETCCM_SAE)
35759 NODE_NAME_CASE(CMOV)
35760 NODE_NAME_CASE(BRCOND)
35761 NODE_NAME_CASE(RET_GLUE)
35762 NODE_NAME_CASE(IRET)
35763 NODE_NAME_CASE(REP_STOS)
35764 NODE_NAME_CASE(REP_MOVS)
35765 NODE_NAME_CASE(GlobalBaseReg)
35766 NODE_NAME_CASE(Wrapper)
35767 NODE_NAME_CASE(WrapperRIP)
35768 NODE_NAME_CASE(MOVQ2DQ)
35769 NODE_NAME_CASE(MOVDQ2Q)
35770 NODE_NAME_CASE(MMX_MOVD2W)
35771 NODE_NAME_CASE(MMX_MOVW2D)
35772 NODE_NAME_CASE(PEXTRB)
35773 NODE_NAME_CASE(PEXTRW)
35774 NODE_NAME_CASE(INSERTPS)
35775 NODE_NAME_CASE(PINSRB)
35776 NODE_NAME_CASE(PINSRW)
35777 NODE_NAME_CASE(PSHUFB)
35778 NODE_NAME_CASE(ANDNP)
35779 NODE_NAME_CASE(BLENDI)
35780 NODE_NAME_CASE(BLENDV)
35781 NODE_NAME_CASE(HADD)
35782 NODE_NAME_CASE(HSUB)
35783 NODE_NAME_CASE(FHADD)
35784 NODE_NAME_CASE(FHSUB)
35785 NODE_NAME_CASE(CONFLICT)
35786 NODE_NAME_CASE(FMAX)
35787 NODE_NAME_CASE(FMAXS)
35788 NODE_NAME_CASE(FMAX_SAE)
35789 NODE_NAME_CASE(FMAXS_SAE)
35790 NODE_NAME_CASE(FMIN)
35791 NODE_NAME_CASE(FMINS)
35792 NODE_NAME_CASE(FMIN_SAE)
35793 NODE_NAME_CASE(FMINS_SAE)
35794 NODE_NAME_CASE(FMAXC)
35795 NODE_NAME_CASE(FMINC)
35796 NODE_NAME_CASE(FRSQRT)
35797 NODE_NAME_CASE(FRCP)
35798 NODE_NAME_CASE(EXTRQI)
35799 NODE_NAME_CASE(INSERTQI)
35800 NODE_NAME_CASE(TLSADDR)
35801 NODE_NAME_CASE(TLSBASEADDR)
35802 NODE_NAME_CASE(TLSCALL)
35803 NODE_NAME_CASE(EH_SJLJ_SETJMP)
35804 NODE_NAME_CASE(EH_SJLJ_LONGJMP)
35805 NODE_NAME_CASE(EH_SJLJ_SETUP_DISPATCH)
35806 NODE_NAME_CASE(EH_RETURN)
35807 NODE_NAME_CASE(TC_RETURN)
35808 NODE_NAME_CASE(FNSTCW16m)
35809 NODE_NAME_CASE(FLDCW16m)
35810 NODE_NAME_CASE(FNSTENVm)
35811 NODE_NAME_CASE(FLDENVm)
35812 NODE_NAME_CASE(LCMPXCHG_DAG)
35813 NODE_NAME_CASE(LCMPXCHG8_DAG)
35814 NODE_NAME_CASE(LCMPXCHG16_DAG)
35815 NODE_NAME_CASE(LCMPXCHG16_SAVE_RBX_DAG)
35816 NODE_NAME_CASE(LADD)
35817 NODE_NAME_CASE(LSUB)
35818 NODE_NAME_CASE(LOR)
35819 NODE_NAME_CASE(LXOR)
35820 NODE_NAME_CASE(LAND)
35821 NODE_NAME_CASE(LBTS)
35822 NODE_NAME_CASE(LBTC)
35823 NODE_NAME_CASE(LBTR)
35824 NODE_NAME_CASE(LBTS_RM)
35825 NODE_NAME_CASE(LBTC_RM)
35826 NODE_NAME_CASE(LBTR_RM)
35827 NODE_NAME_CASE(AADD)
35828 NODE_NAME_CASE(AOR)
35829 NODE_NAME_CASE(AXOR)
35830 NODE_NAME_CASE(AAND)
35831 NODE_NAME_CASE(VZEXT_MOVL)
35832 NODE_NAME_CASE(VZEXT_LOAD)
35833 NODE_NAME_CASE(VEXTRACT_STORE)
35834 NODE_NAME_CASE(VTRUNC)
35835 NODE_NAME_CASE(VTRUNCS)
35836 NODE_NAME_CASE(VTRUNCUS)
35837 NODE_NAME_CASE(VMTRUNC)
35838 NODE_NAME_CASE(VMTRUNCS)
35839 NODE_NAME_CASE(VMTRUNCUS)
35840 NODE_NAME_CASE(VTRUNCSTORES)
35841 NODE_NAME_CASE(VTRUNCSTOREUS)
35842 NODE_NAME_CASE(VMTRUNCSTORES)
35843 NODE_NAME_CASE(VMTRUNCSTOREUS)
35844 NODE_NAME_CASE(VFPEXT)
35845 NODE_NAME_CASE(STRICT_VFPEXT)
35846 NODE_NAME_CASE(VFPEXT_SAE)
35847 NODE_NAME_CASE(VFPEXTS)
35848 NODE_NAME_CASE(VFPEXTS_SAE)
35849 NODE_NAME_CASE(VFPROUND)
35850 NODE_NAME_CASE(STRICT_VFPROUND)
35851 NODE_NAME_CASE(VMFPROUND)
35852 NODE_NAME_CASE(VFPROUND_RND)
35853 NODE_NAME_CASE(VFPROUNDS)
35854 NODE_NAME_CASE(VFPROUNDS_RND)
35855 NODE_NAME_CASE(VSHLDQ)
35856 NODE_NAME_CASE(VSRLDQ)
35857 NODE_NAME_CASE(VSHL)
35858 NODE_NAME_CASE(VSRL)
35859 NODE_NAME_CASE(VSRA)
35860 NODE_NAME_CASE(VSHLI)
35861 NODE_NAME_CASE(VSRLI)
35862 NODE_NAME_CASE(VSRAI)
35863 NODE_NAME_CASE(VSHLV)
35864 NODE_NAME_CASE(VSRLV)
35865 NODE_NAME_CASE(VSRAV)
35866 NODE_NAME_CASE(VROTLI)
35867 NODE_NAME_CASE(VROTRI)
35868 NODE_NAME_CASE(VPPERM)
35869 NODE_NAME_CASE(CMPP)
35870 NODE_NAME_CASE(STRICT_CMPP)
35871 NODE_NAME_CASE(PCMPEQ)
35872 NODE_NAME_CASE(PCMPGT)
35873 NODE_NAME_CASE(PHMINPOS)
35874 NODE_NAME_CASE(ADD)
35875 NODE_NAME_CASE(SUB)
35876 NODE_NAME_CASE(ADC)
35877 NODE_NAME_CASE(SBB)
35878 NODE_NAME_CASE(SMUL)
35879 NODE_NAME_CASE(UMUL)
35881 NODE_NAME_CASE(XOR)
35882 NODE_NAME_CASE(AND)
35883 NODE_NAME_CASE(BEXTR)
35884 NODE_NAME_CASE(BEXTRI)
35885 NODE_NAME_CASE(BZHI)
35886 NODE_NAME_CASE(PDEP)
35887 NODE_NAME_CASE(PEXT)
35888 NODE_NAME_CASE(MUL_IMM)
35889 NODE_NAME_CASE(MOVMSK)
35890 NODE_NAME_CASE(PTEST)
35891 NODE_NAME_CASE(TESTP)
35892 NODE_NAME_CASE(KORTEST)
35893 NODE_NAME_CASE(KTEST)
35894 NODE_NAME_CASE(KADD)
35895 NODE_NAME_CASE(KSHIFTL)
35896 NODE_NAME_CASE(KSHIFTR)
35897 NODE_NAME_CASE(PACKSS)
35898 NODE_NAME_CASE(PACKUS)
35899 NODE_NAME_CASE(PALIGNR)
35900 NODE_NAME_CASE(VALIGN)
35901 NODE_NAME_CASE(VSHLD)
35902 NODE_NAME_CASE(VSHRD)
35903 NODE_NAME_CASE(VSHLDV)
35904 NODE_NAME_CASE(VSHRDV)
35905 NODE_NAME_CASE(PSHUFD)
35906 NODE_NAME_CASE(PSHUFHW)
35907 NODE_NAME_CASE(PSHUFLW)
35908 NODE_NAME_CASE(SHUFP)
35909 NODE_NAME_CASE(SHUF128)
35910 NODE_NAME_CASE(MOVLHPS)
35911 NODE_NAME_CASE(MOVHLPS)
35912 NODE_NAME_CASE(MOVDDUP)
35913 NODE_NAME_CASE(MOVSHDUP)
35914 NODE_NAME_CASE(MOVSLDUP)
35915 NODE_NAME_CASE(MOVSD)
35916 NODE_NAME_CASE(MOVSS)
35917 NODE_NAME_CASE(MOVSH)
35918 NODE_NAME_CASE(UNPCKL)
35919 NODE_NAME_CASE(UNPCKH)
35920 NODE_NAME_CASE(VBROADCAST)
35921 NODE_NAME_CASE(VBROADCAST_LOAD)
35922 NODE_NAME_CASE(VBROADCASTM)
35923 NODE_NAME_CASE(SUBV_BROADCAST_LOAD)
35924 NODE_NAME_CASE(VPERMILPV)
35925 NODE_NAME_CASE(VPERMILPI)
35926 NODE_NAME_CASE(VPERM2X128)
35927 NODE_NAME_CASE(VPERMV)
35928 NODE_NAME_CASE(VPERMV3)
35929 NODE_NAME_CASE(VPERMI)
35930 NODE_NAME_CASE(VPTERNLOG)
35931 NODE_NAME_CASE(VFIXUPIMM)
35932 NODE_NAME_CASE(VFIXUPIMM_SAE)
35933 NODE_NAME_CASE(VFIXUPIMMS)
35934 NODE_NAME_CASE(VFIXUPIMMS_SAE)
35935 NODE_NAME_CASE(VRANGE)
35936 NODE_NAME_CASE(VRANGE_SAE)
35937 NODE_NAME_CASE(VRANGES)
35938 NODE_NAME_CASE(VRANGES_SAE)
35939 NODE_NAME_CASE(PMULUDQ)
35940 NODE_NAME_CASE(PMULDQ)
35941 NODE_NAME_CASE(PSADBW)
35942 NODE_NAME_CASE(DBPSADBW)
35943 NODE_NAME_CASE(VASTART_SAVE_XMM_REGS)
35944 NODE_NAME_CASE(VAARG_64)
35945 NODE_NAME_CASE(VAARG_X32)
35946 NODE_NAME_CASE(DYN_ALLOCA)
35947 NODE_NAME_CASE(MFENCE)
35948 NODE_NAME_CASE(SEG_ALLOCA)
35949 NODE_NAME_CASE(PROBED_ALLOCA)
35950 NODE_NAME_CASE(RDRAND)
35951 NODE_NAME_CASE(RDSEED)
35952 NODE_NAME_CASE(RDPKRU)
35953 NODE_NAME_CASE(WRPKRU)
35954 NODE_NAME_CASE(VPMADDUBSW)
35955 NODE_NAME_CASE(VPMADDWD)
35956 NODE_NAME_CASE(VPSHA)
35957 NODE_NAME_CASE(VPSHL)
35958 NODE_NAME_CASE(VPCOM)
35959 NODE_NAME_CASE(VPCOMU)
35960 NODE_NAME_CASE(VPERMIL2)
35961 NODE_NAME_CASE(FMSUB)
35962 NODE_NAME_CASE(STRICT_FMSUB)
35963 NODE_NAME_CASE(FNMADD)
35964 NODE_NAME_CASE(STRICT_FNMADD)
35965 NODE_NAME_CASE(FNMSUB)
35966 NODE_NAME_CASE(STRICT_FNMSUB)
35967 NODE_NAME_CASE(FMADDSUB)
35968 NODE_NAME_CASE(FMSUBADD)
35969 NODE_NAME_CASE(FMADD_RND)
35970 NODE_NAME_CASE(FNMADD_RND)
35971 NODE_NAME_CASE(FMSUB_RND)
35972 NODE_NAME_CASE(FNMSUB_RND)
35973 NODE_NAME_CASE(FMADDSUB_RND)
35974 NODE_NAME_CASE(FMSUBADD_RND)
35975 NODE_NAME_CASE(VFMADDC)
35976 NODE_NAME_CASE(VFMADDC_RND)
35977 NODE_NAME_CASE(VFCMADDC)
35978 NODE_NAME_CASE(VFCMADDC_RND)
35979 NODE_NAME_CASE(VFMULC)
35980 NODE_NAME_CASE(VFMULC_RND)
35981 NODE_NAME_CASE(VFCMULC)
35982 NODE_NAME_CASE(VFCMULC_RND)
35983 NODE_NAME_CASE(VFMULCSH)
35984 NODE_NAME_CASE(VFMULCSH_RND)
35985 NODE_NAME_CASE(VFCMULCSH)
35986 NODE_NAME_CASE(VFCMULCSH_RND)
35987 NODE_NAME_CASE(VFMADDCSH)
35988 NODE_NAME_CASE(VFMADDCSH_RND)
35989 NODE_NAME_CASE(VFCMADDCSH)
35990 NODE_NAME_CASE(VFCMADDCSH_RND)
35991 NODE_NAME_CASE(VPMADD52H)
35992 NODE_NAME_CASE(VPMADD52L)
35993 NODE_NAME_CASE(VRNDSCALE)
35994 NODE_NAME_CASE(STRICT_VRNDSCALE)
35995 NODE_NAME_CASE(VRNDSCALE_SAE)
35996 NODE_NAME_CASE(VRNDSCALES)
35997 NODE_NAME_CASE(VRNDSCALES_SAE)
35998 NODE_NAME_CASE(VREDUCE)
35999 NODE_NAME_CASE(VREDUCE_SAE)
36000 NODE_NAME_CASE(VREDUCES)
36001 NODE_NAME_CASE(VREDUCES_SAE)
36002 NODE_NAME_CASE(VGETMANT)
36003 NODE_NAME_CASE(VGETMANT_SAE)
36004 NODE_NAME_CASE(VGETMANTS)
36005 NODE_NAME_CASE(VGETMANTS_SAE)
36006 NODE_NAME_CASE(PCMPESTR)
36007 NODE_NAME_CASE(PCMPISTR)
36008 NODE_NAME_CASE(XTEST)
36009 NODE_NAME_CASE(COMPRESS)
36010 NODE_NAME_CASE(EXPAND)
36011 NODE_NAME_CASE(SELECTS)
36012 NODE_NAME_CASE(ADDSUB)
36013 NODE_NAME_CASE(RCP14)
36014 NODE_NAME_CASE(RCP14S)
36015 NODE_NAME_CASE(RCP28)
36016 NODE_NAME_CASE(RCP28_SAE)
36017 NODE_NAME_CASE(RCP28S)
36018 NODE_NAME_CASE(RCP28S_SAE)
36019 NODE_NAME_CASE(EXP2)
36020 NODE_NAME_CASE(EXP2_SAE)
36021 NODE_NAME_CASE(RSQRT14)
36022 NODE_NAME_CASE(RSQRT14S)
36023 NODE_NAME_CASE(RSQRT28)
36024 NODE_NAME_CASE(RSQRT28_SAE)
36025 NODE_NAME_CASE(RSQRT28S)
36026 NODE_NAME_CASE(RSQRT28S_SAE)
36027 NODE_NAME_CASE(FADD_RND)
36028 NODE_NAME_CASE(FADDS)
36029 NODE_NAME_CASE(FADDS_RND)
36030 NODE_NAME_CASE(FSUB_RND)
36031 NODE_NAME_CASE(FSUBS)
36032 NODE_NAME_CASE(FSUBS_RND)
36033 NODE_NAME_CASE(FMUL_RND)
36034 NODE_NAME_CASE(FMULS)
36035 NODE_NAME_CASE(FMULS_RND)
36036 NODE_NAME_CASE(FDIV_RND)
36037 NODE_NAME_CASE(FDIVS)
36038 NODE_NAME_CASE(FDIVS_RND)
36039 NODE_NAME_CASE(FSQRT_RND)
36040 NODE_NAME_CASE(FSQRTS)
36041 NODE_NAME_CASE(FSQRTS_RND)
36042 NODE_NAME_CASE(FGETEXP)
36043 NODE_NAME_CASE(FGETEXP_SAE)
36044 NODE_NAME_CASE(FGETEXPS)
36045 NODE_NAME_CASE(FGETEXPS_SAE)
36046 NODE_NAME_CASE(SCALEF)
36047 NODE_NAME_CASE(SCALEF_RND)
36048 NODE_NAME_CASE(SCALEFS)
36049 NODE_NAME_CASE(SCALEFS_RND)
36050 NODE_NAME_CASE(MULHRS)
36051 NODE_NAME_CASE(SINT_TO_FP_RND)
36052 NODE_NAME_CASE(UINT_TO_FP_RND)
36053 NODE_NAME_CASE(CVTTP2SI)
36054 NODE_NAME_CASE(CVTTP2UI)
36055 NODE_NAME_CASE(STRICT_CVTTP2SI)
36056 NODE_NAME_CASE(STRICT_CVTTP2UI)
36057 NODE_NAME_CASE(MCVTTP2SI)
36058 NODE_NAME_CASE(MCVTTP2UI)
36059 NODE_NAME_CASE(CVTTP2SI_SAE)
36060 NODE_NAME_CASE(CVTTP2UI_SAE)
36061 NODE_NAME_CASE(CVTTS2SI)
36062 NODE_NAME_CASE(CVTTS2UI)
36063 NODE_NAME_CASE(CVTTS2SI_SAE)
36064 NODE_NAME_CASE(CVTTS2UI_SAE)
36065 NODE_NAME_CASE(CVTSI2P)
36066 NODE_NAME_CASE(CVTUI2P)
36067 NODE_NAME_CASE(STRICT_CVTSI2P)
36068 NODE_NAME_CASE(STRICT_CVTUI2P)
36069 NODE_NAME_CASE(MCVTSI2P)
36070 NODE_NAME_CASE(MCVTUI2P)
36071 NODE_NAME_CASE(VFPCLASS)
36072 NODE_NAME_CASE(VFPCLASSS)
36073 NODE_NAME_CASE(MULTISHIFT)
36074 NODE_NAME_CASE(SCALAR_SINT_TO_FP)
36075 NODE_NAME_CASE(SCALAR_SINT_TO_FP_RND)
36076 NODE_NAME_CASE(SCALAR_UINT_TO_FP)
36077 NODE_NAME_CASE(SCALAR_UINT_TO_FP_RND)
36078 NODE_NAME_CASE(CVTPS2PH)
36079 NODE_NAME_CASE(STRICT_CVTPS2PH)
36080 NODE_NAME_CASE(CVTPS2PH_SAE)
36081 NODE_NAME_CASE(MCVTPS2PH)
36082 NODE_NAME_CASE(MCVTPS2PH_SAE)
36083 NODE_NAME_CASE(CVTPH2PS)
36084 NODE_NAME_CASE(STRICT_CVTPH2PS)
36085 NODE_NAME_CASE(CVTPH2PS_SAE)
36086 NODE_NAME_CASE(CVTP2SI)
36087 NODE_NAME_CASE(CVTP2UI)
36088 NODE_NAME_CASE(MCVTP2SI)
36089 NODE_NAME_CASE(MCVTP2UI)
36090 NODE_NAME_CASE(CVTP2SI_RND)
36091 NODE_NAME_CASE(CVTP2UI_RND)
36092 NODE_NAME_CASE(CVTS2SI)
36093 NODE_NAME_CASE(CVTS2UI)
36094 NODE_NAME_CASE(CVTS2SI_RND)
36095 NODE_NAME_CASE(CVTS2UI_RND)
36096 NODE_NAME_CASE(CVTNE2PS2BF16)
36097 NODE_NAME_CASE(CVTNEPS2BF16)
36098 NODE_NAME_CASE(MCVTNEPS2BF16)
36099 NODE_NAME_CASE(DPBF16PS)
36100 NODE_NAME_CASE(LWPINS)
36101 NODE_NAME_CASE(MGATHER)
36102 NODE_NAME_CASE(MSCATTER)
36103 NODE_NAME_CASE(VPDPBUSD)
36104 NODE_NAME_CASE(VPDPBUSDS)
36105 NODE_NAME_CASE(VPDPWSSD)
36106 NODE_NAME_CASE(VPDPWSSDS)
36107 NODE_NAME_CASE(VPSHUFBITQMB)
36108 NODE_NAME_CASE(GF2P8MULB)
36109 NODE_NAME_CASE(GF2P8AFFINEQB)
36110 NODE_NAME_CASE(GF2P8AFFINEINVQB)
36111 NODE_NAME_CASE(NT_CALL)
36112 NODE_NAME_CASE(NT_BRIND)
36113 NODE_NAME_CASE(UMWAIT)
36114 NODE_NAME_CASE(TPAUSE)
36115 NODE_NAME_CASE(ENQCMD)
36116 NODE_NAME_CASE(ENQCMDS)
36117 NODE_NAME_CASE(VP2INTERSECT)
36118 NODE_NAME_CASE(VPDPBSUD)
36119 NODE_NAME_CASE(VPDPBSUDS)
36120 NODE_NAME_CASE(VPDPBUUD)
36121 NODE_NAME_CASE(VPDPBUUDS)
36122 NODE_NAME_CASE(VPDPBSSD)
36123 NODE_NAME_CASE(VPDPBSSDS)
36124 NODE_NAME_CASE(AESENC128KL)
36125 NODE_NAME_CASE(AESDEC128KL)
36126 NODE_NAME_CASE(AESENC256KL)
36127 NODE_NAME_CASE(AESDEC256KL)
36128 NODE_NAME_CASE(AESENCWIDE128KL)
36129 NODE_NAME_CASE(AESDECWIDE128KL)
36130 NODE_NAME_CASE(AESENCWIDE256KL)
36131 NODE_NAME_CASE(AESDECWIDE256KL)
36132 NODE_NAME_CASE(CMPCCXADD)
36133 NODE_NAME_CASE(TESTUI)
36134 NODE_NAME_CASE(FP80_ADD)
36135 NODE_NAME_CASE(STRICT_FP80_ADD)
#undef NODE_NAME_CASE
  }
  return nullptr;
}
36141 /// Return true if the addressing mode represented by AM is legal for this
36142 /// target, for a load/store of the specified type.
36143 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
36144 const AddrMode &AM, Type *Ty,
36146 Instruction *I) const {
36147 // X86 supports extremely general addressing modes.
36148 CodeModel::Model M = getTargetMachine().getCodeModel();
36150 // X86 allows a sign-extended 32-bit immediate field as a displacement.
if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
  return false;

if (AM.BaseGV) {
  unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);

  // If a reference to this global requires an extra load, we can't fold it.
  if (isGlobalStubReference(GVFlags))
    return false;

  // If BaseGV requires a register for the PIC base, we cannot also have a
  // BaseReg specified.
  if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
    return false;
}
// If lower 4G is not available, then we must use rip-relative addressing.
if ((M != CodeModel::Small || isPositionIndependent()) &&
    Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
  return false;
switch (AM.Scale) {
case 0:
case 1:
case 2:
case 4:
case 8:
  // These scales always work.
  break;
case 3:
case 5:
case 9:
  // These scales are formed with basereg+scalereg. Only accept if there is
  // no basereg yet.
  if (AM.HasBaseReg)
    return false;
  break;
default: // Other stuff never works.
  return false;
}

return true;
}
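// Note on the scale rules above (illustrative): hardware encodes only
// index*{1,2,4,8}, e.g. mov eax, [rbx + rcx*4 + 16]; a scale of 3 is only
// reachable as base + index*2 (lea reg, [rax + rax*2]), which is why
// scales 3/5/9 require the base-register slot to be free.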
36195 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
36196 unsigned Bits = Ty->getScalarSizeInBits();
36198 // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
36199 // Splitting for v32i8/v16i16 on XOP+AVX2 targets is still preferred.
36200 if (Subtarget.hasXOP() &&
36201 (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
36204 // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
36205 // shifts just as cheap as scalar ones.
36206 if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
36209 // AVX512BW has shifts such as vpsllvw.
36210 if (Subtarget.hasBWI() && Bits == 16)
// Otherwise, it's significantly cheaper to shift by a scalar amount than by a
// fully general vector.
return false;
}
36218 bool X86TargetLowering::isBinOp(unsigned Opcode) const {
36220 // These are non-commutative binops.
36221 // TODO: Add more X86ISD opcodes once we have test coverage.
36222 case X86ISD::ANDNP:
36223 case X86ISD::PCMPGT:
36226 case X86ISD::FANDN:
36227 case X86ISD::VPSHA:
36228 case X86ISD::VPSHL:
36229 case X86ISD::VSHLV:
36230 case X86ISD::VSRLV:
36231 case X86ISD::VSRAV:
36235 return TargetLoweringBase::isBinOp(Opcode);
36238 bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
36240 // TODO: Add more X86ISD opcodes once we have test coverage.
36241 case X86ISD::PCMPEQ:
36242 case X86ISD::PMULDQ:
36243 case X86ISD::PMULUDQ:
36244 case X86ISD::FMAXC:
36245 case X86ISD::FMINC:
36252 return TargetLoweringBase::isCommutativeBinOp(Opcode);
36255 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
36256 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
36258 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
36259 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
36260 return NumBits1 > NumBits2;
36263 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
36264 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
36267 if (!isTypeLegal(EVT::getEVT(Ty1)))
36270 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
36272 // Assuming the caller doesn't have a zeroext or signext return parameter,
36273 // truncation all the way down to i1 is valid.
36277 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
36278 return isInt<32>(Imm);
36281 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
36282 // Can also use sub to handle negated immediates.
36283 return isInt<32>(Imm);
36286 bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
36287 return isInt<32>(Imm);
36290 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
36291 if (!VT1.isScalarInteger() || !VT2.isScalarInteger())
36293 unsigned NumBits1 = VT1.getSizeInBits();
36294 unsigned NumBits2 = VT2.getSizeInBits();
36295 return NumBits1 > NumBits2;
36298 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
36299 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
36300 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
36303 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
36304 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
36305 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
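// Example behind both overloads above: 'mov eax, [mem]' implicitly zeroes
// bits 63:32 of RAX, so a later i32 -> i64 zext of that 32-bit result
// needs no extra instruction.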
36308 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
36309 EVT VT1 = Val.getValueType();
36310 if (isZExtFree(VT1, VT2))
36313 if (Val.getOpcode() != ISD::LOAD)
36316 if (!VT1.isSimple() || !VT1.isInteger() ||
36317 !VT2.isSimple() || !VT2.isInteger())
switch (VT1.getSimpleVT().SimpleTy) {
default:
  break;
case MVT::i8:
case MVT::i16:
case MVT::i32:
  // X86 has 8, 16, and 32-bit zero-extending loads.
  return true;
}

return false;
}
36332 bool X86TargetLowering::shouldSinkOperands(Instruction *I,
36333 SmallVectorImpl<Use *> &Ops) const {
36334 using namespace llvm::PatternMatch;
FixedVectorType *VTy = dyn_cast<FixedVectorType>(I->getType());
if (!VTy)
  return false;
36340 if (I->getOpcode() == Instruction::Mul &&
36341 VTy->getElementType()->isIntegerTy(64)) {
36342 for (auto &Op : I->operands()) {
36343 // Make sure we are not already sinking this operand
36344 if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
36347 // Look for PMULDQ pattern where the input is a sext_inreg from vXi32 or
36348 // the PMULUDQ pattern where the input is a zext_inreg from vXi32.
36349 if (Subtarget.hasSSE41() &&
36350 match(Op.get(), m_AShr(m_Shl(m_Value(), m_SpecificInt(32)),
36351 m_SpecificInt(32)))) {
36352 Ops.push_back(&cast<Instruction>(Op)->getOperandUse(0));
36353 Ops.push_back(&Op);
} else if (Subtarget.hasSSE2() &&
           match(Op.get(),
                 m_And(m_Value(), m_SpecificInt(UINT64_C(0xffffffff))))) {
36357 Ops.push_back(&Op);
36361 return !Ops.empty();
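// IR shape the 64-bit mul path above sinks (hypothetical example):
//   %s = shl <2 x i64> %x, <i64 32, i64 32>
//   %a = ashr <2 x i64> %s, <i64 32, i64 32> ; sext_inreg from i32
//   %m = mul <2 x i64> %a, %y
// Keeping %s/%a in the same block as %m lets SDAG form PMULDQ; the
// zext_inreg variant (and with 0xffffffff) similarly forms PMULUDQ.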
36364 // A uniform shift amount in a vector shift or funnel shift may be much
36365 // cheaper than a generic variable vector shift, so make that pattern visible
36366 // to SDAG by sinking the shuffle instruction next to the shift.
36367 int ShiftAmountOpNum = -1;
if (I->isShift())
  ShiftAmountOpNum = 1;
36370 else if (auto *II = dyn_cast<IntrinsicInst>(I)) {
36371 if (II->getIntrinsicID() == Intrinsic::fshl ||
36372 II->getIntrinsicID() == Intrinsic::fshr)
36373 ShiftAmountOpNum = 2;
36376 if (ShiftAmountOpNum == -1)
36379 auto *Shuf = dyn_cast<ShuffleVectorInst>(I->getOperand(ShiftAmountOpNum));
36380 if (Shuf && getSplatIndex(Shuf->getShuffleMask()) >= 0 &&
36381 isVectorShiftByScalarCheap(I->getType())) {
36382 Ops.push_back(&I->getOperandUse(ShiftAmountOpNum));
36389 bool X86TargetLowering::shouldConvertPhiType(Type *From, Type *To) const {
36390 if (!Subtarget.is64Bit())
36392 return TargetLowering::shouldConvertPhiType(From, To);
36395 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
36396 if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
36399 EVT SrcVT = ExtVal.getOperand(0).getValueType();
36401 // There is no extending load for vXi1.
36402 if (SrcVT.getScalarType() == MVT::i1)
36408 bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
36410 if (!Subtarget.hasAnyFMA())
36413 VT = VT.getScalarType();
36415 if (!VT.isSimple())
36418 switch (VT.getSimpleVT().SimpleTy) {
case MVT::f16:
  return Subtarget.hasFP16();
case MVT::f32:
case MVT::f64:
  return true;
default:
  break;
}

return false;
}
36431 bool X86TargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
36432 // i16 instructions are longer (0x66 prefix) and potentially slower.
36433 return !(SrcVT == MVT::i32 && DestVT == MVT::i16);
36436 bool X86TargetLowering::shouldFoldSelectWithIdentityConstant(unsigned Opcode,
36438 // TODO: This is too general. There are cases where pre-AVX512 codegen would
36439 // benefit. The transform may also be profitable for scalar code.
36440 if (!Subtarget.hasAVX512())
36442 if (!Subtarget.hasVLX() && !VT.is512BitVector())
36444 if (!VT.isVector() || VT.getScalarType() == MVT::i1)
36450 /// Targets can use this to indicate that they only support *some*
36451 /// VECTOR_SHUFFLE operations, those with specific masks.
36452 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
36453 /// are assumed to be legal.
36454 bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const {
36455 if (!VT.isSimple())
36458 // Not for i1 vectors
36459 if (VT.getSimpleVT().getScalarType() == MVT::i1)
36462 // Very little shuffling can be done for 64-bit vectors right now.
36463 if (VT.getSimpleVT().getSizeInBits() == 64)
36466 // We only care that the types being shuffled are legal. The lowering can
36467 // handle any possible shuffle mask that results.
36468 return isTypeLegal(VT.getSimpleVT());
36471 bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
36473 // Don't convert an 'and' into a shuffle that we don't directly support.
36474 // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
36475 if (!Subtarget.hasAVX2())
36476 if (VT == MVT::v32i8 || VT == MVT::v16i16)
36479 // Just delegate to the generic legality, clear masks aren't special.
36480 return isShuffleMaskLegal(Mask, VT);
36483 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
36484 // If the subtarget is using thunks, we need to not generate jump tables.
36485 if (Subtarget.useIndirectThunkBranches())
36488 // Otherwise, fallback on the generic logic.
36489 return TargetLowering::areJTsAllowed(Fn);
36492 MVT X86TargetLowering::getPreferredSwitchConditionType(LLVMContext &Context,
36493 EVT ConditionVT) const {
36494 // Avoid 8 and 16 bit types because they increase the chance for unnecessary
36495 // zero-extensions.
36496 if (ConditionVT.getSizeInBits() < 32)
36498 return TargetLoweringBase::getPreferredSwitchConditionType(Context,
36502 //===----------------------------------------------------------------------===//
36503 // X86 Scheduler Hooks
36504 //===----------------------------------------------------------------------===//
// Returns true if EFLAGS is consumed after this iterator in the rest of the
// basic block or any successors of the basic block.
36508 static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr,
36509 MachineBasicBlock *BB) {
36510 // Scan forward through BB for a use/def of EFLAGS.
for (const MachineInstr &mi : llvm::make_range(std::next(Itr), BB->end())) {
  if (mi.readsRegister(X86::EFLAGS))
    return true;
  // If we found a def, we can stop searching.
  if (mi.definesRegister(X86::EFLAGS))
    return false;
}
// If we hit the end of the block, check whether EFLAGS is live into a
// successor.
for (MachineBasicBlock *Succ : BB->successors())
  if (Succ->isLiveIn(X86::EFLAGS))
    return true;

return false;
}
36528 /// Utility function to emit xbegin specifying the start of an RTM region.
36529 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
36530 const TargetInstrInfo *TII) {
36531 const MIMetadata MIMD(MI);
36533 const BasicBlock *BB = MBB->getBasicBlock();
36534 MachineFunction::iterator I = ++MBB->getIterator();
// For the v = xbegin(), we generate
//
// thisMBB:
//  xbegin fallMBB
//
// mainMBB:
//  s0 = -1
//
// fallMBB:
//  eax = # XABORT_DEF
//  s1 = eax
//
// sinkMBB:
//  v = phi(s0/mainBB, s1/fallBB)
36551 MachineBasicBlock *thisMBB = MBB;
36552 MachineFunction *MF = MBB->getParent();
36553 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
36554 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
36555 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
36556 MF->insert(I, mainMBB);
36557 MF->insert(I, fallMBB);
36558 MF->insert(I, sinkMBB);
36560 if (isEFLAGSLiveAfter(MI, MBB)) {
36561 mainMBB->addLiveIn(X86::EFLAGS);
36562 fallMBB->addLiveIn(X86::EFLAGS);
36563 sinkMBB->addLiveIn(X86::EFLAGS);
36566 // Transfer the remainder of BB and its successor edges to sinkMBB.
36567 sinkMBB->splice(sinkMBB->begin(), MBB,
36568 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
36569 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
36571 MachineRegisterInfo &MRI = MF->getRegInfo();
36572 Register DstReg = MI.getOperand(0).getReg();
36573 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
36574 Register mainDstReg = MRI.createVirtualRegister(RC);
36575 Register fallDstReg = MRI.createVirtualRegister(RC);
// thisMBB:
//  xbegin fallMBB
//  # fallthrough to mainMBB
//  # abort jumps to fallMBB
36581 BuildMI(thisMBB, MIMD, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
36582 thisMBB->addSuccessor(mainMBB);
36583 thisMBB->addSuccessor(fallMBB);
36586 // mainDstReg := -1
36587 BuildMI(mainMBB, MIMD, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
36588 BuildMI(mainMBB, MIMD, TII->get(X86::JMP_1)).addMBB(sinkMBB);
36589 mainMBB->addSuccessor(sinkMBB);
36591 // fallMBB:
36592 //  ; pseudo instruction to model hardware's definition from XABORT
36593 // EAX := XABORT_DEF
36594 // fallDstReg := EAX
36595 BuildMI(fallMBB, MIMD, TII->get(X86::XABORT_DEF));
36596 BuildMI(fallMBB, MIMD, TII->get(TargetOpcode::COPY), fallDstReg)
36597 .addReg(X86::EAX);
36598 fallMBB->addSuccessor(sinkMBB);
36600 // sinkMBB:
36601 //  DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
36602 BuildMI(*sinkMBB, sinkMBB->begin(), MIMD, TII->get(X86::PHI), DstReg)
36603 .addReg(mainDstReg).addMBB(mainMBB)
36604 .addReg(fallDstReg).addMBB(fallMBB);
36606 MI.eraseFromParent();
36607 return sinkMBB;
36608 }
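// For orientation, the hardware-level shape this models is roughly the
// following (illustrative only, not what this function emits verbatim):
//   xbegin  .Lfallback      ; start RTM transaction; EAX is defined on abort
//   ...                     ; transactional region
// .Lfallback:
//   ...                     ; EAX holds the abort status here
// The XABORT_DEF pseudo above stands in for the hardware's definition of EAX
// on the abort path.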
36610 MachineBasicBlock *
36611 X86TargetLowering::EmitVAARGWithCustomInserter(MachineInstr &MI,
36612 MachineBasicBlock *MBB) const {
36613 // Emit va_arg instruction on X86-64.
36615 // Operands to this pseudo-instruction:
36616 // 0 ) Output : destination address (reg)
36617 // 1-5) Input : va_list address (addr, i64mem)
36618 // 6 ) ArgSize : Size (in bytes) of vararg type
36619 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
36620 // 8 ) Align : Alignment of type
36621 // 9 ) EFLAGS (implicit-def)
36623 assert(MI.getNumOperands() == 10 && "VAARG should have 10 operands!");
36624 static_assert(X86::AddrNumOperands == 5, "VAARG assumes 5 address operands");
36626 Register DestReg = MI.getOperand(0).getReg();
36627 MachineOperand &Base = MI.getOperand(1);
36628 MachineOperand &Scale = MI.getOperand(2);
36629 MachineOperand &Index = MI.getOperand(3);
36630 MachineOperand &Disp = MI.getOperand(4);
36631 MachineOperand &Segment = MI.getOperand(5);
36632 unsigned ArgSize = MI.getOperand(6).getImm();
36633 unsigned ArgMode = MI.getOperand(7).getImm();
36634 Align Alignment = Align(MI.getOperand(8).getImm());
36636 MachineFunction *MF = MBB->getParent();
36638 // Memory Reference
36639 assert(MI.hasOneMemOperand() && "Expected VAARG to have one memoperand");
36641 MachineMemOperand *OldMMO = MI.memoperands().front();
36643 // Clone the MMO into two separate MMOs for loading and storing
36644 MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
36645 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
36646 MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
36647 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
36649 // Machine Information
36650 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36651 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
36652 const TargetRegisterClass *AddrRegClass =
36653 getRegClassFor(getPointerTy(MBB->getParent()->getDataLayout()));
36654 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
36655 const MIMetadata MIMD(MI);
36657 // struct va_list {
36658 //   i32   gp_offset
36659 //   i32   fp_offset
36660 //   i64   overflow_area (address)
36661 //   i64   reg_save_area (address)
36662 // }
36663 // sizeof(va_list) = 24
36664 // alignment(va_list) = 8
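// A rough C-level sketch of the same layout (System V x86-64 ABI); shown
// here only for orientation:
//   typedef struct {
//     unsigned int gp_offset;    // byte offset into reg_save_area for GPRs
//     unsigned int fp_offset;    // byte offset into reg_save_area for XMMs
//     void *overflow_arg_area;   // next stack-passed argument
//     void *reg_save_area;       // spilled register arguments
//   } va_list[1];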
36666 unsigned TotalNumIntRegs = 6;
36667 unsigned TotalNumXMMRegs = 8;
36668 bool UseGPOffset = (ArgMode == 1);
36669 bool UseFPOffset = (ArgMode == 2);
36670 unsigned MaxOffset = TotalNumIntRegs * 8 +
36671 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
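// Worked arithmetic: the register save area holds 6 GPRs * 8 bytes = 48
// bytes, followed by 8 XMM registers * 16 bytes = 128 bytes, so MaxOffset is
// 48 in gp_offset mode and 48 + 128 = 176 when fp_offset is in use.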
36673 /* Align ArgSize to a multiple of 8 */
36674 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
36675 bool NeedsAlign = (Alignment > 8);
36677 MachineBasicBlock *thisMBB = MBB;
36678 MachineBasicBlock *overflowMBB;
36679 MachineBasicBlock *offsetMBB;
36680 MachineBasicBlock *endMBB;
36682 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
36683 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
36684 unsigned OffsetReg = 0;
36686 if (!UseGPOffset && !UseFPOffset) {
36687 // If we only pull from the overflow region, we don't create a branch.
36688 // We don't need to alter control flow.
36689 OffsetDestReg = 0; // unused
36690 OverflowDestReg = DestReg;
36692 offsetMBB = nullptr;
36693 overflowMBB = thisMBB;
36694 endMBB = thisMBB;
36695 } else {
36696 // First emit code to check if gp_offset (or fp_offset) is below the bound.
36697 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
36698 // If not, pull from overflow_area. (branch to overflowMBB)
36699 //
36700 // thisMBB
36701 //   |     .
36702 //   |        .
36703 //   offsetMBB   overflowMBB
36704 //   |        .
36705 //   |     .
36706 //   endMBB
36708 // Registers for the PHI in endMBB
36709 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
36710 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
36712 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
36713 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36714 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36715 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36717 MachineFunction::iterator MBBIter = ++MBB->getIterator();
36719 // Insert the new basic blocks
36720 MF->insert(MBBIter, offsetMBB);
36721 MF->insert(MBBIter, overflowMBB);
36722 MF->insert(MBBIter, endMBB);
36724 // Transfer the remainder of MBB and its successor edges to endMBB.
36725 endMBB->splice(endMBB->begin(), thisMBB,
36726 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
36727 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
36729 // Make offsetMBB and overflowMBB successors of thisMBB
36730 thisMBB->addSuccessor(offsetMBB);
36731 thisMBB->addSuccessor(overflowMBB);
36733 // endMBB is a successor of both offsetMBB and overflowMBB
36734 offsetMBB->addSuccessor(endMBB);
36735 overflowMBB->addSuccessor(endMBB);
36737 // Load the offset value into a register
36738 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
36739 BuildMI(thisMBB, MIMD, TII->get(X86::MOV32rm), OffsetReg)
36740 .add(Base)
36741 .add(Scale)
36742 .add(Index)
36743 .addDisp(Disp, UseFPOffset ? 4 : 0)
36744 .add(Segment)
36745 .setMemRefs(LoadOnlyMMO);
36747 // Check if there is enough room left to pull this argument.
36748 BuildMI(thisMBB, MIMD, TII->get(X86::CMP32ri))
36749 .addReg(OffsetReg)
36750 .addImm(MaxOffset + 8 - ArgSizeA8);
36752 // Branch to "overflowMBB" if offset >= max
36753 // Fall through to "offsetMBB" otherwise
36754 BuildMI(thisMBB, MIMD, TII->get(X86::JCC_1))
36755 .addMBB(overflowMBB).addImm(X86::COND_AE);
36756 }
36758 // In offsetMBB, emit code to use the reg_save_area.
36759 if (offsetMBB) {
36760 assert(OffsetReg != 0);
36762 // Read the reg_save_area address.
36763 Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
36764 BuildMI(
36765 offsetMBB, MIMD,
36766 TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
36767 RegSaveReg)
36768 .add(Base)
36769 .add(Scale)
36770 .add(Index)
36771 .addDisp(Disp, Subtarget.isTarget64BitLP64() ? 16 : 12)
36772 .add(Segment)
36773 .setMemRefs(LoadOnlyMMO);
36775 if (Subtarget.isTarget64BitLP64()) {
36776 // Zero-extend the offset
36777 Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
36778 BuildMI(offsetMBB, MIMD, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
36779 .addImm(0)
36780 .addReg(OffsetReg)
36781 .addImm(X86::sub_32bit);
36783 // Add the offset to the reg_save_area to get the final address.
36784 BuildMI(offsetMBB, MIMD, TII->get(X86::ADD64rr), OffsetDestReg)
36785 .addReg(OffsetReg64)
36786 .addReg(RegSaveReg);
36787 } else {
36788 // Add the offset to the reg_save_area to get the final address.
36789 BuildMI(offsetMBB, MIMD, TII->get(X86::ADD32rr), OffsetDestReg)
36790 .addReg(OffsetReg)
36791 .addReg(RegSaveReg);
36792 }
36794 // Compute the offset for the next argument
36795 Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
36796 BuildMI(offsetMBB, MIMD, TII->get(X86::ADD32ri), NextOffsetReg)
36797 .addReg(OffsetReg)
36798 .addImm(UseFPOffset ? 16 : 8);
36800 // Store it back into the va_list.
36801 BuildMI(offsetMBB, MIMD, TII->get(X86::MOV32mr))
36802 .add(Base)
36803 .add(Scale)
36804 .add(Index)
36805 .addDisp(Disp, UseFPOffset ? 4 : 0)
36806 .add(Segment)
36807 .addReg(NextOffsetReg)
36808 .setMemRefs(StoreOnlyMMO);
36810 // Jump to endMBB
36811 BuildMI(offsetMBB, MIMD, TII->get(X86::JMP_1))
36812 .addMBB(endMBB);
36813 }
36815 //
36816 // Emit code to use overflow area
36817 //
36819 // Load the overflow_area address into a register.
36820 Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
36821 BuildMI(overflowMBB, MIMD,
36822 TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
36823 OverflowAddrReg)
36824 .add(Base)
36825 .add(Scale)
36826 .add(Index)
36827 .add(Disp)
36828 .add(Segment)
36829 .setMemRefs(LoadOnlyMMO);
36831 // If we need to align it, do so. Otherwise, just copy the address
36832 // to OverflowDestReg.
36833 if (NeedsAlign) {
36834 // Align the overflow address
36835 Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
36837 // aligned_addr = (addr + (align-1)) & ~(align-1)
36838 BuildMI(
36839 overflowMBB, MIMD,
36840 TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
36841 TmpReg)
36842 .addReg(OverflowAddrReg)
36843 .addImm(Alignment.value() - 1);
36845 BuildMI(
36846 overflowMBB, MIMD,
36847 TII->get(Subtarget.isTarget64BitLP64() ? X86::AND64ri32 : X86::AND32ri),
36848 OverflowDestReg)
36849 .addReg(TmpReg)
36850 .addImm(~(uint64_t)(Alignment.value() - 1));
36851 } else {
36852 BuildMI(overflowMBB, MIMD, TII->get(TargetOpcode::COPY), OverflowDestReg)
36853 .addReg(OverflowAddrReg);
36854 }
36856 // Compute the next overflow address after this argument.
36857 // (the overflow address should be kept 8-byte aligned)
36858 Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
36859 BuildMI(
36860 overflowMBB, MIMD,
36861 TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
36862 NextAddrReg)
36863 .addReg(OverflowDestReg)
36864 .addImm(ArgSizeA8);
36866 // Store the new overflow address.
36867 BuildMI(overflowMBB, MIMD,
36868 TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64mr : X86::MOV32mr))
36869 .add(Base)
36870 .add(Scale)
36871 .add(Index)
36872 .add(Disp)
36873 .add(Segment)
36874 .addReg(NextAddrReg)
36875 .setMemRefs(StoreOnlyMMO);
36877 // If we branched, emit the PHI to the front of endMBB.
36878 if (offsetMBB) {
36879 BuildMI(*endMBB, endMBB->begin(), MIMD,
36880 TII->get(X86::PHI), DestReg)
36881 .addReg(OffsetDestReg).addMBB(offsetMBB)
36882 .addReg(OverflowDestReg).addMBB(overflowMBB);
36883 }
36885 // Erase the pseudo instruction
36886 MI.eraseFromParent();
36888 return endMBB;
36889 }
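// For illustration (hypothetical IR): a use such as
//   %v = va_arg ptr %ap, i32
// reaches here as the VAARG pseudo with ArgMode == 1, and the expansion
// above emits the gp_offset bounds check, the reg_save_area indexed load,
// and the overflow-area fallback, joined by the PHI in endMBB.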
36891 // The EFLAGS operand of SelectItr might be missing a kill marker
36892 // because there were multiple uses of EFLAGS, and ISel didn't know
36893 // which to mark. Figure out whether SelectItr should have had a
36894 // kill marker, and set it if it should. Returns the correct kill
36895 // marker value.
36896 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
36897 MachineBasicBlock* BB,
36898 const TargetRegisterInfo* TRI) {
36899 if (isEFLAGSLiveAfter(SelectItr, BB))
36900 return false;
36902 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
36903 // out. SelectMI should have a kill flag on EFLAGS.
36904 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
36905 return true;
36906 }
36908 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
36909 // together with other CMOV pseudo-opcodes into a single basic-block with
36910 // conditional jump around it.
36911 static bool isCMOVPseudo(MachineInstr &MI) {
36912 switch (MI.getOpcode()) {
36913 case X86::CMOV_FR16:
36914 case X86::CMOV_FR16X:
36915 case X86::CMOV_FR32:
36916 case X86::CMOV_FR32X:
36917 case X86::CMOV_FR64:
36918 case X86::CMOV_FR64X:
36919 case X86::CMOV_GR8:
36920 case X86::CMOV_GR16:
36921 case X86::CMOV_GR32:
36922 case X86::CMOV_RFP32:
36923 case X86::CMOV_RFP64:
36924 case X86::CMOV_RFP80:
36925 case X86::CMOV_VR64:
36926 case X86::CMOV_VR128:
36927 case X86::CMOV_VR128X:
36928 case X86::CMOV_VR256:
36929 case X86::CMOV_VR256X:
36930 case X86::CMOV_VR512:
36931 case X86::CMOV_VK1:
36932 case X86::CMOV_VK2:
36933 case X86::CMOV_VK4:
36934 case X86::CMOV_VK8:
36935 case X86::CMOV_VK16:
36936 case X86::CMOV_VK32:
36937 case X86::CMOV_VK64:
36938 return true;
36940 default:
36941 return false;
36942 }
36943 }
36945 // Helper function, which inserts PHI functions into SinkMBB:
36946 // %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
36947 // where %FalseValue(i) and %TrueValue(i) are taken from the consequent CMOVs
36948 // in [MIItBegin, MIItEnd) range. It returns the last MachineInstrBuilder for
36949 // the last PHI function inserted.
36950 static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
36951 MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
36952 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
36953 MachineBasicBlock *SinkMBB) {
36954 MachineFunction *MF = TrueMBB->getParent();
36955 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
36956 const MIMetadata MIMD(*MIItBegin);
36958 X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
36959 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
36961 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
36963 // As we are creating the PHIs, we have to be careful if there is more than
36964 // one. Later CMOVs may reference the results of earlier CMOVs, but later
36965 // PHIs have to reference the individual true/false inputs from earlier PHIs.
36966 // That also means that PHI construction must work forward from earlier to
36967 // later, and that the code must maintain a mapping from earlier PHI's
36968 // destination registers, and the registers that went into the PHI.
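// Worked example (register names illustrative): given
//   t2 = CMOV t1, f1, cc
//   t3 = CMOV t2, f2, cc
// the first iteration emits a PHI for t2 and records
// RegRewriteTable[t2] = {t1, f1}; the second iteration then finds its t2
// input in the table and substitutes the value that flowed along the
// corresponding edge, so its PHI refers to t1 rather than to the first
// PHI's result.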
36969 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
36970 MachineInstrBuilder MIB;
36972 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
36973 Register DestReg = MIIt->getOperand(0).getReg();
36974 Register Op1Reg = MIIt->getOperand(1).getReg();
36975 Register Op2Reg = MIIt->getOperand(2).getReg();
36977 // If this CMOV we are generating is the opposite condition from
36978 // the jump we generated, then we have to swap the operands for the
36979 // PHI that is going to be generated.
36980 if (MIIt->getOperand(3).getImm() == OppCC)
36981 std::swap(Op1Reg, Op2Reg);
36983 if (RegRewriteTable.contains(Op1Reg))
36984 Op1Reg = RegRewriteTable[Op1Reg].first;
36986 if (RegRewriteTable.contains(Op2Reg))
36987 Op2Reg = RegRewriteTable[Op2Reg].second;
36989 MIB =
36990 BuildMI(*SinkMBB, SinkInsertionPoint, MIMD, TII->get(X86::PHI), DestReg)
36991 .addReg(Op1Reg)
36992 .addMBB(FalseMBB)
36993 .addReg(Op2Reg)
36994 .addMBB(TrueMBB);
36996 // Add this PHI to the rewrite table.
36997 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
36998 }
37000 return MIB;
37001 }
37003 // Lower cascaded selects in form of (SecondCmov (FirstCMOV F, T, cc1), T, cc2).
37004 MachineBasicBlock *
37005 X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
37006 MachineInstr &SecondCascadedCMOV,
37007 MachineBasicBlock *ThisMBB) const {
37008 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
37009 const MIMetadata MIMD(FirstCMOV);
37011 // We lower cascaded CMOVs such as
37012 //
37013 //   (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
37014 //
37015 // to two successive branches.
37016 //
37017 // Without this, we would add a PHI between the two jumps, which ends up
37018 // creating a few copies all around. For instance, for
37019 //
37020 //   (sitofp (zext (fcmp une)))
37021 //
37022 // we would generate:
37023 //
37024 //   ucomiss %xmm1, %xmm0
37025 //   movss <1.0f>, %xmm0
37026 //   movaps %xmm0, %xmm1
37027 //   jne .LBB5_2
37028 //   xorps %xmm1, %xmm1
37029 // .LBB5_2:
37030 //   jp .LBB5_4
37031 //   movaps %xmm1, %xmm0
37032 // .LBB5_4:
37033 //   retq
37034 //
37035 // because this custom-inserter would have generated:
37036 //
37037 //   A
37038 //   | \
37039 //   |  B
37040 //   | /
37041 //   C
37042 //   | \
37043 //   |  D
37044 //   | /
37045 //   E
37046 //
37047 // A: X = ...; Y = ...
37048 // B: empty
37049 // C: Z = PHI [X, A], [Y, B]
37050 // D: empty
37051 // E: PHI [X, C], [Z, D]
37052 //
37053 // If we lower both CMOVs in a single step, we can instead generate:
37054 //
37055 //   A
37056 //   | \
37057 //   |  C
37058 //   | /|
37059 //   |/ |
37060 //   |  |
37061 //   |  D
37062 //   | /
37063 //   E
37064 //
37065 // A: X = ...; Y = ...
37066 // D: empty
37067 // E: PHI [X, A], [X, C], [Y, D]
37068 //
37069 // Which, in our sitofp/fcmp example, gives us something like:
37070 //
37071 //   ucomiss %xmm1, %xmm0
37072 //   movss <1.0f>, %xmm0
37073 //   jne .LBB5_4
37074 //   jp .LBB5_4
37075 //   xorps %xmm0, %xmm0
37076 // .LBB5_4:
37077 //   retq
37078 //
37080 // We lower cascaded CMOV into two successive branches to the same block.
37081 // EFLAGS is used by both, so mark it as live in the second.
37082 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
37083 MachineFunction *F = ThisMBB->getParent();
37084 MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
37085 MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
37086 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
37088 MachineFunction::iterator It = ++ThisMBB->getIterator();
37089 F->insert(It, FirstInsertedMBB);
37090 F->insert(It, SecondInsertedMBB);
37091 F->insert(It, SinkMBB);
37093 // For a cascaded CMOV, we lower it to two successive branches to
37094 // the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
37095 // the FirstInsertedMBB.
37096 FirstInsertedMBB->addLiveIn(X86::EFLAGS);
37098 // If the EFLAGS register isn't dead in the terminator, then claim that it's
37099 // live into the sink and copy blocks.
37100 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
37101 if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
37102 !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
37103 SecondInsertedMBB->addLiveIn(X86::EFLAGS);
37104 SinkMBB->addLiveIn(X86::EFLAGS);
37105 }
37107 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
37108 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
37109 std::next(MachineBasicBlock::iterator(FirstCMOV)),
37110 ThisMBB->end());
37111 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
37113 // Fallthrough block for ThisMBB.
37114 ThisMBB->addSuccessor(FirstInsertedMBB);
37115 // The true block target of the first branch is always SinkMBB.
37116 ThisMBB->addSuccessor(SinkMBB);
37117 // Fallthrough block for FirstInsertedMBB.
37118 FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
37119 // The true block for the branch of FirstInsertedMBB.
37120 FirstInsertedMBB->addSuccessor(SinkMBB);
37121 // This is fallthrough.
37122 SecondInsertedMBB->addSuccessor(SinkMBB);
37124 // Create the conditional branch instructions.
37125 X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
37126 BuildMI(ThisMBB, MIMD, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
37128 X86::CondCode SecondCC =
37129 X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
37130 BuildMI(FirstInsertedMBB, MIMD, TII->get(X86::JCC_1))
37131 .addMBB(SinkMBB)
37132 .addImm(SecondCC);
37134 // SinkMBB:
37135 // %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
37136 Register DestReg = SecondCascadedCMOV.getOperand(0).getReg();
37137 Register Op1Reg = FirstCMOV.getOperand(1).getReg();
37138 Register Op2Reg = FirstCMOV.getOperand(2).getReg();
37139 MachineInstrBuilder MIB =
37140 BuildMI(*SinkMBB, SinkMBB->begin(), MIMD, TII->get(X86::PHI), DestReg)
37141 .addReg(Op1Reg)
37142 .addMBB(SecondInsertedMBB)
37143 .addReg(Op2Reg)
37144 .addMBB(ThisMBB);
37146 // The edge from FirstInsertedMBB carries the same incoming value as the
37147 // ThisMBB edge (the True operand of the SELECT_CC/CMOV nodes).
37148 MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
37150 // Now remove the CMOVs.
37151 FirstCMOV.eraseFromParent();
37152 SecondCascadedCMOV.eraseFromParent();
37154 return SinkMBB;
37155 }
37157 MachineBasicBlock *
37158 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
37159 MachineBasicBlock *ThisMBB) const {
37160 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
37161 const MIMetadata MIMD(MI);
37163 // To "insert" a SELECT_CC instruction, we actually have to insert the
37164 // diamond control-flow pattern. The incoming instruction knows the
37165 // destination vreg to set, the condition code register to branch on, the
37166 // true/false values to select between and a branch opcode to use.
37168 // ThisMBB:
37169 // ...
37170 //  TrueVal = ...
37171 //  cmpTY ccX, r1, r2
37172 //  bCC copy1MBB
37173 //  fallthrough --> FalseMBB
37175 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
37176 // as described above, by inserting a BB, and then making a PHI at the join
37177 // point to select the true and false operands of the CMOV in the PHI.
37179 // The code also handles two different cases of multiple CMOV opcodes
37180 // in a row.
37181 //
37182 // Case 1:
37183 // In this case, there are multiple CMOVs in a row, all of which are based on
37184 // the same condition setting (or the exact opposite condition setting).
37185 // In this case we can lower all the CMOVs using a single inserted BB, and
37186 // then make a number of PHIs at the join point to model the CMOVs. The only
37187 // trickiness here, is that in a case like:
37189 // t2 = CMOV cond1 t1, f1
37190 // t3 = CMOV cond1 t2, f2
37192 // when rewriting this into PHIs, we have to perform some renaming on the
37193 // temps since you cannot have a PHI operand refer to a PHI result earlier
37194 // in the same block. The "simple" but wrong lowering would be:
37196 // t2 = PHI t1(BB1), f1(BB2)
37197 // t3 = PHI t2(BB1), f2(BB2)
37199 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
37200 // renaming is to note that on the path through BB1, t2 is really just a
37201 // copy of t1, and do that renaming, properly generating:
37203 // t2 = PHI t1(BB1), f1(BB2)
37204 // t3 = PHI t1(BB1), f2(BB2)
37206 // Case 2:
37207 // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
37208 // function - EmitLoweredCascadedSelect.
37210 X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
37211 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
37212 MachineInstr *LastCMOV = &MI;
37213 MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
37215 // Check for case 1, where there are multiple CMOVs with the same condition
37216 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
37217 // number of jumps the most.
37219 if (isCMOVPseudo(MI)) {
37220 // See if we have a string of CMOVS with the same condition. Skip over
37221 // intervening debug insts.
37222 while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
37223 (NextMIIt->getOperand(3).getImm() == CC ||
37224 NextMIIt->getOperand(3).getImm() == OppCC)) {
37225 LastCMOV = &*NextMIIt;
37226 NextMIIt = next_nodbg(NextMIIt, ThisMBB->end());
37227 }
37228 }
37230 // Check for case 2, but only if we didn't already find case 1, as
37231 // indicated by LastCMOV == MI.
37232 if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
37233 NextMIIt->getOpcode() == MI.getOpcode() &&
37234 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
37235 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
37236 NextMIIt->getOperand(1).isKill()) {
37237 return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
37238 }
37240 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
37241 MachineFunction *F = ThisMBB->getParent();
37242 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
37243 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
37245 MachineFunction::iterator It = ++ThisMBB->getIterator();
37246 F->insert(It, FalseMBB);
37247 F->insert(It, SinkMBB);
37249 // If the EFLAGS register isn't dead in the terminator, then claim that it's
37250 // live into the sink and copy blocks.
37251 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
37252 if (!LastCMOV->killsRegister(X86::EFLAGS) &&
37253 !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
37254 FalseMBB->addLiveIn(X86::EFLAGS);
37255 SinkMBB->addLiveIn(X86::EFLAGS);
37256 }
37258 // Transfer any debug instructions inside the CMOV sequence to the sunk block.
37259 auto DbgRange = llvm::make_range(MachineBasicBlock::iterator(MI),
37260 MachineBasicBlock::iterator(LastCMOV));
37261 for (MachineInstr &MI : llvm::make_early_inc_range(DbgRange))
37262 if (MI.isDebugInstr())
37263 SinkMBB->push_back(MI.removeFromParent());
37265 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
37266 SinkMBB->splice(SinkMBB->end(), ThisMBB,
37267 std::next(MachineBasicBlock::iterator(LastCMOV)),
37268 ThisMBB->end());
37269 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
37271 // Fallthrough block for ThisMBB.
37272 ThisMBB->addSuccessor(FalseMBB);
37273 // The true block target of the first (or only) branch is always a SinkMBB.
37274 ThisMBB->addSuccessor(SinkMBB);
37275 // Fallthrough block for FalseMBB.
37276 FalseMBB->addSuccessor(SinkMBB);
37278 // Create the conditional branch instruction.
37279 BuildMI(ThisMBB, MIMD, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
37281 // SinkMBB:
37282 //  %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
37283 // ...
37284 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
37285 MachineBasicBlock::iterator MIItEnd =
37286 std::next(MachineBasicBlock::iterator(LastCMOV));
37287 createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
37289 // Now remove the CMOV(s).
37290 ThisMBB->erase(MIItBegin, MIItEnd);
37292 return SinkMBB;
37293 }
37295 static unsigned getSUBriOpcode(bool IsLP64) {
37296 if (IsLP64)
37297 return X86::SUB64ri32;
37298 else
37299 return X86::SUB32ri;
37300 }
37302 MachineBasicBlock *
37303 X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
37304 MachineBasicBlock *MBB) const {
37305 MachineFunction *MF = MBB->getParent();
37306 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
37307 const X86FrameLowering &TFI = *Subtarget.getFrameLowering();
37308 const MIMetadata MIMD(MI);
37309 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
37311 const unsigned ProbeSize = getStackProbeSize(*MF);
37313 MachineRegisterInfo &MRI = MF->getRegInfo();
37314 MachineBasicBlock *testMBB = MF->CreateMachineBasicBlock(LLVM_BB);
37315 MachineBasicBlock *tailMBB = MF->CreateMachineBasicBlock(LLVM_BB);
37316 MachineBasicBlock *blockMBB = MF->CreateMachineBasicBlock(LLVM_BB);
37318 MachineFunction::iterator MBBIter = ++MBB->getIterator();
37319 MF->insert(MBBIter, testMBB);
37320 MF->insert(MBBIter, blockMBB);
37321 MF->insert(MBBIter, tailMBB);
37323 Register sizeVReg = MI.getOperand(1).getReg();
37325 Register physSPReg = TFI.Uses64BitFramePtr ? X86::RSP : X86::ESP;
37327 Register TmpStackPtr = MRI.createVirtualRegister(
37328 TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
37329 Register FinalStackPtr = MRI.createVirtualRegister(
37330 TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
37332 BuildMI(*MBB, {MI}, MIMD, TII->get(TargetOpcode::COPY), TmpStackPtr)
37333 .addReg(physSPReg);
37335 const unsigned Opc = TFI.Uses64BitFramePtr ? X86::SUB64rr : X86::SUB32rr;
37336 BuildMI(*MBB, {MI}, MIMD, TII->get(Opc), FinalStackPtr)
37337 .addReg(TmpStackPtr)
37338 .addReg(sizeVReg);
37343 BuildMI(testMBB, MIMD,
37344 TII->get(TFI.Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
37345 .addReg(FinalStackPtr)
37346 .addReg(physSPReg);
37348 BuildMI(testMBB, MIMD, TII->get(X86::JCC_1))
37349 .addMBB(tailMBB)
37350 .addImm(X86::COND_GE);
37351 testMBB->addSuccessor(blockMBB);
37352 testMBB->addSuccessor(tailMBB);
37354 // Touch the block then extend it. This is done on the opposite side of
37355 // static probe where we allocate then touch, to avoid the need of probing the
37356 // tail of the static alloca. Possible scenarios are:
37358 // + ---- <- ------------ <- ------------- <- ------------ +
37359 // |                                                       |
37360 // [free probe] -> [page alloc] -> [alloc probe] -> [tail alloc] + -> [dyn probe] -> [page alloc] -> [dyn probe] -> [tail alloc] +
37361 //                                                                |                                                               |
37362 //                                                                + <- ----------- <- ------------ <- ----------- <- ------------ +
37364 // The property we want to enforce is to never have more than [page alloc] between two probes.
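// A schematic of the loop emitted below (assumptions: 64-bit, registers
// shown symbolically; this is a sketch, not the literal output):
//   testMBB:  cmp  finalSP, rsp        ; done once finalSP >= rsp
//             jge  tailMBB
//   blockMBB: xor  dword ptr [rsp], 0  ; touch the current page
//             sub  rsp, ProbeSize
//             jmp  testMBB
//   tailMBB:  result := finalSP        ; handed back to the original code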
37366 const unsigned XORMIOpc =
37367 TFI.Uses64BitFramePtr ? X86::XOR64mi32 : X86::XOR32mi;
37368 addRegOffset(BuildMI(blockMBB, MIMD, TII->get(XORMIOpc)), physSPReg, false, 0)
37369 .addImm(0);
37371 BuildMI(blockMBB, MIMD, TII->get(getSUBriOpcode(TFI.Uses64BitFramePtr)),
37372 physSPReg)
37373 .addReg(physSPReg)
37374 .addImm(ProbeSize);
37376 BuildMI(blockMBB, MIMD, TII->get(X86::JMP_1)).addMBB(testMBB);
37377 blockMBB->addSuccessor(testMBB);
37379 // Replace original instruction by the expected stack ptr
37380 BuildMI(tailMBB, MIMD, TII->get(TargetOpcode::COPY),
37381 MI.getOperand(0).getReg())
37382 .addReg(FinalStackPtr);
37384 tailMBB->splice(tailMBB->end(), MBB,
37385 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
37386 tailMBB->transferSuccessorsAndUpdatePHIs(MBB);
37387 MBB->addSuccessor(testMBB);
37389 // Delete the original pseudo instruction.
37390 MI.eraseFromParent();
37392 // And we're done.
37393 return tailMBB;
37394 }
37396 MachineBasicBlock *
37397 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
37398 MachineBasicBlock *BB) const {
37399 MachineFunction *MF = BB->getParent();
37400 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
37401 const MIMetadata MIMD(MI);
37402 const BasicBlock *LLVM_BB = BB->getBasicBlock();
37404 assert(MF->shouldSplitStack());
37406 const bool Is64Bit = Subtarget.is64Bit();
37407 const bool IsLP64 = Subtarget.isTarget64BitLP64();
37409 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
37410 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
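// These offsets follow the split-stack convention shared with libgcc's
// __morestack (stated here as an assumption for orientation): the stack
// limit is kept in the TCB at %fs:0x70 on LP64, %fs:0x40 on x32, and
// %gs:0x30 on 32-bit, matching the selection above.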
37412 // BB:
37413 //  ... [Till the alloca]
37414 // If stacklet is not large enough, jump to mallocMBB
37415 //
37416 // bumpMBB:
37417 //  Allocate by subtracting from RSP
37418 //  Jump to continueMBB
37419 //
37420 // mallocMBB:
37421 //  Allocate by call to runtime
37422 //
37423 // continueMBB:
37424 //  ...
37425 //  [rest of original BB]
37428 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
37429 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
37430 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
37432 MachineRegisterInfo &MRI = MF->getRegInfo();
37433 const TargetRegisterClass *AddrRegClass =
37434 getRegClassFor(getPointerTy(MF->getDataLayout()));
37436 Register mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
37437 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
37438 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
37439 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
37440 sizeVReg = MI.getOperand(1).getReg(),
37441 physSPReg =
37442 IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
37444 MachineFunction::iterator MBBIter = ++BB->getIterator();
37446 MF->insert(MBBIter, bumpMBB);
37447 MF->insert(MBBIter, mallocMBB);
37448 MF->insert(MBBIter, continueMBB);
37450 continueMBB->splice(continueMBB->begin(), BB,
37451 std::next(MachineBasicBlock::iterator(MI)), BB->end());
37452 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
37454 // Add code to the main basic block to check if the stack limit has been hit,
37455 // and if so, jump to mallocMBB otherwise to bumpMBB.
37456 BuildMI(BB, MIMD, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
37457 BuildMI(BB, MIMD, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
37458 .addReg(tmpSPVReg).addReg(sizeVReg);
37459 BuildMI(BB, MIMD, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
37460 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
37461 .addReg(SPLimitVReg);
37462 BuildMI(BB, MIMD, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
37464 // bumpMBB simply decreases the stack pointer, since we know the current
37465 // stacklet has enough space.
37466 BuildMI(bumpMBB, MIMD, TII->get(TargetOpcode::COPY), physSPReg)
37467 .addReg(SPLimitVReg);
37468 BuildMI(bumpMBB, MIMD, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
37469 .addReg(SPLimitVReg);
37470 BuildMI(bumpMBB, MIMD, TII->get(X86::JMP_1)).addMBB(continueMBB);
37472 // Calls into a routine in libgcc to allocate more space from the heap.
37473 const uint32_t *RegMask =
37474 Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
37475 if (IsLP64) {
37476 BuildMI(mallocMBB, MIMD, TII->get(X86::MOV64rr), X86::RDI)
37477 .addReg(sizeVReg);
37478 BuildMI(mallocMBB, MIMD, TII->get(X86::CALL64pcrel32))
37479 .addExternalSymbol("__morestack_allocate_stack_space")
37480 .addRegMask(RegMask)
37481 .addReg(X86::RDI, RegState::Implicit)
37482 .addReg(X86::RAX, RegState::ImplicitDefine);
37483 } else if (Is64Bit) {
37484 BuildMI(mallocMBB, MIMD, TII->get(X86::MOV32rr), X86::EDI)
37485 .addReg(sizeVReg);
37486 BuildMI(mallocMBB, MIMD, TII->get(X86::CALL64pcrel32))
37487 .addExternalSymbol("__morestack_allocate_stack_space")
37488 .addRegMask(RegMask)
37489 .addReg(X86::EDI, RegState::Implicit)
37490 .addReg(X86::EAX, RegState::ImplicitDefine);
37491 } else {
37492 BuildMI(mallocMBB, MIMD, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
37493 .addImm(16);
37494 BuildMI(mallocMBB, MIMD, TII->get(X86::PUSH32r)).addReg(sizeVReg);
37495 BuildMI(mallocMBB, MIMD, TII->get(X86::CALLpcrel32))
37496 .addExternalSymbol("__morestack_allocate_stack_space")
37497 .addRegMask(RegMask)
37498 .addReg(X86::EAX, RegState::ImplicitDefine);
37502 BuildMI(mallocMBB, MIMD, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
37503 .addImm(16);
37504 }
37505 BuildMI(mallocMBB, MIMD, TII->get(TargetOpcode::COPY), mallocPtrVReg)
37506 .addReg(IsLP64 ? X86::RAX : X86::EAX);
37507 BuildMI(mallocMBB, MIMD, TII->get(X86::JMP_1)).addMBB(continueMBB);
37509 // Set up the CFG correctly.
37510 BB->addSuccessor(bumpMBB);
37511 BB->addSuccessor(mallocMBB);
37512 mallocMBB->addSuccessor(continueMBB);
37513 bumpMBB->addSuccessor(continueMBB);
37515 // Take care of the PHI nodes.
37516 BuildMI(*continueMBB, continueMBB->begin(), MIMD, TII->get(X86::PHI),
37517 MI.getOperand(0).getReg())
37518 .addReg(mallocPtrVReg)
37519 .addMBB(mallocMBB)
37520 .addReg(bumpSPPtrVReg)
37521 .addMBB(bumpMBB);
37523 // Delete the original pseudo instruction.
37524 MI.eraseFromParent();
37526 // And we're done.
37527 return continueMBB;
37528 }
37530 MachineBasicBlock *
37531 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
37532 MachineBasicBlock *BB) const {
37533 MachineFunction *MF = BB->getParent();
37534 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
37535 MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
37536 const MIMetadata MIMD(MI);
37538 assert(!isAsynchronousEHPersonality(
37539 classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
37540 "SEH does not use catchret!");
37542 // Only 32-bit EH needs to worry about manually restoring stack pointers.
37543 if (!Subtarget.is32Bit())
37544 return BB;
37546 // C++ EH creates a new target block to hold the restore code, and wires up
37547 // the new block to the return destination with a normal JMP_4.
37548 MachineBasicBlock *RestoreMBB =
37549 MF->CreateMachineBasicBlock(BB->getBasicBlock());
37550 assert(BB->succ_size() == 1);
37551 MF->insert(std::next(BB->getIterator()), RestoreMBB);
37552 RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
37553 BB->addSuccessor(RestoreMBB);
37554 MI.getOperand(0).setMBB(RestoreMBB);
37556 // Marking this as an EH pad but not a funclet entry block causes PEI to
37557 // restore stack pointers in the block.
37558 RestoreMBB->setIsEHPad(true);
37560 auto RestoreMBBI = RestoreMBB->begin();
37561 BuildMI(*RestoreMBB, RestoreMBBI, MIMD, TII.get(X86::JMP_4)).addMBB(TargetMBB);
37562 return BB;
37563 }
37565 MachineBasicBlock *
37566 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
37567 MachineBasicBlock *BB) const {
37568 // So, here we replace TLSADDR with the sequence:
37569 // adjust_stackdown -> TLSADDR -> adjust_stackup.
37570 // We need this because TLSADDR is lowered into calls
37571 // inside MC, therefore without the two markers shrink-wrapping
37572 // may push the prologue/epilogue past them.
37573 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
37574 const MIMetadata MIMD(MI);
37575 MachineFunction &MF = *BB->getParent();
37577 // Emit CALLSEQ_START right before the instruction.
37578 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
37579 MachineInstrBuilder CallseqStart =
37580 BuildMI(MF, MIMD, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
37581 BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
37583 // Emit CALLSEQ_END right after the instruction.
37584 // We don't call erase from parent because we want to keep the
37585 // original instruction around.
37586 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
37587 MachineInstrBuilder CallseqEnd =
37588 BuildMI(MF, MIMD, TII.get(AdjStackUp)).addImm(0).addImm(0);
37589 BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
37591 return BB;
37592 }
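// For reference, the resulting sequence is, schematically:
//   ADJCALLSTACKDOWN 0, 0, 0
//   TLSADDR ...          ; becomes the __tls_get_addr call during MC lowering
//   ADJCALLSTACKUP 0, 0
// so the call-frame markers bracket the call that only materializes in MC.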
37594 MachineBasicBlock *
37595 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
37596 MachineBasicBlock *BB) const {
37597 // This is pretty easy. We're taking the value that we received from
37598 // our load from the relocation, sticking it in either RDI (x86-64)
37599 // or EAX and doing an indirect call. The return value will then
37600 // be in the normal return register.
37601 MachineFunction *F = BB->getParent();
37602 const X86InstrInfo *TII = Subtarget.getInstrInfo();
37603 const MIMetadata MIMD(MI);
37605 assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
37606 assert(MI.getOperand(3).isGlobal() && "This should be a global");
37608 // Get a register mask for the lowered call.
37609 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
37610 // proper register mask.
37611 const uint32_t *RegMask =
37612 Subtarget.is64Bit() ?
37613 Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
37614 Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
37615 if (Subtarget.is64Bit()) {
37616 MachineInstrBuilder MIB =
37617 BuildMI(*BB, MI, MIMD, TII->get(X86::MOV64rm), X86::RDI)
37618 .addReg(X86::RIP)
37619 .addImm(1)
37620 .addReg(0)
37621 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
37622 MI.getOperand(3).getTargetFlags())
37623 .addReg(0);
37624 MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL64m));
37625 addDirectMem(MIB, X86::RDI);
37626 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
37627 } else if (!isPositionIndependent()) {
37628 MachineInstrBuilder MIB =
37629 BuildMI(*BB, MI, MIMD, TII->get(X86::MOV32rm), X86::EAX)
37630 .addReg(0)
37631 .addImm(1)
37632 .addReg(0)
37633 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
37634 MI.getOperand(3).getTargetFlags())
37635 .addReg(0);
37636 MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL32m));
37637 addDirectMem(MIB, X86::EAX);
37638 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
37639 } else {
37640 MachineInstrBuilder MIB =
37641 BuildMI(*BB, MI, MIMD, TII->get(X86::MOV32rm), X86::EAX)
37642 .addReg(TII->getGlobalBaseReg(F))
37643 .addImm(1)
37644 .addReg(0)
37645 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
37646 MI.getOperand(3).getTargetFlags())
37647 .addReg(0);
37648 MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL32m));
37649 addDirectMem(MIB, X86::EAX);
37650 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
37651 }
37653 MI.eraseFromParent(); // The pseudo instruction is gone now.
37654 return BB;
37655 }
37657 static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {
37658 switch (RPOpc) {
37659 case X86::INDIRECT_THUNK_CALL32:
37660 return X86::CALLpcrel32;
37661 case X86::INDIRECT_THUNK_CALL64:
37662 return X86::CALL64pcrel32;
37663 case X86::INDIRECT_THUNK_TCRETURN32:
37664 return X86::TCRETURNdi;
37665 case X86::INDIRECT_THUNK_TCRETURN64:
37666 return X86::TCRETURNdi64;
37667 }
37668 llvm_unreachable("not indirect thunk opcode");
37669 }
37671 static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
37672 unsigned Reg) {
37673 if (Subtarget.useRetpolineExternalThunk()) {
37674 // When using an external thunk for retpolines, we pick names that match the
37675 // names GCC happens to use as well. This helps simplify the implementation
37676 // of the thunks for kernels where they have no easy ability to create
37677 // aliases and are doing non-trivial configuration of the thunk's body. For
37678 // example, the Linux kernel will do boot-time hot patching of the thunk
37679 // bodies and cannot easily export aliases of these to loaded modules.
37681 // Note that at any point in the future, we may need to change the semantics
37682 // of how we implement retpolines and at that time will likely change the
37683 // name of the called thunk. Essentially, there is no hard guarantee that
37684 // LLVM will generate calls to specific thunks, we merely make a best-effort
37685 // attempt to help out kernels and other systems where duplicating the
37686 // thunks is costly.
37687 switch (Reg) {
37688 case X86::EAX:
37689 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
37690 return "__x86_indirect_thunk_eax";
37691 case X86::ECX:
37692 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
37693 return "__x86_indirect_thunk_ecx";
37694 case X86::EDX:
37695 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
37696 return "__x86_indirect_thunk_edx";
37697 case X86::EDI:
37698 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
37699 return "__x86_indirect_thunk_edi";
37700 case X86::R11:
37701 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
37702 return "__x86_indirect_thunk_r11";
37703 }
37704 llvm_unreachable("unexpected reg for external indirect thunk");
37705 }
37707 if (Subtarget.useRetpolineIndirectCalls() ||
37708 Subtarget.useRetpolineIndirectBranches()) {
37709 // When targeting an internal COMDAT thunk use an LLVM-specific name.
37710 switch (Reg) {
37711 case X86::EAX:
37712 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
37713 return "__llvm_retpoline_eax";
37714 case X86::ECX:
37715 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
37716 return "__llvm_retpoline_ecx";
37717 case X86::EDX:
37718 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
37719 return "__llvm_retpoline_edx";
37720 case X86::EDI:
37721 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
37722 return "__llvm_retpoline_edi";
37723 case X86::R11:
37724 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
37725 return "__llvm_retpoline_r11";
37726 }
37727 llvm_unreachable("unexpected reg for retpoline");
37728 }
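// For orientation, a 64-bit retpoline thunk body (defined elsewhere, shown
// here only as a sketch) looks roughly like:
//   __llvm_retpoline_r11:
//     call .Lsetup
//   .Lspec_trap:
//     pause
//     lfence
//     jmp  .Lspec_trap
//   .Lsetup:
//     mov  %r11, (%rsp)
//     ret
// i.e. the indirect target in %r11 replaces the return address, and any
// misspeculation is parked in the pause/lfence loop.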
37730 if (Subtarget.useLVIControlFlowIntegrity()) {
37731 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
37732 return "__llvm_lvi_thunk_r11";
37734 llvm_unreachable("getIndirectThunkSymbol() invoked without thunk feature");
37737 MachineBasicBlock *
37738 X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
37739 MachineBasicBlock *BB) const {
37740 // Copy the virtual register into the R11 physical register and
37741 // call the retpoline thunk.
37742 const MIMetadata MIMD(MI);
37743 const X86InstrInfo *TII = Subtarget.getInstrInfo();
37744 Register CalleeVReg = MI.getOperand(0).getReg();
37745 unsigned Opc = getOpcodeForIndirectThunk(MI.getOpcode());
37747 // Find an available scratch register to hold the callee. On 64-bit, we can
37748 // just use R11, but we scan for uses anyway to ensure we don't generate
37749 // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
37750 // already a register use operand to the call to hold the callee. If none
37751 // are available, use EDI instead. EDI is chosen because EBX is the PIC base
37752 // register and ESI is the base pointer to realigned stack frames with VLAs.
37753 SmallVector<unsigned, 3> AvailableRegs;
37754 if (Subtarget.is64Bit())
37755 AvailableRegs.push_back(X86::R11);
37756 else
37757 AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
37759 // Zero out any registers that are already used.
37760 for (const auto &MO : MI.operands()) {
37761 if (MO.isReg() && MO.isUse())
37762 for (unsigned &Reg : AvailableRegs)
37763 if (Reg == MO.getReg())
37764 Reg = 0;
37765 }
37767 // Choose the first remaining non-zero available register.
37768 unsigned AvailableReg = 0;
37769 for (unsigned MaybeReg : AvailableRegs) {
37770 if (MaybeReg) {
37771 AvailableReg = MaybeReg;
37772 break;
37773 }
37774 }
37775 if (!AvailableReg)
37776 report_fatal_error("calling convention incompatible with retpoline, no "
37777 "available registers");
37779 const char *Symbol = getIndirectThunkSymbol(Subtarget, AvailableReg);
37781 BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), AvailableReg)
37782 .addReg(CalleeVReg);
37783 MI.getOperand(0).ChangeToES(Symbol);
37784 MI.setDesc(TII->get(Opc));
37785 MachineInstrBuilder(*BB->getParent(), &MI)
37786 .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
37787 return BB;
37788 }
37790 /// SetJmp implies future control flow change upon calling the corresponding
37791 /// function.
37792 /// Instead of using the 'return' instruction, the long jump fixes the stack and
37793 /// performs an indirect branch. To do so it uses the registers that were stored
37794 /// in the jump buffer (when calling SetJmp).
37795 /// In case the shadow stack is enabled we need to fix it as well, because some
37796 /// return addresses will be skipped.
37797 /// The function will save the SSP for future fixing in the function
37798 /// emitLongJmpShadowStackFix.
37799 /// \sa emitLongJmpShadowStackFix
37800 /// \param [in] MI The temporary Machine Instruction for the builtin.
37801 /// \param [in] MBB The Machine Basic Block that will be modified.
37802 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
37803 MachineBasicBlock *MBB) const {
37804 const MIMetadata MIMD(MI);
37805 MachineFunction *MF = MBB->getParent();
37806 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
37807 MachineRegisterInfo &MRI = MF->getRegInfo();
37808 MachineInstrBuilder MIB;
37810 // Memory Reference.
37811 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
37812 MI.memoperands_end());
37814 // Initialize a register with zero.
37815 MVT PVT = getPointerTy(MF->getDataLayout());
37816 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
37817 Register ZReg = MRI.createVirtualRegister(PtrRC);
37818 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
37819 BuildMI(*MBB, MI, MIMD, TII->get(XorRROpc))
37821 .addReg(ZReg, RegState::Undef)
37822 .addReg(ZReg, RegState::Undef);
37824 // Read the current SSP Register value to the zeroed register.
37825 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
37826 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
37827 BuildMI(*MBB, MI, MIMD, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
37829 // Write the SSP register value to offset 3 in input memory buffer.
37830 unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
37831 MIB = BuildMI(*MBB, MI, MIMD, TII->get(PtrStoreOpc));
37832 const int64_t SSPOffset = 3 * PVT.getStoreSize();
37833 const unsigned MemOpndSlot = 1;
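// Slot layout of the jump buffer as used by this lowering, in pointer-sized
// words (a summary for orientation; the offsets match the LabelOffset,
// SPOffset, and SSPOffset constants used by the setjmp/longjmp emitters in
// this file):
//   buf[0] = frame pointer
//   buf[1] = resume label address
//   buf[2] = stack pointer
//   buf[3] = shadow stack pointer (stored by the loop below)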
37834 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
37835 if (i == X86::AddrDisp)
37836 MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
37837 else
37838 MIB.add(MI.getOperand(MemOpndSlot + i));
37839 }
37840 MIB.addReg(SSPCopyReg);
37841 MIB.setMemRefs(MMOs);
37842 }
37844 MachineBasicBlock *
37845 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
37846 MachineBasicBlock *MBB) const {
37847 const MIMetadata MIMD(MI);
37848 MachineFunction *MF = MBB->getParent();
37849 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
37850 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
37851 MachineRegisterInfo &MRI = MF->getRegInfo();
37853 const BasicBlock *BB = MBB->getBasicBlock();
37854 MachineFunction::iterator I = ++MBB->getIterator();
37856 // Memory Reference
37857 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
37858 MI.memoperands_end());
37860 unsigned DstReg;
37861 unsigned MemOpndSlot = 0;
37863 unsigned CurOp = 0;
37865 DstReg = MI.getOperand(CurOp++).getReg();
37866 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
37867 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
37869 Register mainDstReg = MRI.createVirtualRegister(RC);
37870 Register restoreDstReg = MRI.createVirtualRegister(RC);
37872 MemOpndSlot = CurOp;
37874 MVT PVT = getPointerTy(MF->getDataLayout());
37875 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
37876 "Invalid Pointer Size!");
37878 // For v = setjmp(buf), we generate
37879 //
37880 // thisMBB:
37881 //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
37882 //  SjLjSetup restoreMBB
37883 //
37884 // mainMBB:
37885 //  v_main = 0
37886 //
37887 // sinkMBB:
37888 //  v = phi(main, restore)
37889 //
37890 // restoreMBB:
37891 //  if base pointer being used, load it from frame
37892 //  v_restore = 1
37894 MachineBasicBlock *thisMBB = MBB;
37895 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
37896 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
37897 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
37898 MF->insert(I, mainMBB);
37899 MF->insert(I, sinkMBB);
37900 MF->push_back(restoreMBB);
37901 restoreMBB->setMachineBlockAddressTaken();
37903 MachineInstrBuilder MIB;
37905 // Transfer the remainder of BB and its successor edges to sinkMBB.
37906 sinkMBB->splice(sinkMBB->begin(), MBB,
37907 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
37908 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
37910 // thisMBB:
37911 unsigned PtrStoreOpc = 0;
37912 unsigned LabelReg = 0;
37913 const int64_t LabelOffset = 1 * PVT.getStoreSize();
37914 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
37915 !isPositionIndependent();
37917 // Prepare IP either in reg or imm.
37918 if (!UseImmLabel) {
37919 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
37920 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
37921 LabelReg = MRI.createVirtualRegister(PtrRC);
37922 if (Subtarget.is64Bit()) {
37923 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::LEA64r), LabelReg)
37924 .addReg(X86::RIP)
37925 .addImm(1)
37926 .addReg(0)
37927 .addMBB(restoreMBB)
37928 .addReg(0);
37929 } else {
37930 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
37931 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::LEA32r), LabelReg)
37932 .addReg(XII->getGlobalBaseReg(MF))
37933 .addImm(1)
37934 .addReg(0)
37935 .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
37936 .addReg(0);
37937 }
37938 } else
37939 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
37941 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrStoreOpc));
37942 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
37943 if (i == X86::AddrDisp)
37944 MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
37945 else
37946 MIB.add(MI.getOperand(MemOpndSlot + i));
37947 }
37948 if (!UseImmLabel)
37949 MIB.addReg(LabelReg);
37950 else
37951 MIB.addMBB(restoreMBB);
37952 MIB.setMemRefs(MMOs);
37954 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
37955 emitSetJmpShadowStackFix(MI, thisMBB);
37956 }
37958 // Setup
37959 MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::EH_SjLj_Setup))
37960 .addMBB(restoreMBB);
37962 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
37963 MIB.addRegMask(RegInfo->getNoPreservedMask());
37964 thisMBB->addSuccessor(mainMBB);
37965 thisMBB->addSuccessor(restoreMBB);
37967 // mainMBB:
37968 //  v_main = 0
37969 BuildMI(mainMBB, MIMD, TII->get(X86::MOV32r0), mainDstReg);
37970 mainMBB->addSuccessor(sinkMBB);
37972 // sinkMBB:
37973 BuildMI(*sinkMBB, sinkMBB->begin(), MIMD, TII->get(X86::PHI), DstReg)
37974 .addReg(mainDstReg)
37976 .addReg(restoreDstReg)
37977 .addMBB(restoreMBB);
37979 // restoreMBB:
37980 if (RegInfo->hasBasePointer(*MF)) {
37981 const bool Uses64BitFramePtr =
37982 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
37983 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
37984 X86FI->setRestoreBasePointer(MF);
37985 Register FramePtr = RegInfo->getFrameRegister(*MF);
37986 Register BasePtr = RegInfo->getBaseRegister();
37987 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
37988 addRegOffset(BuildMI(restoreMBB, MIMD, TII->get(Opm), BasePtr),
37989 FramePtr, true, X86FI->getRestoreBasePointerOffset())
37990 .setMIFlag(MachineInstr::FrameSetup);
37991 }
37992 BuildMI(restoreMBB, MIMD, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
37993 BuildMI(restoreMBB, MIMD, TII->get(X86::JMP_1)).addMBB(sinkMBB);
37994 restoreMBB->addSuccessor(sinkMBB);
37996 MI.eraseFromParent();
37997 return sinkMBB;
37998 }
38000 /// Fix the shadow stack using the previously saved SSP pointer.
38001 /// \sa emitSetJmpShadowStackFix
38002 /// \param [in] MI The temporary Machine Instruction for the builtin.
38003 /// \param [in] MBB The Machine Basic Block that will be modified.
38004 /// \return The sink MBB that will perform the future indirect branch.
38005 MachineBasicBlock *
38006 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
38007 MachineBasicBlock *MBB) const {
38008 const MIMetadata MIMD(MI);
38009 MachineFunction *MF = MBB->getParent();
38010 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
38011 MachineRegisterInfo &MRI = MF->getRegInfo();
38013 // Memory Reference
38014 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
38015 MI.memoperands_end());
38017 MVT PVT = getPointerTy(MF->getDataLayout());
38018 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
38020 // checkSspMBB:
38021 //         xor vreg1, vreg1
38022 //         rdssp vreg1
38023 //         test vreg1, vreg1
38024 //         je sinkMBB   # Jump if Shadow Stack is not supported
38025 // fallMBB:
38026 //         mov buf+24/12(%rip), vreg2
38027 //         sub vreg1, vreg2
38028 //         jbe sinkMBB  # No need to fix the Shadow Stack
38029 // fixShadowMBB:
38030 //         shr 3/2, vreg2
38031 //         incssp vreg2  # fix the SSP according to the lower 8 bits
38032 //         shr 8, vreg2
38033 //         je sinkMBB
38034 // fixShadowLoopPrepareMBB:
38035 //         shl vreg2
38036 //         mov 128, vreg3
38037 // fixShadowLoopMBB:
38038 //         incssp vreg3
38039 //         dec vreg2
38040 //         jne fixShadowLoopMBB # Iterate until you finish fixing
38041 //                              # the Shadow Stack
38042 // sinkMBB:
38044 MachineFunction::iterator I = ++MBB->getIterator();
38045 const BasicBlock *BB = MBB->getBasicBlock();
38047 MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
38048 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
38049 MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
38050 MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
38051 MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
38052 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
38053 MF->insert(I, checkSspMBB);
38054 MF->insert(I, fallMBB);
38055 MF->insert(I, fixShadowMBB);
38056 MF->insert(I, fixShadowLoopPrepareMBB);
38057 MF->insert(I, fixShadowLoopMBB);
38058 MF->insert(I, sinkMBB);
38060 // Transfer the remainder of BB and its successor edges to sinkMBB.
38061 sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
38062 MBB->end());
38063 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
38065 MBB->addSuccessor(checkSspMBB);
38067 // Initialize a register with zero.
38068 Register ZReg = MRI.createVirtualRegister(&X86::GR32RegClass);
38069 BuildMI(checkSspMBB, MIMD, TII->get(X86::MOV32r0), ZReg);
38071 if (PVT == MVT::i64) {
38072 Register TmpZReg = MRI.createVirtualRegister(PtrRC);
38073 BuildMI(checkSspMBB, MIMD, TII->get(X86::SUBREG_TO_REG), TmpZReg)
38074 .addImm(0)
38075 .addReg(ZReg)
38076 .addImm(X86::sub_32bit);
38077 ZReg = TmpZReg;
38078 }
38080 // Read the current SSP Register value to the zeroed register.
38081 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
38082 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
38083 BuildMI(checkSspMBB, MIMD, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
38085 // Check whether the result of the SSP register is zero and jump directly
38086 // to the sink.
38087 unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
38088 BuildMI(checkSspMBB, MIMD, TII->get(TestRROpc))
38089 .addReg(SSPCopyReg)
38090 .addReg(SSPCopyReg);
38091 BuildMI(checkSspMBB, MIMD, TII->get(X86::JCC_1))
38092 .addMBB(sinkMBB)
38093 .addImm(X86::COND_E);
38094 checkSspMBB->addSuccessor(sinkMBB);
38095 checkSspMBB->addSuccessor(fallMBB);
38097 // Reload the previously saved SSP register value.
38098 Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
38099 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
38100 const int64_t SPPOffset = 3 * PVT.getStoreSize();
38101 MachineInstrBuilder MIB =
38102 BuildMI(fallMBB, MIMD, TII->get(PtrLoadOpc), PrevSSPReg);
38103 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
38104 const MachineOperand &MO = MI.getOperand(i);
38105 if (i == X86::AddrDisp)
38106 MIB.addDisp(MO, SPPOffset);
38107 else if (MO.isReg()) // Don't add the whole operand, we don't want to
38108 // preserve kill flags.
38109 MIB.addReg(MO.getReg());
38110 else
38111 MIB.add(MO);
38112 }
38113 MIB.setMemRefs(MMOs);
38115 // Subtract the current SSP from the previous SSP.
38116 Register SspSubReg = MRI.createVirtualRegister(PtrRC);
38117 unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
38118 BuildMI(fallMBB, MIMD, TII->get(SubRROpc), SspSubReg)
38119 .addReg(PrevSSPReg)
38120 .addReg(SSPCopyReg);
38122 // Jump to sink in case PrevSSPReg <= SSPCopyReg.
38123 BuildMI(fallMBB, MIMD, TII->get(X86::JCC_1))
38124 .addMBB(sinkMBB)
38125 .addImm(X86::COND_BE);
38126 fallMBB->addSuccessor(sinkMBB);
38127 fallMBB->addSuccessor(fixShadowMBB);
38129 // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
38130 unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
38131 unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
38132 Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
38133 BuildMI(fixShadowMBB, MIMD, TII->get(ShrRIOpc), SspFirstShrReg)
38134 .addReg(SspSubReg)
38135 .addImm(Offset);
38137 // Increase SSP, looking only at the lower 8 bits of the delta.
38138 unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
38139 BuildMI(fixShadowMBB, MIMD, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
38141 // Reset the lower 8 bits.
38142 Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
38143 BuildMI(fixShadowMBB, MIMD, TII->get(ShrRIOpc), SspSecondShrReg)
38144 .addReg(SspFirstShrReg)
38145 .addImm(8);
38147 // Jump if the result of the shift is zero.
38148 BuildMI(fixShadowMBB, MIMD, TII->get(X86::JCC_1))
38149 .addMBB(sinkMBB)
38150 .addImm(X86::COND_E);
38151 fixShadowMBB->addSuccessor(sinkMBB);
38152 fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
38154 // Do a single shift left.
38155 unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64ri : X86::SHL32ri;
38156 Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
38157 BuildMI(fixShadowLoopPrepareMBB, MIMD, TII->get(ShlR1Opc), SspAfterShlReg)
38158 .addReg(SspSecondShrReg)
38159 .addImm(1);
38161 // Save the value 128 to a register (will be used next with incssp).
38162 Register Value128InReg = MRI.createVirtualRegister(PtrRC);
38163 unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
38164 BuildMI(fixShadowLoopPrepareMBB, MIMD, TII->get(MovRIOpc), Value128InReg)
38165 .addImm(128);
38166 fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
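// Worked example (delta illustrative): for a slot delta of 0x1234, the
// incssp above consumes the low 8 bits (0x34 slots). The remaining
// 0x12 << 1 = 0x24 loop iterations below each advance the SSP by 128
// slots: 0x24 * 128 + 0x34 = 0x1234, exactly the saved distance.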
38168 // Since incssp only looks at the lower 8 bits, we might need to do several
38169 // iterations of incssp until we finish fixing the shadow stack.
38170 Register DecReg = MRI.createVirtualRegister(PtrRC);
38171 Register CounterReg = MRI.createVirtualRegister(PtrRC);
38172 BuildMI(fixShadowLoopMBB, MIMD, TII->get(X86::PHI), CounterReg)
38173 .addReg(SspAfterShlReg)
38174 .addMBB(fixShadowLoopPrepareMBB)
38175 .addReg(DecReg)
38176 .addMBB(fixShadowLoopMBB);
38178 // Every iteration we increase the SSP by 128.
38179 BuildMI(fixShadowLoopMBB, MIMD, TII->get(IncsspOpc)).addReg(Value128InReg);
38181 // Every iteration we decrement the counter by 1.
38182 unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
38183 BuildMI(fixShadowLoopMBB, MIMD, TII->get(DecROpc), DecReg).addReg(CounterReg);
38185 // Jump if the counter is not zero yet.
38186 BuildMI(fixShadowLoopMBB, MIMD, TII->get(X86::JCC_1))
38187 .addMBB(fixShadowLoopMBB)
38188 .addImm(X86::COND_NE);
38189 fixShadowLoopMBB->addSuccessor(sinkMBB);
38190 fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
38192 return sinkMBB;
38193 }
MachineBasicBlock *
X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  const MIMetadata MIMD(MI);
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
                                           MI.memoperands_end());

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
      (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
  Register Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  Register FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
  Register SP = RegInfo->getStackRegister();

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset = 2 * PVT.getStoreSize();
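  // These displacements mirror the layout written by the setjmp expansion:
  // FP at offset 0, the resume label at LabelOffset, and SP at SPOffset.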
  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
  unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;

  MachineBasicBlock *thisMBB = MBB;

  // When CET and shadow stack are enabled, we need to fix the Shadow Stack.
  if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
    thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
  }

  // Reload FP
  MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), FP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg()) // Don't add the whole operand, we don't want to
                    // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
  MIB.setMemRefs(MMOs);

  // Reload IP
  MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), Tmp);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (i == X86::AddrDisp)
      MIB.addDisp(MO, LabelOffset);
    else if (MO.isReg()) // Don't add the whole operand, we don't want to
                         // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
  MIB.setMemRefs(MMOs);

  // Reload SP
  MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), SP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(i), SPOffset);
    else
      MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
                                 // the last instruction of the expansion.
  }
  MIB.setMemRefs(MMOs);

  // Jump
  BuildMI(*thisMBB, MI, MIMD, TII->get(IJmpOpc)).addReg(Tmp);

  MI.eraseFromParent();
  return thisMBB;
}
void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
                                               MachineBasicBlock *MBB,
                                               MachineBasicBlock *DispatchBB,
                                               int FI) const {
  const MIMetadata MIMD(MI);
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  const X86InstrInfo *TII = Subtarget.getInstrInfo();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

  unsigned Op = 0;
  unsigned VR = 0;

  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                     !isPositionIndependent();
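  // With the small code model and no PIC, the dispatch block's address fits
  // in a 32-bit immediate and can be stored directly; otherwise it must be
  // materialized into a register with LEA first.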
  if (UseImmLabel) {
    Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
  } else {
    const TargetRegisterClass *TRC =
        (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
    VR = MRI->createVirtualRegister(TRC);
    Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;

    if (Subtarget.is64Bit())
      BuildMI(*MBB, MI, MIMD, TII->get(X86::LEA64r), VR)
          .addReg(X86::RIP)
          .addImm(1)
          .addReg(0)
          .addMBB(DispatchBB)
          .addReg(0);
    else
      BuildMI(*MBB, MI, MIMD, TII->get(X86::LEA32r), VR)
          .addReg(0) /* TII->getGlobalBaseReg(MF) */
          .addImm(1)
          .addReg(0)
          .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
          .addReg(0);
  }

  MachineInstrBuilder MIB = BuildMI(*MBB, MI, MIMD, TII->get(Op));
  addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
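  // The 56/36 byte displacement selects the jump buffer slot of the SjLj
  // function context (cf. SjLjEHPrepare) that receives the dispatch address.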
  if (UseImmLabel)
    MIB.addMBB(DispatchBB);
  else
    MIB.addReg(VR);
}
MachineBasicBlock *
X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
                                         MachineBasicBlock *BB) const {
  const MIMetadata MIMD(MI);
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  const X86InstrInfo *TII = Subtarget.getInstrInfo();
  int FI = MF->getFrameInfo().getFunctionContextIndex();

  // Get a mapping of the call site numbers to all of the landing pads they're
  // associated with.
  DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
  unsigned MaxCSNum = 0;
  for (auto &MBB : *MF) {
    if (!MBB.isEHPad())
      continue;

    MCSymbol *Sym = nullptr;
    for (const auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;

      assert(MI.isEHLabel() && "expected EH_LABEL");
      Sym = MI.getOperand(0).getMCSymbol();
      break;
    }

    if (!MF->hasCallSiteLandingPad(Sym))
      continue;

    for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
      CallSiteNumToLPad[CSI].push_back(&MBB);
      MaxCSNum = std::max(MaxCSNum, CSI);
    }
  }

  // Get an ordered list of the machine basic blocks for the jump table.
  std::vector<MachineBasicBlock *> LPadList;
  SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
  LPadList.reserve(CallSiteNumToLPad.size());

  for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
    for (auto &LP : CallSiteNumToLPad[CSI]) {
      LPadList.push_back(LP);
      InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
    }
  }

  assert(!LPadList.empty() &&
         "No landing pad destinations for the dispatch jump table!");
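  // The jump table below is indexed by the call site value recorded at run
  // time, so LPadList must stay in call site number order.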
  // Create the MBBs for the dispatch code.

  // Shove the dispatch's address into the return slot in the function context.
  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
  DispatchBB->setIsEHPad(true);

  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  BuildMI(TrapBB, MIMD, TII->get(X86::TRAP));
  DispatchBB->addSuccessor(TrapBB);

  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
  DispatchBB->addSuccessor(DispContBB);

  // Insert MBBs.
  MF->push_back(DispatchBB);
  MF->push_back(DispContBB);
  MF->push_back(TrapBB);

  // Insert code into the entry block that creates and registers the function
  // context.
  SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);

  // Create the jump table and associated information.
  unsigned JTE = getJumpTableEncoding();
  MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
  unsigned MJTI = JTI->createJumpTableIndex(LPadList);

  const X86RegisterInfo &RI = TII->getRegisterInfo();
  // Add a register mask with no preserved registers. This results in all
  // registers being marked as clobbered.
  if (RI.hasBasePointer(*MF)) {
    const bool FPIs64Bit =
        Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
    X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
    MFI->setRestoreBasePointer(MF);

    Register FP = RI.getFrameRegister(*MF);
    Register BP = RI.getBaseRegister();
    unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
    addRegOffset(BuildMI(DispatchBB, MIMD, TII->get(Op), BP), FP, true,
                 MFI->getRestoreBasePointerOffset())
        .addRegMask(RI.getNoPreservedMask());
  } else {
    BuildMI(DispatchBB, MIMD, TII->get(X86::NOOP))
        .addRegMask(RI.getNoPreservedMask());
  }

  // IReg is used as an index in a memory operand and therefore can't be SP.
  Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
  addFrameReference(BuildMI(DispatchBB, MIMD, TII->get(X86::MOV32rm), IReg), FI,
                    Subtarget.is64Bit() ? 8 : 4);
  BuildMI(DispatchBB, MIMD, TII->get(X86::CMP32ri))
      .addReg(IReg)
      .addImm(LPadList.size());
  BuildMI(DispatchBB, MIMD, TII->get(X86::JCC_1))
      .addMBB(TrapBB)
      .addImm(X86::COND_AE);
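  // An out-of-range call site value indicates a corrupt function context;
  // trap instead of indexing past the end of the jump table.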
  if (Subtarget.is64Bit()) {
    Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
    Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);

    // leaq .LJTI0_0(%rip), BReg
    BuildMI(DispContBB, MIMD, TII->get(X86::LEA64r), BReg)
        .addReg(X86::RIP)
        .addImm(1)
        .addReg(0)
        .addJumpTableIndex(MJTI)
        .addReg(0);
    // movzx IReg64, IReg
    BuildMI(DispContBB, MIMD, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
        .addImm(0)
        .addReg(IReg)
        .addImm(X86::sub_32bit);

    switch (JTE) {
    case MachineJumpTableInfo::EK_BlockAddress:
      // jmpq *(BReg,IReg64,8)
      BuildMI(DispContBB, MIMD, TII->get(X86::JMP64m))
          .addReg(BReg)
          .addImm(8)
          .addReg(IReg64)
          .addImm(0)
          .addReg(0);
      break;
    case MachineJumpTableInfo::EK_LabelDifference32: {
      Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
      Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
      Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);

      // movl (BReg,IReg64,4), OReg
      BuildMI(DispContBB, MIMD, TII->get(X86::MOV32rm), OReg)
          .addReg(BReg)
          .addImm(4)
          .addReg(IReg64)
          .addImm(0)
          .addReg(0);
      // movsx OReg64, OReg
      BuildMI(DispContBB, MIMD, TII->get(X86::MOVSX64rr32), OReg64)
          .addReg(OReg);
      // addq BReg, OReg64, TReg
      BuildMI(DispContBB, MIMD, TII->get(X86::ADD64rr), TReg)
          .addReg(OReg64)
          .addReg(BReg);
      // jmpq *TReg
      BuildMI(DispContBB, MIMD, TII->get(X86::JMP64r)).addReg(TReg);
      break;
    }
    default:
      llvm_unreachable("Unexpected jump table encoding");
    }
  } else {
    // jmpl *.LJTI0_0(,IReg,4)
    BuildMI(DispContBB, MIMD, TII->get(X86::JMP32m))
        .addReg(0)
        .addImm(4)
        .addReg(IReg)
        .addJumpTableIndex(MJTI)
        .addReg(0);
  }

  // Add the jump table entries as successors to the MBB.
  SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
  for (auto &LP : LPadList)
    if (SeenMBBs.insert(LP).second)
      DispContBB->addSuccessor(LP);

  // N.B. the order the invoke BBs are processed in doesn't matter here.
  SmallVector<MachineBasicBlock *, 64> MBBLPads;
  const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
  for (MachineBasicBlock *MBB : InvokeBBs) {
    // Remove the landing pad successor from the invoke block and replace it
    // with the new dispatch block.
    // Keep a copy of Successors since it's modified inside the loop.
    SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
                                                   MBB->succ_rend());
    // FIXME: Avoid quadratic complexity.
    for (auto *MBBS : Successors) {
      if (MBBS->isEHPad()) {
        MBB->removeSuccessor(MBBS);
        MBBLPads.push_back(MBBS);
      }
    }

    MBB->addSuccessor(DispatchBB);

    // Find the invoke call and mark all of the callee-saved registers as
    // 'implicit defined' so that they're spilled. This prevents code from
    // moving instructions to before the EH block, where they will never be
    // executed.
    for (auto &II : reverse(*MBB)) {
      if (!II.isCall())
        continue;

      DenseMap<unsigned, bool> DefRegs;
      for (auto &MOp : II.operands())
        if (MOp.isReg())
          DefRegs[MOp.getReg()] = true;

      MachineInstrBuilder MIB(*MF, &II);
      for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
        unsigned Reg = SavedRegs[RegIdx];
        if (!DefRegs[Reg])
          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
      }

      break;
    }
  }

  // Mark all former landing pads as non-landing pads. The dispatch is the only
  // landing pad now.
  for (auto &LP : MBBLPads)
    LP->setIsEHPad(false);

  // The instruction is gone now.
  MI.eraseFromParent();

  return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const MIMetadata MIMD(MI);

  auto TMMImmToTMMReg = [](unsigned Imm) {
    assert(Imm < 8 && "Illegal tmm index");
    return X86::TMM0 + Imm;
  };
  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unexpected instr type to insert");
  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_addrX32:
  case X86::TLS_base_addr32:
  case X86::TLS_base_addr64:
  case X86::TLS_base_addrX32:
    return EmitLoweredTLSAddr(MI, BB);
  case X86::INDIRECT_THUNK_CALL32:
  case X86::INDIRECT_THUNK_CALL64:
  case X86::INDIRECT_THUNK_TCRETURN32:
  case X86::INDIRECT_THUNK_TCRETURN64:
    return EmitLoweredIndirectThunk(MI, BB);
  case X86::CATCHRET:
    return EmitLoweredCatchRet(MI, BB);
  case X86::SEG_ALLOCA_32:
  case X86::SEG_ALLOCA_64:
    return EmitLoweredSegAlloca(MI, BB);
  case X86::PROBED_ALLOCA_32:
  case X86::PROBED_ALLOCA_64:
    return EmitLoweredProbedAlloca(MI, BB);
  case X86::TLSCall_32:
  case X86::TLSCall_64:
    return EmitLoweredTLSCall(MI, BB);
  case X86::CMOV_FR16:
  case X86::CMOV_FR16X:
  case X86::CMOV_FR32:
  case X86::CMOV_FR32X:
  case X86::CMOV_FR64:
  case X86::CMOV_FR64X:
  case X86::CMOV_GR8:
  case X86::CMOV_GR16:
  case X86::CMOV_GR32:
  case X86::CMOV_RFP32:
  case X86::CMOV_RFP64:
  case X86::CMOV_RFP80:
  case X86::CMOV_VR64:
  case X86::CMOV_VR128:
  case X86::CMOV_VR128X:
  case X86::CMOV_VR256:
  case X86::CMOV_VR256X:
  case X86::CMOV_VR512:
  case X86::CMOV_VK1:
  case X86::CMOV_VK2:
  case X86::CMOV_VK4:
  case X86::CMOV_VK8:
  case X86::CMOV_VK16:
  case X86::CMOV_VK32:
  case X86::CMOV_VK64:
    return EmitLoweredSelect(MI, BB);
  case X86::FP80_ADDr:
  case X86::FP80_ADDm32: {
    // Change the floating point control register to use double extended
    // precision when performing the addition.
    int OrigCWFrameIdx =
        MF->getFrameInfo().CreateStackObject(2, Align(2), false);
    addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FNSTCW16m)),
                      OrigCWFrameIdx);

    // Load the old value of the control word...
    Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
    addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOVZX32rm16), OldCW),
                      OrigCWFrameIdx);

    // OR 0b11 into bits 8 and 9. 0b11 is the encoding for double extended
    // precision.
    Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
    BuildMI(*BB, MI, MIMD, TII->get(X86::OR32ri), NewCW)
        .addReg(OldCW, RegState::Kill)
        .addImm(0x300);

    // Extract to 16 bits.
    Register NewCW16 =
        MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), NewCW16)
        .addReg(NewCW, RegState::Kill, X86::sub_16bit);

    // Prepare memory for FLDCW.
    int NewCWFrameIdx =
        MF->getFrameInfo().CreateStackObject(2, Align(2), false);
    addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOV16mr)),
                      NewCWFrameIdx)
        .addReg(NewCW16, RegState::Kill);

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
                      NewCWFrameIdx);

    // Do the addition.
    if (MI.getOpcode() == X86::FP80_ADDr) {
      BuildMI(*BB, MI, MIMD, TII->get(X86::ADD_Fp80))
          .add(MI.getOperand(0))
          .add(MI.getOperand(1))
          .add(MI.getOperand(2));
    } else {
      BuildMI(*BB, MI, MIMD, TII->get(X86::ADD_Fp80m32))
          .add(MI.getOperand(0))
          .add(MI.getOperand(1))
          .add(MI.getOperand(2))
          .add(MI.getOperand(3))
          .add(MI.getOperand(4))
          .add(MI.getOperand(5))
          .add(MI.getOperand(6));
    }

    // Reload the original control word now.
    addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
                      OrigCWFrameIdx);

    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }
  case X86::FP32_TO_INT16_IN_MEM:
  case X86::FP32_TO_INT32_IN_MEM:
  case X86::FP32_TO_INT64_IN_MEM:
  case X86::FP64_TO_INT16_IN_MEM:
  case X86::FP64_TO_INT32_IN_MEM:
  case X86::FP64_TO_INT64_IN_MEM:
  case X86::FP80_TO_INT16_IN_MEM:
  case X86::FP80_TO_INT32_IN_MEM:
  case X86::FP80_TO_INT64_IN_MEM: {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
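    // (C-style FP-to-int conversions truncate, but the x87 default rounding
    // mode is round-to-nearest, so the control word has to be swapped around
    // the store.)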
    int OrigCWFrameIdx =
        MF->getFrameInfo().CreateStackObject(2, Align(2), false);
    addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FNSTCW16m)),
                      OrigCWFrameIdx);

    // Load the old value of the control word...
    Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
    addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOVZX32rm16), OldCW),
                      OrigCWFrameIdx);

    // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
    Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
    BuildMI(*BB, MI, MIMD, TII->get(X86::OR32ri), NewCW)
        .addReg(OldCW, RegState::Kill).addImm(0xC00);

    // Extract to 16 bits.
    Register NewCW16 =
        MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), NewCW16)
        .addReg(NewCW, RegState::Kill, X86::sub_16bit);

    // Prepare memory for FLDCW.
    int NewCWFrameIdx =
        MF->getFrameInfo().CreateStackObject(2, Align(2), false);
    addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOV16mr)),
                      NewCWFrameIdx)
        .addReg(NewCW16, RegState::Kill);

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, MI, MIMD,
                              TII->get(X86::FLDCW16m)), NewCWFrameIdx);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM = getAddressFromInstr(&MI, 0);
    addFullAddress(BuildMI(*BB, MI, MIMD, TII->get(Opc)), AM)
        .addReg(MI.getOperand(X86::AddrNumOperands).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
                      OrigCWFrameIdx);

    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }

  case X86::XBEGIN:
    return emitXBegin(MI, BB, Subtarget.getInstrInfo());

  case X86::VAARG_64:
  case X86::VAARG_X32:
    return EmitVAARGWithCustomInserter(MI, BB);

  case X86::EH_SjLj_SetJmp32:
  case X86::EH_SjLj_SetJmp64:
    return emitEHSjLjSetJmp(MI, BB);

  case X86::EH_SjLj_LongJmp32:
  case X86::EH_SjLj_LongJmp64:
    return emitEHSjLjLongJmp(MI, BB);

  case X86::Int_eh_sjlj_setup_dispatch:
    return EmitSjLjDispatchBlock(MI, BB);

  case TargetOpcode::STATEPOINT:
    // As an implementation detail, STATEPOINT shares the STACKMAP format at
    // this point in the process. We diverge later.
    return emitPatchPoint(MI, BB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    return emitPatchPoint(MI, BB);

  case TargetOpcode::PATCHABLE_EVENT_CALL:
  case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
    return BB;
  case X86::LCMPXCHG8B: {
    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
    // In addition to the four E[ABCD] registers implied by its encoding,
    // CMPXCHG8B requires a memory operand. If the current architecture is
    // i686 and the current function needs a base pointer (ESI on i686), the
    // register allocator cannot satisfy an address of the form
    // X(%reg, %reg, Y): there would never be enough unreserved registers
    // during regalloc (without the base pointer the only option would be
    // X(%edi, %esi, Y)). We give the register allocator a hand by
    // precomputing the address in a new vreg using LEA.

    // If it is not i686 or there is no base pointer - nothing to do here.
    if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
      return BB;

    // Even though this code does not necessarily need the base pointer to
    // be ESI, we check for that. The reason: if this assert fails, some
    // changes have happened in the compiler's base pointer handling, which
    // most probably have to be addressed somehow here as well.
    assert(TRI->getBaseRegister() == X86::ESI &&
           "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
           "base pointer in mind");

    MachineRegisterInfo &MRI = MF->getRegInfo();
    MVT SPTy = getPointerTy(MF->getDataLayout());
    const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
    Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);

    X86AddressMode AM = getAddressFromInstr(&MI, 0);
    // Regalloc does not need any help when the memory operand of CMPXCHG8B
    // does not use an index register.
    if (AM.IndexReg == X86::NoRegister)
      return BB;

    // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
    // four operand definitions that are E[ABCD] registers. We skip them and
    // then insert the LEA.
    MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
    while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
                                   RMBBI->definesRegister(X86::EBX) ||
                                   RMBBI->definesRegister(X86::ECX) ||
                                   RMBBI->definesRegister(X86::EDX))) {
      ++RMBBI;
    }
    MachineBasicBlock::iterator MBBI(RMBBI);
    addFullAddress(
        BuildMI(*BB, *MBBI, MIMD, TII->get(X86::LEA32r), computedAddrVReg), AM);

    setDirectAddressInInstr(&MI, 0, computedAddrVReg);

    return BB;
  }
  case X86::LCMPXCHG16B_NO_RBX: {
    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
    Register BasePtr = TRI->getBaseRegister();
    if (TRI->hasBasePointer(*MF) &&
        (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
      if (!BB->isLiveIn(BasePtr))
        BB->addLiveIn(BasePtr);
      // Save RBX into a virtual register.
      Register SaveRBX =
          MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
      BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), SaveRBX)
          .addReg(X86::RBX);
      Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
      MachineInstrBuilder MIB =
          BuildMI(*BB, MI, MIMD, TII->get(X86::LCMPXCHG16B_SAVE_RBX), Dst);
      for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
        MIB.add(MI.getOperand(Idx));
      MIB.add(MI.getOperand(X86::AddrNumOperands));
      MIB.addReg(SaveRBX);
    } else {
      // Simple case, just copy the virtual register to RBX.
      BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::RBX)
          .add(MI.getOperand(X86::AddrNumOperands));
      MachineInstrBuilder MIB =
          BuildMI(*BB, MI, MIMD, TII->get(X86::LCMPXCHG16B));
      for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
        MIB.add(MI.getOperand(Idx));
    }
    MI.eraseFromParent();
    return BB;
  }
  case X86::MWAITX: {
    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
    Register BasePtr = TRI->getBaseRegister();
    bool IsRBX = (BasePtr == X86::RBX || BasePtr == X86::EBX);
    // If there is no need to save the base pointer, we generate MWAITXrrr;
    // otherwise we generate the pseudo MWAITX_SAVE_RBX.
    if (!IsRBX || !TRI->hasBasePointer(*MF)) {
      BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::ECX)
          .addReg(MI.getOperand(0).getReg());
      BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EAX)
          .addReg(MI.getOperand(1).getReg());
      BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EBX)
          .addReg(MI.getOperand(2).getReg());
      BuildMI(*BB, MI, MIMD, TII->get(X86::MWAITXrrr));
      MI.eraseFromParent();
    } else {
      if (!BB->isLiveIn(BasePtr)) {
        BB->addLiveIn(BasePtr);
      }
      // Parameters can be copied into ECX and EAX but not EBX yet.
      BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::ECX)
          .addReg(MI.getOperand(0).getReg());
      BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EAX)
          .addReg(MI.getOperand(1).getReg());
      assert(Subtarget.is64Bit() && "Expected 64-bit mode!");
      // Save RBX into a virtual register.
      Register SaveRBX =
          MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
      BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), SaveRBX)
          .addReg(X86::RBX);
      // Generate the mwaitx pseudo.
      Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
      BuildMI(*BB, MI, MIMD, TII->get(X86::MWAITX_SAVE_RBX))
          .addDef(Dst) // Destination tied in with SaveRBX.
          .addReg(MI.getOperand(2).getReg()) // Input value of EBX.
          .addUse(SaveRBX); // Save of base pointer.
      MI.eraseFromParent();
    }
    return BB;
  }
  case TargetOpcode::PREALLOCATED_SETUP: {
    assert(Subtarget.is32Bit() && "preallocated only used in 32-bit");
    auto MFI = MF->getInfo<X86MachineFunctionInfo>();
    MFI->setHasPreallocatedCall(true);
    int64_t PreallocatedId = MI.getOperand(0).getImm();
    size_t StackAdjustment = MFI->getPreallocatedStackSize(PreallocatedId);
    assert(StackAdjustment != 0 && "0 stack adjustment");
    LLVM_DEBUG(dbgs() << "PREALLOCATED_SETUP stack adjustment "
                      << StackAdjustment << "\n");
    BuildMI(*BB, MI, MIMD, TII->get(X86::SUB32ri), X86::ESP)
        .addReg(X86::ESP)
        .addImm(StackAdjustment);
    MI.eraseFromParent();
    return BB;
  }
  case TargetOpcode::PREALLOCATED_ARG: {
    assert(Subtarget.is32Bit() && "preallocated calls only used in 32-bit");
    int64_t PreallocatedId = MI.getOperand(1).getImm();
    int64_t ArgIdx = MI.getOperand(2).getImm();
    auto MFI = MF->getInfo<X86MachineFunctionInfo>();
    size_t ArgOffset = MFI->getPreallocatedArgOffsets(PreallocatedId)[ArgIdx];
    LLVM_DEBUG(dbgs() << "PREALLOCATED_ARG arg index " << ArgIdx
                      << ", arg offset " << ArgOffset << "\n");
    // stack pointer + offset
    addRegOffset(BuildMI(*BB, MI, MIMD, TII->get(X86::LEA32r),
                         MI.getOperand(0).getReg()),
                 X86::ESP, false, ArgOffset);
    MI.eraseFromParent();
    return BB;
  }
  case X86::PTDPBSSD:
  case X86::PTDPBSUD:
  case X86::PTDPBUSD:
  case X86::PTDPBUUD:
  case X86::PTDPBF16PS:
  case X86::PTDPFP16PS: {
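    // AMX pseudos carry their tile operands as immediates; expansion maps
    // each immediate onto the corresponding physical TMM register via
    // TMMImmToTMMReg.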
    unsigned Opc;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("illegal opcode!");
    case X86::PTDPBSSD: Opc = X86::TDPBSSD; break;
    case X86::PTDPBSUD: Opc = X86::TDPBSUD; break;
    case X86::PTDPBUSD: Opc = X86::TDPBUSD; break;
    case X86::PTDPBUUD: Opc = X86::TDPBUUD; break;
    case X86::PTDPBF16PS: Opc = X86::TDPBF16PS; break;
    case X86::PTDPFP16PS: Opc = X86::TDPFP16PS; break;
    }

    MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
    MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
    MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
    MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
    MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);

    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }
  case X86::PTILEZERO: {
    unsigned Imm = MI.getOperand(0).getImm();
    BuildMI(*BB, MI, MIMD, TII->get(X86::TILEZERO), TMMImmToTMMReg(Imm));
    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }
  case X86::PTILELOADD:
  case X86::PTILELOADDT1:
  case X86::PTILESTORED: {
    unsigned Opc;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("illegal opcode!");
    case X86::PTILELOADD: Opc = X86::TILELOADD; break;
    case X86::PTILELOADDT1: Opc = X86::TILELOADDT1; break;
    case X86::PTILESTORED: Opc = X86::TILESTORED; break;
    }

    MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
    unsigned CurOp = 0;
    if (Opc != X86::TILESTORED)
      MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
                 RegState::Define);

    MIB.add(MI.getOperand(CurOp++)); // base
    MIB.add(MI.getOperand(CurOp++)); // scale
    MIB.add(MI.getOperand(CurOp++)); // index -- stride
    MIB.add(MI.getOperand(CurOp++)); // displacement
    MIB.add(MI.getOperand(CurOp++)); // segment

    if (Opc == X86::TILESTORED)
      MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
                 RegState::Undef);

    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }
  case X86::PTCMMIMFP16PS:
  case X86::PTCMMRLFP16PS: {
    const MIMetadata MIMD(MI);
    unsigned Opc;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unexpected instruction!");
    case X86::PTCMMIMFP16PS: Opc = X86::TCMMIMFP16PS; break;
    case X86::PTCMMRLFP16PS: Opc = X86::TCMMRLFP16PS; break;
    }
    MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
    MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
    MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
    MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
    MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }
  }
}
//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

bool
X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
                                                const APInt &DemandedBits,
                                                const APInt &DemandedElts,
                                                TargetLoweringOpt &TLO) const {
  EVT VT = Op.getValueType();
  unsigned Opcode = Op.getOpcode();
  unsigned EltSize = VT.getScalarSizeInBits();

  if (VT.isVector()) {
    // If the constant is only all signbits in the active bits, then we should
    // extend it to the entire constant to allow it to act as a boolean
    // constant vector.
    auto NeedsSignExtension = [&](SDValue V, unsigned ActiveBits) {
      if (!ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
        return false;
      for (unsigned i = 0, e = V.getNumOperands(); i != e; ++i) {
        if (!DemandedElts[i] || V.getOperand(i).isUndef())
          continue;
        const APInt &Val = V.getConstantOperandAPInt(i);
        if (Val.getBitWidth() > Val.getNumSignBits() &&
            Val.trunc(ActiveBits).getNumSignBits() == ActiveBits)
          return true;
      }
      return false;
    };
    // For vectors - if we have a constant, then try to sign extend.
    // TODO: Handle AND cases.
    unsigned ActiveBits = DemandedBits.getActiveBits();
    if (EltSize > ActiveBits && EltSize > 1 && isTypeLegal(VT) &&
        (Opcode == ISD::OR || Opcode == ISD::XOR || Opcode == X86ISD::ANDNP) &&
        NeedsSignExtension(Op.getOperand(1), ActiveBits)) {
      EVT ExtSVT = EVT::getIntegerVT(*TLO.DAG.getContext(), ActiveBits);
      EVT ExtVT = EVT::getVectorVT(*TLO.DAG.getContext(), ExtSVT,
                                   VT.getVectorNumElements());
      SDValue NewC =
          TLO.DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(Op), VT,
                          Op.getOperand(1), TLO.DAG.getValueType(ExtVT));
      SDValue NewOp =
          TLO.DAG.getNode(Opcode, SDLoc(Op), VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }
    return false;
  }

  // Only optimize Ands to prevent shrinking a constant that could be
  // matched by movzx.
  if (Opcode != ISD::AND)
    return false;

  // Make sure the RHS really is a constant.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;

  const APInt &Mask = C->getAPIntValue();

  // Clear all non-demanded bits initially.
  APInt ShrunkMask = Mask & DemandedBits;

  // Find the width of the shrunk mask.
  unsigned Width = ShrunkMask.getActiveBits();

  // If the mask is all 0s there's nothing to do here.
  if (Width == 0)
    return false;

  // Find the next power of 2 width, rounding up to a byte.
  Width = llvm::bit_ceil(std::max(Width, 8U));
  // Truncate the width to size to handle illegal types.
  Width = std::min(Width, EltSize);

  // Calculate a possible zero extend mask for this constant.
  APInt ZeroExtendMask = APInt::getLowBitsSet(EltSize, Width);
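  // E.g. for an i32 AND with Mask = 0xFF0 where only bits 4-7 are demanded,
  // ShrunkMask is 0x0F0, Width rounds up to 8 and the mask can be relaxed to
  // 0xFF, which movzbl matches.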
  // If we aren't changing the mask, just return true to keep it and prevent
  // the caller from optimizing.
  if (ZeroExtendMask == Mask)
    return true;

  // Make sure the new mask can be represented by a combination of mask bits
  // and non-demanded bits.
  if (!ZeroExtendMask.isSubsetOf(Mask | ~DemandedBits))
    return false;

  // Replace the constant with the zero extend mask.
  SDLoc DL(Op);
  SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
  SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
  return TLO.CombineTo(Op, NewOp);
}
void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  Known.resetAll();
  switch (Opc) {
  default: break;
  case X86ISD::MUL_IMM: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = KnownBits::mul(Known, Known2);
    break;
  }
  case X86ISD::SETCC:
    Known.Zero.setBitsFrom(1);
    break;
  case X86ISD::MOVMSK: {
    unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
    Known.Zero.setBitsFrom(NumLoBits);
    break;
  }
  case X86ISD::PEXTRB:
  case X86ISD::PEXTRW: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
                                            Op.getConstantOperandVal(1));
    Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
    Known = Known.anyextOrTrunc(BitWidth);
    Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
    break;
  }
  case X86ISD::VSRAI:
  case X86ISD::VSHLI:
  case X86ISD::VSRLI: {
    unsigned ShAmt = Op.getConstantOperandVal(1);
    if (ShAmt >= VT.getScalarSizeInBits()) {
      // Out of range logical bit shifts are guaranteed to be zero.
      // Out of range arithmetic bit shifts splat the sign bit.
      if (Opc != X86ISD::VSRAI) {
        Known.setAllZero();
        break;
      }

      ShAmt = VT.getScalarSizeInBits() - 1;
    }

    Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Opc == X86ISD::VSHLI) {
      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;
      // Low bits are known zero.
      Known.Zero.setLowBits(ShAmt);
    } else if (Opc == X86ISD::VSRLI) {
      Known.Zero.lshrInPlace(ShAmt);
      Known.One.lshrInPlace(ShAmt);
      // High bits are known zero.
      Known.Zero.setHighBits(ShAmt);
    } else {
      Known.Zero.ashrInPlace(ShAmt);
      Known.One.ashrInPlace(ShAmt);
    }
    break;
  }
  case X86ISD::PACKUS: {
    // PACKUS is just a truncation if the upper half is zero.
    APInt DemandedLHS, DemandedRHS;
    getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);

    Known.One = APInt::getAllOnes(BitWidth * 2);
    Known.Zero = APInt::getAllOnes(BitWidth * 2);

    KnownBits Known2;
    if (!!DemandedLHS) {
      Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
      Known = Known.intersectWith(Known2);
    }
    if (!!DemandedRHS) {
      Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
      Known = Known.intersectWith(Known2);
    }

    if (Known.countMinLeadingZeros() < BitWidth)
      Known.resetAll();
    Known = Known.trunc(BitWidth);
    break;
  }
  case X86ISD::VBROADCAST: {
    SDValue Src = Op.getOperand(0);
    if (!Src.getSimpleValueType().isVector()) {
      Known = DAG.computeKnownBits(Src, Depth + 1);
      return;
    }
    break;
  }
  case X86ISD::AND: {
    if (Op.getResNo() == 0) {
      KnownBits Known2;
      Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
      Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      Known &= Known2;
    }
    break;
  }
  case X86ISD::ANDNP: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // ANDNP = (~X & Y);
    Known.One &= Known2.Zero;
    Known.Zero |= Known2.One;
    break;
  }
  case X86ISD::FOR: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known |= Known2;
    break;
  }
  case X86ISD::PSADBW: {
    assert(VT.getScalarType() == MVT::i64 &&
           Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
           "Unexpected PSADBW types");

    // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
    Known.Zero.setBitsFrom(16);
    break;
  }
  case X86ISD::PCMPGT:
  case X86ISD::PCMPEQ: {
    KnownBits KnownLhs =
        DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    KnownBits KnownRhs =
        DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    std::optional<bool> Res = Opc == X86ISD::PCMPEQ
                                  ? KnownBits::eq(KnownLhs, KnownRhs)
                                  : KnownBits::sgt(KnownLhs, KnownRhs);
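    // These compares produce all-ones or all-zeros per element, so a result
    // provable from the known bits of the operands fixes every bit.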
    if (Res) {
      if (*Res)
        Known.setAllOnes();
      else
        Known.setAllZero();
    }
    break;
  }
  case X86ISD::PMULUDQ: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known = Known.trunc(BitWidth / 2).zext(BitWidth);
    Known2 = Known2.trunc(BitWidth / 2).zext(BitWidth);
    Known = KnownBits::mul(Known, Known2);
    break;
  }
  case X86ISD::CMOV: {
    Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known = Known.intersectWith(Known2);
    break;
  }
  case X86ISD::BEXTR:
  case X86ISD::BEXTRI: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
      unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
      unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);

      // If the length is 0, the result is 0.
      if (Length == 0) {
        Known.setAllZero();
        break;
      }

      if ((Shift + Length) <= BitWidth) {
        Known = DAG.computeKnownBits(Op0, Depth + 1);
        Known = Known.extractBits(Length, Shift);
        Known = Known.zextOrTrunc(BitWidth);
      }
    }
    break;
  }
  case X86ISD::PDEP: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // Zeros are retained from the mask operand. But not ones.
    Known.One.clearAllBits();
    // The result will have at least as many trailing zeros as the non-mask
    // operand since bits can only map to the same or higher bit position.
    Known.Zero.setLowBits(Known2.countMinTrailingZeros());
    break;
  }
  case X86ISD::PEXT: {
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    // The result has as many leading zeros as the number of zeros in the mask.
    unsigned Count = Known.Zero.popcount();
    Known.Zero = APInt::getHighBitsSet(BitWidth, Count);
    Known.One.clearAllBits();
    break;
  }
  case X86ISD::VTRUNC:
  case X86ISD::VTRUNCS:
  case X86ISD::VTRUNCUS:
  case X86ISD::CVTSI2P:
  case X86ISD::CVTUI2P:
  case X86ISD::CVTP2SI:
  case X86ISD::CVTP2UI:
  case X86ISD::MCVTP2SI:
  case X86ISD::MCVTP2UI:
  case X86ISD::CVTTP2SI:
  case X86ISD::CVTTP2UI:
  case X86ISD::MCVTTP2SI:
  case X86ISD::MCVTTP2UI:
  case X86ISD::MCVTSI2P:
  case X86ISD::MCVTUI2P:
  case X86ISD::VFPROUND:
  case X86ISD::VMFPROUND:
  case X86ISD::CVTPS2PH:
  case X86ISD::MCVTPS2PH: {
    // Truncations/Conversions - upper elements are known zero.
    EVT SrcVT = Op.getOperand(0).getValueType();
    if (SrcVT.isVector()) {
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      if (NumElts > NumSrcElts && DemandedElts.countr_zero() >= NumSrcElts)
        Known.setAllZero();
    }
    break;
  }
  case X86ISD::STRICT_CVTTP2SI:
  case X86ISD::STRICT_CVTTP2UI:
  case X86ISD::STRICT_CVTSI2P:
  case X86ISD::STRICT_CVTUI2P:
  case X86ISD::STRICT_VFPROUND:
  case X86ISD::STRICT_CVTPS2PH: {
    // Strict Conversions - upper elements are known zero.
    EVT SrcVT = Op.getOperand(1).getValueType();
    if (SrcVT.isVector()) {
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      if (NumElts > NumSrcElts && DemandedElts.countr_zero() >= NumSrcElts)
        Known.setAllZero();
    }
    break;
  }
  case X86ISD::MOVQ2DQ: {
    // Move from MMX to XMM. Upper half of XMM should be 0.
    if (DemandedElts.countr_zero() >= (NumElts / 2))
      Known.setAllZero();
    break;
  }
  case X86ISD::VBROADCAST_LOAD: {
    APInt UndefElts;
    SmallVector<APInt, 16> EltBits;
    if (getTargetConstantBitsFromNode(Op, BitWidth, UndefElts, EltBits,
                                      /*AllowWholeUndefs*/ false,
                                      /*AllowPartialUndefs*/ false)) {
      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (unsigned I = 0; I != NumElts; ++I) {
        if (!DemandedElts[I])
          continue;
        if (UndefElts[I]) {
          Known.resetAll();
          break;
        }
        KnownBits Known2 = KnownBits::makeConstant(EltBits[I]);
        Known = Known.intersectWith(Known2);
      }
      return;
    }
    break;
  }
  }

  // Handle target shuffles.
  // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
  if (isTargetShuffle(Opc)) {
    SmallVector<int, 64> Mask;
    SmallVector<SDValue, 2> Ops;
    if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
      unsigned NumOps = Ops.size();
      unsigned NumElts = VT.getVectorNumElements();
      if (Mask.size() == NumElts) {
        SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
        Known.Zero.setAllBits(); Known.One.setAllBits();
        for (unsigned i = 0; i != NumElts; ++i) {
          if (!DemandedElts[i])
            continue;
          int M = Mask[i];
          if (M == SM_SentinelUndef) {
            // For UNDEF elements, we don't know anything about the common state
            // of the shuffle result.
            Known.resetAll();
            break;
          }
          if (M == SM_SentinelZero) {
            Known.One.clearAllBits();
            continue;
          }
          assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
                 "Shuffle index out of range");

          unsigned OpIdx = (unsigned)M / NumElts;
          unsigned EltIdx = (unsigned)M % NumElts;
          if (Ops[OpIdx].getValueType() != VT) {
            // TODO - handle target shuffle ops with different value types.
            Known.resetAll();
            break;
          }
          DemandedOps[OpIdx].setBit(EltIdx);
        }
        // Known bits are the values that are shared by every demanded element.
        for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
          if (!DemandedOps[i])
            continue;
          KnownBits Known2 =
              DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
          Known = Known.intersectWith(Known2);
        }
      }
    }
  }
}
unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getScalarSizeInBits();
  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case X86ISD::SETCC_CARRY:
    // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
    return VTBits;

  case X86ISD::VTRUNC: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
    assert(VTBits < NumSrcBits && "Illegal truncation input type");
    APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
    unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
    if (Tmp > (NumSrcBits - VTBits))
      return Tmp - (NumSrcBits - VTBits);
    return 1;
  }

  case X86ISD::PACKSS: {
    // PACKSS is just a truncation if the sign bits extend to the packed size.
    APInt DemandedLHS, DemandedRHS;
    getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
                        DemandedRHS);

    // Helper to detect PACKSSDW(BITCAST(PACKSSDW(X)),BITCAST(PACKSSDW(Y)))
    // patterns often used to compact vXi64 allsignbit patterns.
    auto NumSignBitsPACKSS = [&](SDValue V, const APInt &Elts) -> unsigned {
      SDValue BC = peekThroughBitcasts(V);
      if (BC.getOpcode() == X86ISD::PACKSS &&
          BC.getScalarValueSizeInBits() == 16 &&
          V.getScalarValueSizeInBits() == 32) {
        SDValue BC0 = peekThroughBitcasts(BC.getOperand(0));
        SDValue BC1 = peekThroughBitcasts(BC.getOperand(1));
        if (BC0.getScalarValueSizeInBits() == 64 &&
            BC1.getScalarValueSizeInBits() == 64 &&
            DAG.ComputeNumSignBits(BC0, Depth + 1) == 64 &&
            DAG.ComputeNumSignBits(BC1, Depth + 1) == 64)
          return 32;
      }
      return DAG.ComputeNumSignBits(V, Elts, Depth + 1);
    };

    unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
    unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
    if (!!DemandedLHS)
      Tmp0 = NumSignBitsPACKSS(Op.getOperand(0), DemandedLHS);
    if (!!DemandedRHS)
      Tmp1 = NumSignBitsPACKSS(Op.getOperand(1), DemandedRHS);
    unsigned Tmp = std::min(Tmp0, Tmp1);
    if (Tmp > (SrcBits - VTBits))
      return Tmp - (SrcBits - VTBits);
    return 1;
  }

  case X86ISD::VBROADCAST: {
    SDValue Src = Op.getOperand(0);
    if (!Src.getSimpleValueType().isVector())
      return DAG.ComputeNumSignBits(Src, Depth + 1);
    break;
  }

  case X86ISD::VSHLI: {
    SDValue Src = Op.getOperand(0);
    const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
    if (ShiftVal.uge(VTBits))
      return VTBits; // Shifted all bits out --> zero.
    unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
    if (ShiftVal.uge(Tmp))
      return 1; // Shifted all sign bits out --> unknown.
    return Tmp - ShiftVal.getZExtValue();
  }

  case X86ISD::VSRAI: {
    SDValue Src = Op.getOperand(0);
    APInt ShiftVal = Op.getConstantOperandAPInt(1);
    if (ShiftVal.uge(VTBits - 1))
      return VTBits; // Sign splat.
    unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
    ShiftVal += Tmp;
    return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
  }

  case X86ISD::FSETCC:
    // cmpss/cmpsd return zero/all-bits result values in the bottom element.
    if (VT == MVT::f32 || VT == MVT::f64 ||
        ((VT == MVT::v4f32 || VT == MVT::v2f64) && DemandedElts == 1))
      return VTBits;
    break;

  case X86ISD::PCMPGT:
  case X86ISD::PCMPEQ:
  case X86ISD::CMPP:
  case X86ISD::VPCOM:
  case X86ISD::VPCOMU:
    // Vector compares return zero/all-bits result values.
    return VTBits;

  case X86ISD::ANDNP: {
    unsigned Tmp0 =
        DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp0 == 1) return 1; // Early out.
    unsigned Tmp1 =
        DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    return std::min(Tmp0, Tmp1);
  }

  case X86ISD::CMOV: {
    unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp0 == 1) return 1; // Early out.
    unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
    return std::min(Tmp0, Tmp1);
  }
  }

  // Handle target shuffles.
  // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
  if (isTargetShuffle(Opcode)) {
    SmallVector<int, 64> Mask;
    SmallVector<SDValue, 2> Ops;
    if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
      unsigned NumOps = Ops.size();
      unsigned NumElts = VT.getVectorNumElements();
      if (Mask.size() == NumElts) {
        SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
        for (unsigned i = 0; i != NumElts; ++i) {
          if (!DemandedElts[i])
            continue;
          int M = Mask[i];
          if (M == SM_SentinelUndef) {
            // For UNDEF elements, we don't know anything about the common state
            // of the shuffle result.
            return 1;
          } else if (M == SM_SentinelZero) {
            // Zero = all sign bits.
            continue;
          }
          assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
                 "Shuffle index out of range");

          unsigned OpIdx = (unsigned)M / NumElts;
          unsigned EltIdx = (unsigned)M % NumElts;
          if (Ops[OpIdx].getValueType() != VT) {
            // TODO - handle target shuffle ops with different value types.
            return 1;
          }
          DemandedOps[OpIdx].setBit(EltIdx);
        }
        unsigned Tmp0 = VTBits;
        for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
          if (!DemandedOps[i])
            continue;
          unsigned Tmp1 =
              DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
          Tmp0 = std::min(Tmp0, Tmp1);
        }
        return Tmp0;
      }
    }
  }

  // Fallback case.
  return 1;
}
SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
  if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
    return N->getOperand(0);
  return N;
}

// Helper to look for a normal load that can be narrowed into a vzload with the
// specified VT and memory VT. Returns SDValue() on failure.
static SDValue narrowLoadToVZLoad(LoadSDNode *LN, MVT MemVT, MVT VT,
                                  SelectionDAG &DAG) {
  // Can't if the load is volatile or atomic.
  if (!LN->isSimple())
    return SDValue();

  SDVTList Tys = DAG.getVTList(VT, MVT::Other);
  SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
  return DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, SDLoc(LN), Tys, Ops, MemVT,
                                 LN->getPointerInfo(), LN->getOriginalAlign(),
                                 LN->getMemOperand()->getFlags());
}
// Attempt to match a combined shuffle mask against supported unary shuffle
// instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
                              bool AllowFloatDomain, bool AllowIntDomain,
                              SDValue V1, const SelectionDAG &DAG,
                              const X86Subtarget &Subtarget, unsigned &Shuffle,
                              MVT &SrcVT, MVT &DstVT) {
  unsigned NumMaskElts = Mask.size();
  unsigned MaskEltSize = MaskVT.getScalarSizeInBits();

  // Match against a VZEXT_MOVL vXi32 and vXi16 zero-extending instruction.
  if (Mask[0] == 0 &&
      (MaskEltSize == 32 || (MaskEltSize == 16 && Subtarget.hasFP16()))) {
    if ((isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) ||
        (V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
         isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1))) {
      Shuffle = X86ISD::VZEXT_MOVL;
      if (MaskEltSize == 16)
        SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
      else
        SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
      return true;
    }
  }

  // Match against a ANY/SIGN/ZERO_EXTEND_VECTOR_INREG instruction.
  // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
                         (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
    unsigned MaxScale = 64 / MaskEltSize;
    bool UseSign = V1.getScalarValueSizeInBits() == MaskEltSize &&
                   DAG.ComputeNumSignBits(V1) == MaskEltSize;
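    // For a given Scale, element i of the narrowed result must come from mask
    // position i * Scale, and the Scale - 1 positions in between must be
    // undef (anyext), zero (zext) or repeats of the base element (sext);
    // e.g. a vXi16 mask {0, Z, 1, Z, ...} matches a zero-extension to vXi32.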
    for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
      bool MatchAny = true;
      bool MatchZero = true;
      bool MatchSign = UseSign;
      unsigned NumDstElts = NumMaskElts / Scale;
      for (unsigned i = 0;
           i != NumDstElts && (MatchAny || MatchSign || MatchZero); ++i) {
        if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
          MatchAny = MatchSign = MatchZero = false;
          break;
        }
        unsigned Pos = (i * Scale) + 1;
        unsigned Len = Scale - 1;
        MatchAny &= isUndefInRange(Mask, Pos, Len);
        MatchZero &= isUndefOrZeroInRange(Mask, Pos, Len);
        MatchSign &= isUndefOrEqualInRange(Mask, (int)i, Pos, Len);
      }
      if (MatchAny || MatchSign || MatchZero) {
        assert((MatchSign || MatchZero) &&
               "Failed to match sext/zext but matched aext?");
        unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
        MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType()
                                          : MVT::getIntegerVT(MaskEltSize);
        SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);

        Shuffle = unsigned(
            MatchAny ? ISD::ANY_EXTEND
                     : (MatchSign ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND));
        if (SrcVT.getVectorNumElements() != NumDstElts)
          Shuffle = DAG.getOpcode_EXTEND_VECTOR_INREG(Shuffle);

        DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
        DstVT = MVT::getVectorVT(DstVT, NumDstElts);
        return true;
      }
    }
  }

  // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
  if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2()) ||
       (MaskEltSize == 16 && Subtarget.hasFP16())) &&
      isUndefOrEqual(Mask[0], 0) &&
      isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
    Shuffle = X86ISD::VZEXT_MOVL;
    if (MaskEltSize == 16)
      SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
    else
      SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
    return true;
  }

  // Check if we have SSE3 which will let us use MOVDDUP etc. The
  // instructions are no slower than UNPCKLPD but have the option to
  // fold the input operand into even an unaligned memory load.
  if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
    if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG, V1)) {
      Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v2f64;
      return true;
    }
    if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
      Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v4f32;
      return true;
    }
    if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3}, DAG, V1)) {
      Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v4f32;
      return true;
    }
  }

  if (MaskVT.is256BitVector() && AllowFloatDomain) {
    assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
    if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
      Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v4f64;
      return true;
    }
    if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
                                  V1)) {
      Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v8f32;
      return true;
    }
    if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3, 5, 5, 7, 7}, DAG,
                                  V1)) {
      Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v8f32;
      return true;
    }
  }

  if (MaskVT.is512BitVector() && AllowFloatDomain) {
    assert(Subtarget.hasAVX512() &&
           "AVX512 required for 512-bit vector shuffles");
    if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
                                  V1)) {
      Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v8f64;
      return true;
    }
    if (isTargetShuffleEquivalent(
            MaskVT, Mask,
            {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14}, DAG, V1)) {
      Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v16f32;
      return true;
    }
    if (isTargetShuffleEquivalent(
            MaskVT, Mask,
            {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}, DAG, V1)) {
      Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v16f32;
      return true;
    }
  }

  return false;
}
// Attempt to match a combined shuffle mask against supported unary immediate
// permute instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
                                     const APInt &Zeroable,
                                     bool AllowFloatDomain, bool AllowIntDomain,
                                     const SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget,
                                     unsigned &Shuffle, MVT &ShuffleVT,
                                     unsigned &PermuteImm) {
  unsigned NumMaskElts = Mask.size();
  unsigned InputSizeInBits = MaskVT.getSizeInBits();
  unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
  MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
  bool ContainsZeros = isAnyZero(Mask);

  // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
  if (!ContainsZeros && MaskScalarSizeInBits == 64) {
    // Check for lane crossing permutes.
    if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
      // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
      if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
        Shuffle = X86ISD::VPERMI;
        ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
        PermuteImm = getV4X86ShuffleImm(Mask);
        return true;
      }
      if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
        SmallVector<int, 4> RepeatedMask;
        if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
          Shuffle = X86ISD::VPERMI;
          ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
          PermuteImm = getV4X86ShuffleImm(RepeatedMask);
          return true;
        }
      }
    } else if (AllowFloatDomain && Subtarget.hasAVX()) {
      // VPERMILPD can permute with a non-repeating shuffle.
      Shuffle = X86ISD::VPERMILPI;
      ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
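      // VPERMILPD's immediate holds one bit per element selecting which
      // double of the containing 128-bit lane to take, hence the
      // (M & 1) << i accumulation below.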
39851 for (int i = 0, e = Mask.size(); i != e; ++i) {
39853 if (M == SM_SentinelUndef)
39855 assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
39856 PermuteImm |= (M & 1) << i;
39862 // We are checking for shuffle match or shift match. Loop twice so we can
39863 // order which we try and match first depending on target preference.
39864 for (unsigned Order = 0; Order < 2; ++Order) {
39865 if (Subtarget.preferLowerShuffleAsShift() ? (Order == 1) : (Order == 0)) {
39866 // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
39867 // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
39868 // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
39869 if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
39870 !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
39871 SmallVector<int, 4> RepeatedMask;
39872 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
39873 // Narrow the repeated mask to create 32-bit element permutes.
39874 SmallVector<int, 4> WordMask = RepeatedMask;
39875 if (MaskScalarSizeInBits == 64)
39876 narrowShuffleMaskElts(2, RepeatedMask, WordMask);
39878 Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
39879 ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
39880 ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
39881 PermuteImm = getV4X86ShuffleImm(WordMask);
39886 // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
      if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16 &&
          ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
           (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
           (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
        SmallVector<int, 4> RepeatedMask;
        if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
          ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
          ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);

          // PSHUFLW: permute lower 4 elements only.
          if (isUndefOrInRange(LoMask, 0, 4) &&
              isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
            Shuffle = X86ISD::PSHUFLW;
            ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
            PermuteImm = getV4X86ShuffleImm(LoMask);
            return true;
          }

          // PSHUFHW: permute upper 4 elements only.
          if (isUndefOrInRange(HiMask, 4, 8) &&
              isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
            // Offset the HiMask so that we can create the shuffle immediate.
            int OffsetHiMask[4];
            for (int i = 0; i != 4; ++i)
              OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);

            Shuffle = X86ISD::PSHUFHW;
            ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
            PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
            return true;
          }
        }
      }

      // Attempt to match against bit rotates.
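      // e.g. a repeated v16i8 mask <3,0,1,2> moves each byte up one position
      // within its i32 element, i.e. a 32-bit rotate-left by 8 (XOP VPROTD or
      // AVX512 VPROLD).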
      if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits < 64 &&
          ((MaskVT.is128BitVector() && Subtarget.hasXOP()) ||
           Subtarget.hasAVX512())) {
        int RotateAmt = matchShuffleAsBitRotate(ShuffleVT, MaskScalarSizeInBits,
                                                Subtarget, Mask);
        if (0 < RotateAmt) {
          Shuffle = X86ISD::VROTLI;
          PermuteImm = (unsigned)RotateAmt;
          return true;
        }
      }
    }

    // Attempt to match against byte/bit shifts.
    if (AllowIntDomain &&
        ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
         (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
         (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
      int ShiftAmt =
          matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits, Mask, 0,
                              Zeroable, Subtarget);
      if (0 < ShiftAmt && (!ShuffleVT.is512BitVector() || Subtarget.hasBWI() ||
                           32 <= ShuffleVT.getScalarSizeInBits())) {
        // Byte shifts can be slower, so only match them on the second attempt.
        if (Order == 0 &&
            (Shuffle == X86ISD::VSHLDQ || Shuffle == X86ISD::VSRLDQ))
          continue;

        PermuteImm = (unsigned)ShiftAmt;
        return true;
      }
    }
  }

  return false;
}

// Attempt to match a combined unary shuffle mask against supported binary
// shuffle instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
                               bool AllowFloatDomain, bool AllowIntDomain,
                               SDValue &V1, SDValue &V2, const SDLoc &DL,
                               SelectionDAG &DAG, const X86Subtarget &Subtarget,
                               unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
                               bool IsUnary) {
  unsigned NumMaskElts = Mask.size();
  unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
  unsigned SizeInBits = MaskVT.getSizeInBits();

  if (MaskVT.is128BitVector()) {
    if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG) &&
        AllowFloatDomain) {
      V2 = V1;
      V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
      Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
      SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
      return true;
    }
    if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1}, DAG) &&
        AllowFloatDomain) {
      V2 = V1;
      Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
      SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
      return true;
    }
    if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 3}, DAG) &&
        Subtarget.hasSSE2() && (AllowFloatDomain || !Subtarget.hasSSE41())) {
      std::swap(V1, V2);
      Shuffle = X86ISD::MOVSD;
      SrcVT = DstVT = MVT::v2f64;
      return true;
    }
    if (isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG) &&
        (AllowFloatDomain || !Subtarget.hasSSE41())) {
      Shuffle = X86ISD::MOVSS;
      SrcVT = DstVT = MVT::v4f32;
      return true;
    }
    if (isTargetShuffleEquivalent(MaskVT, Mask, {8, 1, 2, 3, 4, 5, 6, 7},
                                  DAG) &&
        Subtarget.hasFP16()) {
      Shuffle = X86ISD::MOVSH;
      SrcVT = DstVT = MVT::v8f16;
      return true;
    }
  }

  // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
  if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
      ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
      ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
    if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
                             Subtarget)) {
      DstVT = MaskVT;
      return true;
    }
  }

  // TODO: Can we handle this inside matchShuffleWithPACK?
  if (MaskVT == MVT::v4i32 && Subtarget.hasSSE2() &&
      isTargetShuffleEquivalent(MaskVT, Mask, {0, 2, 4, 6}, DAG) &&
      V1.getScalarValueSizeInBits() == 64 &&
      V2.getScalarValueSizeInBits() == 64) {
    // Use (SSE41) PACKUSDW if the leading zero bits extend down into the
    // lowest 16 bits.
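    // A {0,2,4,6}-style v4i32 mask selects the low dword of each i64 element;
    // when the upper 48 (resp. 56) bits of every i64 are known zero, the
    // unsigned-saturating pack cannot clamp any value, so the shuffle is
    // exactly a PACKUS.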
    unsigned MinLZV1 = DAG.computeKnownBits(V1).countMinLeadingZeros();
    unsigned MinLZV2 = DAG.computeKnownBits(V2).countMinLeadingZeros();
    if (Subtarget.hasSSE41() && MinLZV1 >= 48 && MinLZV2 >= 48) {
      SrcVT = MVT::v4i32;
      DstVT = MVT::v8i16;
      Shuffle = X86ISD::PACKUS;
      return true;
    }
    // Use PACKUSWB if the leading zero bits extend down into the lowest 8
    // bits.
    if (MinLZV1 >= 56 && MinLZV2 >= 56) {
      SrcVT = MVT::v8i16;
      DstVT = MVT::v16i8;
      Shuffle = X86ISD::PACKUS;
      return true;
    }
    // Use PACKSSDW if the sign bits extend down into the lowest 16 bits.
    if (DAG.ComputeNumSignBits(V1) > 48 && DAG.ComputeNumSignBits(V2) > 48) {
      SrcVT = MVT::v4i32;
      DstVT = MVT::v8i16;
      Shuffle = X86ISD::PACKSS;
      return true;
    }
  }

  // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
  if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
      (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
      (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
      (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
      (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
    if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
                              Subtarget)) {
      SrcVT = DstVT = MaskVT;
      if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
        SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
      return true;
    }
  }

  // Attempt to match against an OR if we're performing a blend shuffle and the
  // non-blended source element is zero in each case.
  // TODO: Handle cases where V1/V2 sizes don't match SizeInBits.
  if (SizeInBits == V1.getValueSizeInBits() &&
      SizeInBits == V2.getValueSizeInBits() &&
      (EltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
      (EltSizeInBits % V2.getScalarValueSizeInBits()) == 0) {
    bool IsBlend = true;
    unsigned NumV1Elts = V1.getValueType().getVectorNumElements();
    unsigned NumV2Elts = V2.getValueType().getVectorNumElements();
    unsigned Scale1 = NumV1Elts / NumMaskElts;
    unsigned Scale2 = NumV2Elts / NumMaskElts;
    APInt DemandedZeroV1 = APInt::getZero(NumV1Elts);
    APInt DemandedZeroV2 = APInt::getZero(NumV2Elts);
    for (unsigned i = 0; i != NumMaskElts; ++i) {
      int M = Mask[i];
      if (M == SM_SentinelUndef)
        continue;
      if (M == SM_SentinelZero) {
        DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
        DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
        continue;
      }
      if (M == (int)i) {
        DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
        continue;
      }
      if (M == (int)(i + NumMaskElts)) {
        DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
        continue;
      }
      IsBlend = false;
      break;
    }
    if (IsBlend) {
      if (DAG.MaskedVectorIsZero(V1, DemandedZeroV1) &&
          DAG.MaskedVectorIsZero(V2, DemandedZeroV2)) {
        Shuffle = ISD::OR;
        SrcVT = DstVT = MaskVT.changeTypeToInteger();
        return true;
      }
      if (NumV1Elts == NumV2Elts && NumV1Elts == NumMaskElts) {
        // FIXME: handle mismatched sizes?
        // TODO: investigate if `ISD::OR` handling in
        // `TargetLowering::SimplifyDemandedVectorElts` can be improved instead.
        auto computeKnownBitsElementWise = [&DAG](SDValue V) {
          unsigned NumElts = V.getValueType().getVectorNumElements();
          KnownBits Known(NumElts);
          for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
            APInt Mask = APInt::getOneBitSet(NumElts, EltIdx);
            KnownBits PeepholeKnown = DAG.computeKnownBits(V, Mask);
            if (PeepholeKnown.isZero())
              Known.Zero.setBit(EltIdx);
            if (PeepholeKnown.isAllOnes())
              Known.One.setBit(EltIdx);
          }
          return Known;
        };

        KnownBits V1Known = computeKnownBitsElementWise(V1);
        KnownBits V2Known = computeKnownBitsElementWise(V2);

        for (unsigned i = 0; i != NumMaskElts && IsBlend; ++i) {
          int M = Mask[i];
          if (M == SM_SentinelUndef)
            continue;
          if (M == SM_SentinelZero) {
            IsBlend &= V1Known.Zero[i] && V2Known.Zero[i];
            continue;
          }
          if (M == (int)i) {
            IsBlend &= V2Known.Zero[i] || V1Known.One[i];
            continue;
          }
          if (M == (int)(i + NumMaskElts)) {
            IsBlend &= V1Known.Zero[i] || V2Known.One[i];
            continue;
          }
          llvm_unreachable("will not get here.");
        }
        if (IsBlend) {
          Shuffle = ISD::OR;
          SrcVT = DstVT = MaskVT.changeTypeToInteger();
          return true;
        }
      }
    }
  }

  return false;
}

static bool matchBinaryPermuteShuffle(
    MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
    bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
    const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
    unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
  unsigned NumMaskElts = Mask.size();
  unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();

  // Attempt to match against a VALIGND/VALIGNQ rotate.
  if (AllowIntDomain && (EltSizeInBits == 64 || EltSizeInBits == 32) &&
      ((MaskVT.is128BitVector() && Subtarget.hasVLX()) ||
       (MaskVT.is256BitVector() && Subtarget.hasVLX()) ||
       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
    if (!isAnyZero(Mask)) {
      int Rotation = matchShuffleAsElementRotate(V1, V2, Mask);
      if (0 < Rotation) {
        Shuffle = X86ISD::VALIGN;
        if (EltSizeInBits == 64)
          ShuffleVT = MVT::getVectorVT(MVT::i64, MaskVT.getSizeInBits() / 64);
        else
          ShuffleVT = MVT::getVectorVT(MVT::i32, MaskVT.getSizeInBits() / 32);
        PermuteImm = Rotation;
        return true;
      }
    }
  }

  // Attempt to match against a PALIGNR byte rotate.
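  // PALIGNR reads a byte-aligned window out of the concatenation of the two
  // (per-lane) sources; e.g. a v16i8 mask <1,2,...,15,16> reads that
  // concatenation at a one-byte offset, which matchShuffleAsByteRotate
  // reports as a byte rotation of 1.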
  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
                         (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
                         (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
    int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
    if (0 < ByteRotation) {
      Shuffle = X86ISD::PALIGNR;
      ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
      PermuteImm = ByteRotation;
      return true;
    }
  }

  // Attempt to combine to X86ISD::BLENDI.
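  // In a BLENDI immediate, bit i selects element i from V2 instead of V1;
  // e.g. a v4i32 mask <0,5,2,7> takes lanes 1 and 3 from V2, giving imm8
  // 0b1010.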
  if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
                            (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
      (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
    uint64_t BlendMask = 0;
    bool ForceV1Zero = false, ForceV2Zero = false;
    SmallVector<int, 8> TargetMask(Mask);
    if (matchShuffleAsBlend(MaskVT, V1, V2, TargetMask, Zeroable, ForceV1Zero,
                            ForceV2Zero, BlendMask)) {
      if (MaskVT == MVT::v16i16) {
        // We can only use v16i16 PBLENDW if the lanes are repeated.
        SmallVector<int, 8> RepeatedMask;
        if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
                                        RepeatedMask)) {
          assert(RepeatedMask.size() == 8 &&
                 "Repeated mask size doesn't match!");
          PermuteImm = 0;
          for (int i = 0; i < 8; ++i)
            if (RepeatedMask[i] >= 8)
              PermuteImm |= 1 << i;
          V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
          V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
          Shuffle = X86ISD::BLENDI;
          ShuffleVT = MaskVT;
          return true;
        }
      } else {
        V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
        V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
        PermuteImm = (unsigned)BlendMask;
        Shuffle = X86ISD::BLENDI;
        ShuffleVT = MaskVT;
        return true;
      }
    }
  }

  // Attempt to combine to INSERTPS, but only if it has elements that need to
  // be set to zero.
  if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
      MaskVT.is128BitVector() && isAnyZero(Mask) &&
      matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
    Shuffle = X86ISD::INSERTPS;
    ShuffleVT = MVT::v4f32;
    return true;
  }

  // Attempt to combine to SHUFPD.
  if (AllowFloatDomain && EltSizeInBits == 64 &&
      ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
       (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
    bool ForceV1Zero = false, ForceV2Zero = false;
    if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
                               PermuteImm, Mask, Zeroable)) {
      V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
      V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
      Shuffle = X86ISD::SHUFP;
      ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
      return true;
    }
  }

  // Attempt to combine to SHUFPS.
  if (AllowFloatDomain && EltSizeInBits == 32 &&
      ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
       (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
    SmallVector<int, 4> RepeatedMask;
    if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
      // Match each half of the repeated mask, to determine if it's just
      // referencing one of the vectors, is zeroable, or entirely undef.
      auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
        int M0 = RepeatedMask[Offset];
        int M1 = RepeatedMask[Offset + 1];

        if (isUndefInRange(RepeatedMask, Offset, 2)) {
          return DAG.getUNDEF(MaskVT);
        } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
          S0 = (SM_SentinelUndef == M0 ? -1 : 0);
          S1 = (SM_SentinelUndef == M1 ? -1 : 1);
          return getZeroVector(MaskVT, Subtarget, DAG, DL);
        } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
          S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
          S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
          return V1;
        } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
          S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
          S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
          return V2;
        }

        return SDValue();
      };

      int ShufMask[4] = {-1, -1, -1, -1};
      SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
      SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);

      if (Lo && Hi) {
        V1 = Lo;
        V2 = Hi;
        Shuffle = X86ISD::SHUFP;
        ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
        PermuteImm = getV4X86ShuffleImm(ShufMask);
        return true;
      }
    }
  }

  // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
  if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
      MaskVT.is128BitVector() &&
      matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
    Shuffle = X86ISD::INSERTPS;
    ShuffleVT = MVT::v4f32;
    return true;
  }

  return false;
}

static SDValue combineX86ShuffleChainWithExtract(
    ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
    bool HasVariableMask, bool AllowVariableCrossLaneMask,
    bool AllowVariablePerLaneMask, SelectionDAG &DAG,
    const X86Subtarget &Subtarget);

/// Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
/// This is the leaf of the recursive combine below. When we have found some
/// chain of single-use x86 shuffle instructions and accumulated the combined
/// shuffle mask represented by them, this will try to pattern match that mask
/// into either a single instruction if there is a special purpose instruction
/// for this operation, or into a PSHUFB instruction which is a fully general
/// instruction but should only be used to replace chains over a certain depth.
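///
/// For example, two chained PSHUFDs with immediate 0xB1 (mask <1,0,3,2>)
/// accumulate to an identity mask, so the whole chain collapses to its input.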
static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
                                      ArrayRef<int> BaseMask, int Depth,
                                      bool HasVariableMask,
                                      bool AllowVariableCrossLaneMask,
                                      bool AllowVariablePerLaneMask,
                                      SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
  assert((Inputs.size() == 1 || Inputs.size() == 2) &&
         "Unexpected number of shuffle inputs!");

  SDLoc DL(Root);
  MVT RootVT = Root.getSimpleValueType();
  unsigned RootSizeInBits = RootVT.getSizeInBits();
  unsigned NumRootElts = RootVT.getVectorNumElements();

  // Canonicalize shuffle input op to the requested type.
  auto CanonicalizeShuffleInput = [&](MVT VT, SDValue Op) {
    if (VT.getSizeInBits() > Op.getValueSizeInBits())
      Op = widenSubVector(Op, false, Subtarget, DAG, DL, VT.getSizeInBits());
    else if (VT.getSizeInBits() < Op.getValueSizeInBits())
      Op = extractSubVector(Op, 0, DAG, DL, VT.getSizeInBits());
    return DAG.getBitcast(VT, Op);
  };

  // Find the inputs that enter the chain. Note that multiple uses are OK
  // here, we're not going to remove the operands we find.
  bool UnaryShuffle = (Inputs.size() == 1);
  SDValue V1 = peekThroughBitcasts(Inputs[0]);
  SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
                             : peekThroughBitcasts(Inputs[1]));

  MVT VT1 = V1.getSimpleValueType();
  MVT VT2 = V2.getSimpleValueType();
  assert((RootSizeInBits % VT1.getSizeInBits()) == 0 &&
         (RootSizeInBits % VT2.getSizeInBits()) == 0 && "Vector size mismatch");

  SDValue Res;

  unsigned NumBaseMaskElts = BaseMask.size();
  if (NumBaseMaskElts == 1) {
    assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
    return CanonicalizeShuffleInput(RootVT, V1);
  }

  bool OptForSize = DAG.shouldOptForSize();
  unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
  bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
                     (RootVT.isFloatingPoint() && Depth >= 1) ||
                     (RootVT.is256BitVector() && !Subtarget.hasAVX2());

  // Don't combine if we are an AVX512/EVEX target and the mask element size
  // is different from the root element size - this would prevent writemasks
  // from being reused.
  bool IsMaskedShuffle = false;
  if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) {
    if (Root.hasOneUse() && Root->use_begin()->getOpcode() == ISD::VSELECT &&
        Root->use_begin()->getOperand(0).getScalarValueSizeInBits() == 1) {
      IsMaskedShuffle = true;
    }
  }

  // If we are shuffling a splat (and not introducing zeros) then we can just
  // use it directly. This works for smaller elements as well as they already
  // repeat across each mask element.
  if (UnaryShuffle && !isAnyZero(BaseMask) &&
      V1.getValueSizeInBits() >= RootSizeInBits &&
      (BaseMaskEltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
      DAG.isSplatValue(V1, /*AllowUndefs*/ false)) {
    return CanonicalizeShuffleInput(RootVT, V1);
  }

  SmallVector<int, 64> Mask(BaseMask);

  // See if the shuffle is a hidden identity shuffle - repeated args in HOPs
  // etc. can be simplified.
  if (VT1 == VT2 && VT1.getSizeInBits() == RootSizeInBits && VT1.isVector()) {
    SmallVector<int> ScaledMask, IdentityMask;
    unsigned NumElts = VT1.getVectorNumElements();
    if (Mask.size() <= NumElts &&
        scaleShuffleElements(Mask, NumElts, ScaledMask)) {
      for (unsigned i = 0; i != NumElts; ++i)
        IdentityMask.push_back(i);
      if (isTargetShuffleEquivalent(RootVT, ScaledMask, IdentityMask, DAG, V1,
                                    V2))
        return CanonicalizeShuffleInput(RootVT, V1);
    }
  }

  // Handle 128/256-bit lane shuffles of 512-bit vectors.
  if (RootVT.is512BitVector() &&
      (NumBaseMaskElts == 2 || NumBaseMaskElts == 4)) {
    // If the upper subvectors are zeroable, then an extract+insert is better
    // than using X86ISD::SHUF128. The insertion is free, even if it has to
    // zero the upper subvectors.
    if (isUndefOrZeroInRange(Mask, 1, NumBaseMaskElts - 1)) {
      if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
        return SDValue(); // Nothing to do!
      assert(isInRange(Mask[0], 0, NumBaseMaskElts) &&
             "Unexpected lane shuffle");
      Res = CanonicalizeShuffleInput(RootVT, V1);
      unsigned SubIdx = Mask[0] * (NumRootElts / NumBaseMaskElts);
      bool UseZero = isAnyZero(Mask);
      Res = extractSubVector(Res, SubIdx, DAG, DL, BaseMaskEltSizeInBits);
      return widenSubVector(Res, UseZero, Subtarget, DAG, DL, RootSizeInBits);
    }

    // Narrow shuffle mask to v4x128.
    SmallVector<int, 4> ScaledMask;
    assert((BaseMaskEltSizeInBits % 128) == 0 && "Illegal mask size");
    narrowShuffleMaskElts(BaseMaskEltSizeInBits / 128, Mask, ScaledMask);

    // Try to lower to vshuf64x2/vshuf32x4.
    auto MatchSHUF128 = [&](MVT ShuffleVT, const SDLoc &DL,
                            ArrayRef<int> ScaledMask, SDValue V1, SDValue V2,
                            SelectionDAG &DAG) {
      unsigned PermMask = 0;
      // Ensure elements came from the same Op.
      SDValue Ops[2] = {DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT)};
      for (int i = 0; i < 4; ++i) {
        assert(ScaledMask[i] >= -1 && "Illegal shuffle sentinel value");
        if (ScaledMask[i] < 0)
          continue;

        SDValue Op = ScaledMask[i] >= 4 ? V2 : V1;
        unsigned OpIndex = i / 2;
        if (Ops[OpIndex].isUndef())
          Ops[OpIndex] = Op;
        else if (Ops[OpIndex] != Op)
          return SDValue();

        // Convert the 128-bit shuffle mask selection values into 128-bit
        // selection bits defined by a vshuf64x2 instruction's immediate
        // control operand.
        PermMask |= (ScaledMask[i] % 4) << (i * 2);
      }

      return DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
                         CanonicalizeShuffleInput(ShuffleVT, Ops[0]),
                         CanonicalizeShuffleInput(ShuffleVT, Ops[1]),
                         DAG.getTargetConstant(PermMask, DL, MVT::i8));
    };

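    // e.g. ScaledMask <0,1,6,7> takes lanes 0-1 from Ops[0] and lanes 2-3
    // from Ops[1], encoding as PermMask 0xE4 (0b11100100).
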
    // FIXME: Is there a better way to do this? is256BitLaneRepeatedShuffleMask
    // doesn't work because our mask is for 128 bits and we don't have an MVT
    // to match that.
    bool PreferPERMQ = UnaryShuffle && isUndefOrInRange(ScaledMask[0], 0, 2) &&
                       isUndefOrInRange(ScaledMask[1], 0, 2) &&
                       isUndefOrInRange(ScaledMask[2], 2, 4) &&
                       isUndefOrInRange(ScaledMask[3], 2, 4) &&
                       (ScaledMask[0] < 0 || ScaledMask[2] < 0 ||
                        ScaledMask[0] == (ScaledMask[2] % 2)) &&
                       (ScaledMask[1] < 0 || ScaledMask[3] < 0 ||
                        ScaledMask[1] == (ScaledMask[3] % 2));

    if (!isAnyZero(ScaledMask) && !PreferPERMQ) {
      if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
        return SDValue(); // Nothing to do!
      MVT ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);
      if (SDValue V = MatchSHUF128(ShuffleVT, DL, ScaledMask, V1, V2, DAG))
        return DAG.getBitcast(RootVT, V);
    }
  }

  // Handle 128-bit lane shuffles of 256-bit vectors.
  if (RootVT.is256BitVector() && NumBaseMaskElts == 2) {
    // If the upper half is zeroable, then an extract+insert is better than
    // using X86ISD::VPERM2X128. The insertion is free, even if it has to
    // zero the upper half.
    if (isUndefOrZero(Mask[1])) {
      if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
        return SDValue(); // Nothing to do!
      assert(isInRange(Mask[0], 0, 2) && "Unexpected lane shuffle");
      Res = CanonicalizeShuffleInput(RootVT, V1);
      Res = extract128BitVector(Res, Mask[0] * (NumRootElts / 2), DAG, DL);
      return widenSubVector(Res, Mask[1] == SM_SentinelZero, Subtarget, DAG, DL,
                            RootSizeInBits);
    }

    // If we're inserting the low subvector, an insert-subvector 'concat'
    // pattern is quicker than VPERM2X128.
    // TODO: Add AVX2 support instead of VPERMQ/VPERMPD.
    if (BaseMask[0] == 0 && (BaseMask[1] == 0 || BaseMask[1] == 2) &&
        !Subtarget.hasAVX2()) {
      if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
        return SDValue(); // Nothing to do!
      SDValue Lo = CanonicalizeShuffleInput(RootVT, V1);
      SDValue Hi = CanonicalizeShuffleInput(RootVT, BaseMask[1] == 0 ? V1 : V2);
      Hi = extractSubVector(Hi, 0, DAG, DL, 128);
      return insertSubVector(Lo, Hi, NumRootElts / 2, DAG, DL, 128);
    }

    if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
      return SDValue(); // Nothing to do!

    // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
    // we need to use the zeroing feature.
    // Prefer blends for sequential shuffles unless we are optimizing for size.
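    // A VPERM2X128 imm8 selects a 128-bit half per nibble: bits[1:0] pick the
    // source half for the low lane and bits[5:4] for the high lane, while 0x8
    // in a nibble zeroes that lane; e.g. Mask <1,-1> encodes as 0x81.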
    if (UnaryShuffle &&
        !(Subtarget.hasAVX2() && isUndefOrInRange(Mask, 0, 2)) &&
        (OptForSize || !isSequentialOrUndefOrZeroInRange(Mask, 0, 2, 0))) {
      unsigned PermMask = 0;
      PermMask |= ((Mask[0] < 0 ? 0x8 : (Mask[0] & 1)) << 0);
      PermMask |= ((Mask[1] < 0 ? 0x8 : (Mask[1] & 1)) << 4);
      return DAG.getNode(
          X86ISD::VPERM2X128, DL, RootVT, CanonicalizeShuffleInput(RootVT, V1),
          DAG.getUNDEF(RootVT), DAG.getTargetConstant(PermMask, DL, MVT::i8));
    }

    if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
      return SDValue(); // Nothing to do!

    // TODO - handle AVX512VL cases with X86ISD::SHUF128.
    if (!UnaryShuffle && !IsMaskedShuffle) {
      assert(llvm::all_of(Mask, [](int M) { return 0 <= M && M < 4; }) &&
             "Unexpected shuffle sentinel value");
      // Prefer blends to X86ISD::VPERM2X128.
      if (!((Mask[0] == 0 && Mask[1] == 3) || (Mask[0] == 2 && Mask[1] == 1))) {
        unsigned PermMask = 0;
        PermMask |= ((Mask[0] & 3) << 0);
        PermMask |= ((Mask[1] & 3) << 4);
        SDValue LHS = isInRange(Mask[0], 0, 2) ? V1 : V2;
        SDValue RHS = isInRange(Mask[1], 0, 2) ? V1 : V2;
        return DAG.getNode(X86ISD::VPERM2X128, DL, RootVT,
                           CanonicalizeShuffleInput(RootVT, LHS),
                           CanonicalizeShuffleInput(RootVT, RHS),
                           DAG.getTargetConstant(PermMask, DL, MVT::i8));
      }
    }
  }

  // For masks that have been widened to 128-bit elements or more,
  // narrow back down to 64-bit elements.
  if (BaseMaskEltSizeInBits > 64) {
    assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
    int MaskScale = BaseMaskEltSizeInBits / 64;
    SmallVector<int, 64> ScaledMask;
    narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
    Mask = std::move(ScaledMask);
  }

  // For masked shuffles, we're trying to match the root width for better
  // writemask folding, attempt to scale the mask.
  // TODO - variable shuffles might need this to be widened again.
  if (IsMaskedShuffle && NumRootElts > Mask.size()) {
    assert((NumRootElts % Mask.size()) == 0 && "Illegal mask size");
    int MaskScale = NumRootElts / Mask.size();
    SmallVector<int, 64> ScaledMask;
    narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
    Mask = std::move(ScaledMask);
  }

  unsigned NumMaskElts = Mask.size();
  unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;

  // Determine the effective mask value type.
  FloatDomain &= (32 <= MaskEltSizeInBits);
  MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
                           : MVT::getIntegerVT(MaskEltSizeInBits);
  MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);

  // Only allow legal mask types.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
    return SDValue();

  // Attempt to match the mask against known shuffle patterns.
  MVT ShuffleSrcVT, ShuffleVT;
  unsigned Shuffle, PermuteImm;

  // Which shuffle domains are permitted?
  // Permit domain crossing at higher combine depths.
  // TODO: Should we indicate which domain is preferred if both are allowed?
  bool AllowFloatDomain = FloatDomain || (Depth >= 3);
  bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
                        (!MaskVT.is256BitVector() || Subtarget.hasAVX2());

  // Determine zeroable mask elements.
  APInt KnownUndef, KnownZero;
  resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
  APInt Zeroable = KnownUndef | KnownZero;

  if (UnaryShuffle) {
    // Attempt to match against broadcast-from-vector.
    // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
    if ((Subtarget.hasAVX2() ||
         (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits)) &&
        (!IsMaskedShuffle || NumRootElts == NumMaskElts)) {
      if (isUndefOrEqual(Mask, 0)) {
        if (V1.getValueType() == MaskVT &&
            V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
            X86::mayFoldLoad(V1.getOperand(0), Subtarget)) {
          if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
            return SDValue(); // Nothing to do!
          Res = V1.getOperand(0);
          Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
          return DAG.getBitcast(RootVT, Res);
        }
        if (Subtarget.hasAVX2()) {
          if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
            return SDValue(); // Nothing to do!
          Res = CanonicalizeShuffleInput(MaskVT, V1);
          Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
          return DAG.getBitcast(RootVT, Res);
        }
      }
    }

    if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, V1,
                          DAG, Subtarget, Shuffle, ShuffleSrcVT, ShuffleVT) &&
        (!IsMaskedShuffle ||
         (NumRootElts == ShuffleVT.getVectorNumElements()))) {
      if (Depth == 0 && Root.getOpcode() == Shuffle)
        return SDValue(); // Nothing to do!
      Res = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
      Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
      return DAG.getBitcast(RootVT, Res);
    }

    if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
                                 AllowIntDomain, DAG, Subtarget, Shuffle,
                                 ShuffleVT, PermuteImm) &&
        (!IsMaskedShuffle ||
         (NumRootElts == ShuffleVT.getVectorNumElements()))) {
      if (Depth == 0 && Root.getOpcode() == Shuffle)
        return SDValue(); // Nothing to do!
      Res = CanonicalizeShuffleInput(ShuffleVT, V1);
      Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
                        DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }
  }

  // Attempt to combine to INSERTPS, but only if the inserted element has come
  // from a scalar.
  // TODO: Handle other insertions here as well?
  if (!UnaryShuffle && AllowFloatDomain && RootSizeInBits == 128 &&
      Subtarget.hasSSE41() &&
      !isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG)) {
    if (MaskEltSizeInBits == 32) {
      SDValue SrcV1 = V1, SrcV2 = V2;
      if (matchShuffleAsInsertPS(SrcV1, SrcV2, PermuteImm, Zeroable, Mask,
                                 DAG) &&
          SrcV2.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
          return SDValue(); // Nothing to do!
        Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
                          CanonicalizeShuffleInput(MVT::v4f32, SrcV1),
                          CanonicalizeShuffleInput(MVT::v4f32, SrcV2),
                          DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
        return DAG.getBitcast(RootVT, Res);
      }
    }
    if (MaskEltSizeInBits == 64 &&
        isTargetShuffleEquivalent(MaskVT, Mask, {0, 2}, DAG) &&
        V2.getOpcode() == ISD::SCALAR_TO_VECTOR &&
        V2.getScalarValueSizeInBits() <= 32) {
      if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
        return SDValue(); // Nothing to do!
      PermuteImm = (/*DstIdx*/ 2 << 4) | (/*SrcIdx*/ 0 << 0);
      Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
                        CanonicalizeShuffleInput(MVT::v4f32, V1),
                        CanonicalizeShuffleInput(MVT::v4f32, V2),
                        DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }
  }

  SDValue NewV1 = V1; // Save operands in case early exit happens.
  SDValue NewV2 = V2;
  if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
                         NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
                         ShuffleVT, UnaryShuffle) &&
      (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
    if (Depth == 0 && Root.getOpcode() == Shuffle)
      return SDValue(); // Nothing to do!
    NewV1 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV1);
    NewV2 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV2);
    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
    return DAG.getBitcast(RootVT, Res);
  }

  NewV1 = V1; // Save operands in case early exit happens.
  NewV2 = V2;
  if (matchBinaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
                                AllowIntDomain, NewV1, NewV2, DL, DAG,
                                Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
      (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
    if (Depth == 0 && Root.getOpcode() == Shuffle)
      return SDValue(); // Nothing to do!
    NewV1 = CanonicalizeShuffleInput(ShuffleVT, NewV1);
    NewV2 = CanonicalizeShuffleInput(ShuffleVT, NewV2);
    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
                      DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
    return DAG.getBitcast(RootVT, Res);
  }

  // Typically from here on, we need an integer version of MaskVT.
  MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
  IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);

  // Annoyingly, SSE4A instructions don't map into the above match helpers.
  if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
    uint64_t BitLen, BitIdx;
    if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
                            Zeroable)) {
      if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
        return SDValue(); // Nothing to do!
      V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
      Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }

    if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
      if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
        return SDValue(); // Nothing to do!
      V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
      V2 = CanonicalizeShuffleInput(IntMaskVT, V2);
      Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }
  }

  // Match shuffle against TRUNCATE patterns.
  if (AllowIntDomain && MaskEltSizeInBits < 64 && Subtarget.hasAVX512()) {
    // Match against a VTRUNC instruction, accounting for src/dst sizes.
    if (matchShuffleAsVTRUNC(ShuffleSrcVT, ShuffleVT, IntMaskVT, Mask, Zeroable,
                             Subtarget)) {
      bool IsTRUNCATE = ShuffleVT.getVectorNumElements() ==
                        ShuffleSrcVT.getVectorNumElements();
      unsigned Opc =
          IsTRUNCATE ? (unsigned)ISD::TRUNCATE : (unsigned)X86ISD::VTRUNC;
      if (Depth == 0 && Root.getOpcode() == Opc)
        return SDValue(); // Nothing to do!
      V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
      Res = DAG.getNode(Opc, DL, ShuffleVT, V1);
      if (ShuffleVT.getSizeInBits() < RootSizeInBits)
        Res = widenSubVector(Res, true, Subtarget, DAG, DL, RootSizeInBits);
      return DAG.getBitcast(RootVT, Res);
    }

    // Do we need a more general binary truncation pattern?
    if (RootSizeInBits < 512 &&
        ((RootVT.is256BitVector() && Subtarget.useAVX512Regs()) ||
         (RootVT.is128BitVector() && Subtarget.hasVLX())) &&
        (MaskEltSizeInBits > 8 || Subtarget.hasBWI()) &&
        isSequentialOrUndefInRange(Mask, 0, NumMaskElts, 0, 2)) {
      // Bail if this was already a truncation or PACK node.
      // We sometimes fail to match PACK if we demand known undef elements.
      if (Depth == 0 && (Root.getOpcode() == ISD::TRUNCATE ||
                         Root.getOpcode() == X86ISD::PACKSS ||
                         Root.getOpcode() == X86ISD::PACKUS))
        return SDValue(); // Nothing to do!
      ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
      ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts / 2);
      V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
      V2 = CanonicalizeShuffleInput(ShuffleSrcVT, V2);
      ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
      ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts);
      Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShuffleSrcVT, V1, V2);
      Res = DAG.getNode(ISD::TRUNCATE, DL, IntMaskVT, Res);
      return DAG.getBitcast(RootVT, Res);
    }
  }

  // Don't try to re-form single instruction chains under any circumstances now
  // that we've done encoding canonicalization for them.
  if (Depth < 1)
    return SDValue();

  // Depth threshold above which we can efficiently use variable mask shuffles.
  int VariableCrossLaneShuffleDepth =
      Subtarget.hasFastVariableCrossLaneShuffle() ? 1 : 2;
  int VariablePerLaneShuffleDepth =
      Subtarget.hasFastVariablePerLaneShuffle() ? 1 : 2;
  AllowVariableCrossLaneMask &=
      (Depth >= VariableCrossLaneShuffleDepth) || HasVariableMask;
  AllowVariablePerLaneMask &=
      (Depth >= VariablePerLaneShuffleDepth) || HasVariableMask;
  // VPERMI2W/VPERMI2B are 3 uops on Skylake and Icelake so we require a
  // higher depth before combining them.
  bool AllowBWIVPERMV3 =
      (Depth >= (VariableCrossLaneShuffleDepth + 2) || HasVariableMask);

  bool MaskContainsZeros = isAnyZero(Mask);

  if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
    // If we have a single input lane-crossing shuffle then lower to VPERMV.
    if (UnaryShuffle && AllowVariableCrossLaneMask && !MaskContainsZeros) {
      if (Subtarget.hasAVX2() &&
          (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) {
        SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
        Res = CanonicalizeShuffleInput(MaskVT, V1);
        Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
        return DAG.getBitcast(RootVT, Res);
      }
      // AVX512 variants (non-VLX will pad to 512-bit shuffles).
      if ((Subtarget.hasAVX512() &&
           (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
            MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
          (Subtarget.hasBWI() &&
           (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
          (Subtarget.hasVBMI() &&
           (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8))) {
        V1 = CanonicalizeShuffleInput(MaskVT, V1);
        V2 = DAG.getUNDEF(MaskVT);
        Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
        return DAG.getBitcast(RootVT, Res);
      }
    }

    // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
    // vector as the second source (non-VLX will pad to 512-bit shuffles).
    if (UnaryShuffle && AllowVariableCrossLaneMask &&
        ((Subtarget.hasAVX512() &&
          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
           MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
           MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32 ||
           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
         (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
          (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
         (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
          (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
      // Adjust shuffle mask - replace SM_SentinelZero with second source index.
      for (unsigned i = 0; i != NumMaskElts; ++i)
        if (Mask[i] == SM_SentinelZero)
          Mask[i] = NumMaskElts + i;
      V1 = CanonicalizeShuffleInput(MaskVT, V1);
      V2 = getZeroVector(MaskVT, Subtarget, DAG, DL);
      Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
      return DAG.getBitcast(RootVT, Res);
    }

    // If that failed and either input is extracted then try to combine as a
    // shuffle with the larger type.
    if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
            Inputs, Root, BaseMask, Depth, HasVariableMask,
            AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG,
            Subtarget))
      return WideShuffle;

    // If we have a dual input lane-crossing shuffle then lower to VPERMV3,
    // (non-VLX will pad to 512-bit shuffles).
    if (AllowVariableCrossLaneMask && !MaskContainsZeros &&
        ((Subtarget.hasAVX512() &&
          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
           MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32 ||
           MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
         (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
          (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
         (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
          (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
      V1 = CanonicalizeShuffleInput(MaskVT, V1);
      V2 = CanonicalizeShuffleInput(MaskVT, V2);
      Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
      return DAG.getBitcast(RootVT, Res);
    }
  }

  // See if we can combine a single input shuffle with zeros to a bit-mask,
  // which is much simpler than any shuffle.
  if (UnaryShuffle && MaskContainsZeros && AllowVariablePerLaneMask &&
      isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
      DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
    APInt Zero = APInt::getZero(MaskEltSizeInBits);
    APInt AllOnes = APInt::getAllOnes(MaskEltSizeInBits);
    APInt UndefElts(NumMaskElts, 0);
    SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
    for (unsigned i = 0; i != NumMaskElts; ++i) {
      int M = Mask[i];
      if (M == SM_SentinelUndef) {
        UndefElts.setBit(i);
        continue;
      }
      if (M == SM_SentinelZero)
        continue;
      EltBits[i] = AllOnes;
    }
    SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
    Res = CanonicalizeShuffleInput(MaskVT, V1);
    unsigned AndOpcode =
        MaskVT.isFloatingPoint() ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
    Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
    return DAG.getBitcast(RootVT, Res);
  }

  // If we have a single input shuffle with different shuffle patterns in the
  // 128-bit lanes use the variable mask to VPERMILPS.
  // TODO: Combine other mask types at higher depths.
  if (UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
      ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
       (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
    SmallVector<SDValue, 16> VPermIdx;
    for (int M : Mask) {
      SDValue Idx =
          M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
      VPermIdx.push_back(Idx);
    }
    SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
    Res = CanonicalizeShuffleInput(MaskVT, V1);
    Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
    return DAG.getBitcast(RootVT, Res);
  }

  // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
  // to VPERMIL2PD/VPERMIL2PS.
  if (AllowVariablePerLaneMask && Subtarget.hasXOP() &&
      (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
       MaskVT == MVT::v8f32)) {
    // VPERMIL2 Operation.
    // Bits[3] - Match Bit.
    // Bits[2:1] - (Per Lane) PD Shuffle Mask.
    // Bits[2:0] - (Per Lane) PS Shuffle Mask.
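    // e.g. for v4f32 the control value (M % NumEltsPerLane) +
    // ((M / NumMaskElts) * NumEltsPerLane) maps mask value 5 (element 1 of
    // V2) to 5, while zeroable elements are encoded as 8 so the match bit
    // zeroes them.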
    unsigned NumLanes = MaskVT.getSizeInBits() / 128;
    unsigned NumEltsPerLane = NumMaskElts / NumLanes;
    SmallVector<int, 8> VPerm2Idx;
    unsigned M2ZImm = 0;
    for (int M : Mask) {
      if (M == SM_SentinelUndef) {
        VPerm2Idx.push_back(-1);
        continue;
      }
      if (M == SM_SentinelZero) {
        M2ZImm = 2;
        VPerm2Idx.push_back(8);
        continue;
      }
      int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
      Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
      VPerm2Idx.push_back(Index);
    }
    V1 = CanonicalizeShuffleInput(MaskVT, V1);
    V2 = CanonicalizeShuffleInput(MaskVT, V2);
    SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
    Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
                      DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
    return DAG.getBitcast(RootVT, Res);
  }

  // If we have 3 or more shuffle instructions or a chain involving a variable
  // mask, we can replace them with a single PSHUFB instruction profitably.
  // Intel's manuals suggest only using PSHUFB if doing so replaces 5
  // instructions, but in practice PSHUFB tends to be *very* fast so we're
  // more aggressive.
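  // e.g. scaling a v4i32 reversal <3,2,1,0> up to bytes gives the PSHUFB
  // control vector {12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3}.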
  if (UnaryShuffle && AllowVariablePerLaneMask &&
      ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
       (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
       (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
    SmallVector<SDValue, 16> PSHUFBMask;
    int NumBytes = RootVT.getSizeInBits() / 8;
    int Ratio = NumBytes / NumMaskElts;
    for (int i = 0; i < NumBytes; ++i) {
      int M = Mask[i / Ratio];
      if (M == SM_SentinelUndef) {
        PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
        continue;
      }
      if (M == SM_SentinelZero) {
        PSHUFBMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
        continue;
      }
      M = Ratio * M + i % Ratio;
      assert((M / 16) == (i / 16) && "Lane crossing detected");
      PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
    }
    MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
    Res = CanonicalizeShuffleInput(ByteVT, V1);
    SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
    Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
    return DAG.getBitcast(RootVT, Res);
  }

  // With XOP, if we have a 128-bit binary input shuffle we can always combine
  // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
  // slower than PSHUFB on targets that support both.
  if (AllowVariablePerLaneMask && RootVT.is128BitVector() &&
      Subtarget.hasXOP()) {
    // VPPERM Mask Operation
    // Bits[4:0] - Byte Index (0 - 31)
    // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
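    // Byte indices 0-15 select from V1 and 16-31 from V2; pushing 0x80 sets
    // the permute-operation field to 4, which zeroes that destination byte.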
    SmallVector<SDValue, 16> VPPERMMask;
    int NumBytes = 16;
    int Ratio = NumBytes / NumMaskElts;
    for (int i = 0; i < NumBytes; ++i) {
      int M = Mask[i / Ratio];
      if (M == SM_SentinelUndef) {
        VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
        continue;
      }
      if (M == SM_SentinelZero) {
        VPPERMMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
        continue;
      }
      M = Ratio * M + i % Ratio;
      VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
    }
    MVT ByteVT = MVT::v16i8;
    V1 = CanonicalizeShuffleInput(ByteVT, V1);
    V2 = CanonicalizeShuffleInput(ByteVT, V2);
    SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
    Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
    return DAG.getBitcast(RootVT, Res);
  }

  // If that failed and either input is extracted then try to combine as a
  // shuffle with the larger type.
  if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
          Inputs, Root, BaseMask, Depth, HasVariableMask,
          AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG, Subtarget))
    return WideShuffle;

  // If we have a dual input shuffle then lower to VPERMV3,
  // (non-VLX will pad to 512-bit shuffles).
  if (!UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
      ((Subtarget.hasAVX512() &&
        (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v8f64 ||
         MaskVT == MVT::v2i64 || MaskVT == MVT::v4i64 || MaskVT == MVT::v8i64 ||
         MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 || MaskVT == MVT::v8f32 ||
         MaskVT == MVT::v8i32 || MaskVT == MVT::v16f32 ||
         MaskVT == MVT::v16i32)) ||
       (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
        (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16 ||
         MaskVT == MVT::v32i16)) ||
       (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
        (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8 ||
         MaskVT == MVT::v64i8)))) {
    V1 = CanonicalizeShuffleInput(MaskVT, V1);
    V2 = CanonicalizeShuffleInput(MaskVT, V2);
    Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
    return DAG.getBitcast(RootVT, Res);
  }

  // Failed to find any combines.
  return SDValue();
}

// Combine an arbitrary chain of shuffles + extract_subvectors into a single
// instruction if possible.
//
// Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
// type size to attempt to combine:
// shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
// -->
// extract_subvector(shuffle(x,y,m2),0)
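//
// For example, a 128-bit shuffle of two v4f32 values extracted from the upper
// halves of wider v8f32 sources can be rewritten as a single v8f32 shuffle,
// after offsetting the mask by each extract's subvector index, with only the
// low 128 bits of the wide result extracted afterwards.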
static SDValue combineX86ShuffleChainWithExtract(
    ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
    bool HasVariableMask, bool AllowVariableCrossLaneMask,
    bool AllowVariablePerLaneMask, SelectionDAG &DAG,
    const X86Subtarget &Subtarget) {
  unsigned NumMaskElts = BaseMask.size();
  unsigned NumInputs = Inputs.size();
  if (NumInputs == 0)
    return SDValue();

  EVT RootVT = Root.getValueType();
  unsigned RootSizeInBits = RootVT.getSizeInBits();
  unsigned RootEltSizeInBits = RootSizeInBits / NumMaskElts;
  assert((RootSizeInBits % NumMaskElts) == 0 && "Unexpected root shuffle mask");

  // Peek through extract_subvector to find widest legal vector.
  // TODO: Handle ISD::TRUNCATE
  unsigned WideSizeInBits = RootSizeInBits;
  for (unsigned I = 0; I != NumInputs; ++I) {
    SDValue Input = peekThroughBitcasts(Inputs[I]);
    while (Input.getOpcode() == ISD::EXTRACT_SUBVECTOR)
      Input = peekThroughBitcasts(Input.getOperand(0));
    if (DAG.getTargetLoweringInfo().isTypeLegal(Input.getValueType()) &&
        WideSizeInBits < Input.getValueSizeInBits())
      WideSizeInBits = Input.getValueSizeInBits();
  }

  // Bail if we fail to find a source larger than the existing root.
  unsigned Scale = WideSizeInBits / RootSizeInBits;
  if (WideSizeInBits <= RootSizeInBits ||
      (WideSizeInBits % RootSizeInBits) != 0)
    return SDValue();

  // Create new mask for larger type.
  SmallVector<int, 64> WideMask(BaseMask);
  for (int &M : WideMask) {
    if (M < 0)
      continue;
    M = (M % NumMaskElts) + ((M / NumMaskElts) * Scale * NumMaskElts);
  }
  WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);

  // Attempt to peek through inputs and adjust mask when we extract from an
  // upper subvector.
  int AdjustedMasks = 0;
  SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
  for (unsigned I = 0; I != NumInputs; ++I) {
    SDValue &Input = WideInputs[I];
    Input = peekThroughBitcasts(Input);
    while (Input.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
           Input.getOperand(0).getValueSizeInBits() <= WideSizeInBits) {
      uint64_t Idx = Input.getConstantOperandVal(1);
      if (Idx != 0) {
        ++AdjustedMasks;
        unsigned InputEltSizeInBits = Input.getScalarValueSizeInBits();
        Idx = (Idx * InputEltSizeInBits) / RootEltSizeInBits;

        int lo = I * WideMask.size();
        int hi = (I + 1) * WideMask.size();
        for (int &M : WideMask)
          if (lo <= M && M < hi)
            M += Idx;
      }
      Input = peekThroughBitcasts(Input.getOperand(0));
    }
  }

  // Remove unused/repeated shuffle source ops.
  resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
  assert(!WideInputs.empty() && "Shuffle with no inputs detected");

  // Bail if we're always extracting from the lowest subvectors,
  // combineX86ShuffleChain should match this for the current width, or the
  // shuffle still references too many inputs.
  if (AdjustedMasks == 0 || WideInputs.size() > 2)
    return SDValue();

  // Minor canonicalization of the accumulated shuffle mask to make it easier
  // to match below. All this does is detect masks with sequential pairs of
  // elements, and shrink them to the half-width mask. It does this in a loop
  // so it will reduce the size of the mask to the minimal width mask which
  // performs an equivalent shuffle.
  while (WideMask.size() > 1) {
    SmallVector<int, 64> WidenedMask;
    if (!canWidenShuffleElements(WideMask, WidenedMask))
      break;
    WideMask = std::move(WidenedMask);
  }

  // Canonicalization of binary shuffle masks to improve pattern matching by
  // commuting the inputs.
  if (WideInputs.size() == 2 && canonicalizeShuffleMaskWithCommute(WideMask)) {
    ShuffleVectorSDNode::commuteMask(WideMask);
    std::swap(WideInputs[0], WideInputs[1]);
  }

  // Increase depth for every upper subvector we've peeked through.
  Depth += AdjustedMasks;

  // Attempt to combine wider chain.
  // TODO: Can we use a better Root?
  SDValue WideRoot = WideInputs.front().getValueSizeInBits() >
                             WideInputs.back().getValueSizeInBits()
                         ? WideInputs.front()
                         : WideInputs.back();
  assert(WideRoot.getValueSizeInBits() == WideSizeInBits &&
         "WideRootSize mismatch");

  if (SDValue WideShuffle =
          combineX86ShuffleChain(WideInputs, WideRoot, WideMask, Depth,
                                 HasVariableMask, AllowVariableCrossLaneMask,
                                 AllowVariablePerLaneMask, DAG, Subtarget)) {
    WideShuffle =
        extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
    return DAG.getBitcast(RootVT, WideShuffle);
  }

  return SDValue();
}

// Canonicalize the combined shuffle mask chain with horizontal ops.
// NOTE: This may update the Ops and Mask.
static SDValue canonicalizeShuffleMaskWithHorizOp(
    MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
    unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
    const X86Subtarget &Subtarget) {
  if (Mask.empty() || Ops.empty())
    return SDValue();

  SmallVector<SDValue> BC;
  for (SDValue Op : Ops)
    BC.push_back(peekThroughBitcasts(Op));

  // All ops must be the same horizop + type.
  SDValue BC0 = BC[0];
  EVT VT0 = BC0.getValueType();
  unsigned Opcode0 = BC0.getOpcode();
  if (VT0.getSizeInBits() != RootSizeInBits || llvm::any_of(BC, [&](SDValue V) {
        return V.getOpcode() != Opcode0 || V.getValueType() != VT0;
      }))
    return SDValue();

  bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
                  Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB);
  bool isPack = (Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS);
  if (!isHoriz && !isPack)
    return SDValue();

  // Do all ops have a single use?
  bool OneUseOps = llvm::all_of(Ops, [](SDValue Op) {
    return Op.hasOneUse() &&
           peekThroughBitcasts(Op) == peekThroughOneUseBitcasts(Op);
  });

  int NumElts = VT0.getVectorNumElements();
  int NumLanes = VT0.getSizeInBits() / 128;
  int NumEltsPerLane = NumElts / NumLanes;
  int NumHalfEltsPerLane = NumEltsPerLane / 2;
  MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
  unsigned EltSizeInBits = RootSizeInBits / Mask.size();

  if (NumEltsPerLane >= 4 &&
      (isPack || shouldUseHorizontalOp(Ops.size() == 1, DAG, Subtarget))) {
    SmallVector<int> LaneMask, ScaledMask;
    if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, LaneMask) &&
        scaleShuffleElements(LaneMask, 4, ScaledMask)) {
      // See if we can remove the shuffle by re-sorting the HOP chain so that
      // the HOP args are pre-shuffled.
      // TODO: Generalize to any sized/depth chain.
      // TODO: Add support for PACKSS/PACKUS.
      if (isHoriz) {
        // Attempt to find a HOP(HOP(X,Y),HOP(Z,W)) source operand.
        auto GetHOpSrc = [&](int M) {
          if (M == SM_SentinelUndef)
            return DAG.getUNDEF(VT0);
          if (M == SM_SentinelZero)
            return getZeroVector(VT0.getSimpleVT(), Subtarget, DAG, DL);
          SDValue Src0 = BC[M / 4];
          SDValue Src1 = Src0.getOperand((M % 4) >= 2);
          if (Src1.getOpcode() == Opcode0 && Src0->isOnlyUserOf(Src1.getNode()))
            return Src1.getOperand(M % 2);
          return SDValue();
        };
        SDValue M0 = GetHOpSrc(ScaledMask[0]);
        SDValue M1 = GetHOpSrc(ScaledMask[1]);
        SDValue M2 = GetHOpSrc(ScaledMask[2]);
        SDValue M3 = GetHOpSrc(ScaledMask[3]);
        if (M0 && M1 && M2 && M3) {
          SDValue LHS = DAG.getNode(Opcode0, DL, SrcVT, M0, M1);
          SDValue RHS = DAG.getNode(Opcode0, DL, SrcVT, M2, M3);
          return DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
        }
      }
      // shuffle(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
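      // e.g. with v4f32 HADDs, mask <0,1,4,5> of HADD(x,y) and HADD(z,w)
      // selects {x0+x1, x2+x3, z0+z1, z2+z3}, which is just HADD(x,z) with an
      // identity post-permute.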
      if (Ops.size() >= 2) {
        SDValue LHS, RHS;
        auto GetHOpSrc = [&](int M, int &OutM) {
          // TODO: Support SM_SentinelZero
          if (M < 0)
            return M == SM_SentinelUndef;
          SDValue Src = BC[M / 4].getOperand((M % 4) >= 2);
          if (!LHS || LHS == Src) {
            LHS = Src;
            OutM = (M % 2);
            return true;
          }
          if (!RHS || RHS == Src) {
            RHS = Src;
            OutM = (M % 2) + 2;
            return true;
          }
          return false;
        };
        int PostMask[4] = {-1, -1, -1, -1};
        if (GetHOpSrc(ScaledMask[0], PostMask[0]) &&
            GetHOpSrc(ScaledMask[1], PostMask[1]) &&
            GetHOpSrc(ScaledMask[2], PostMask[2]) &&
            GetHOpSrc(ScaledMask[3], PostMask[3])) {
          LHS = DAG.getBitcast(SrcVT, LHS);
          RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
          SDValue Res = DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
          // Use SHUFPS for the permute so this will work on SSE2 targets,
          // shuffle combining and domain handling will simplify this later on.
          MVT ShuffleVT = MVT::getVectorVT(MVT::f32, RootSizeInBits / 32);
          Res = DAG.getBitcast(ShuffleVT, Res);
          return DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
                             getV4X86ShuffleImm8ForMask(PostMask, DL, DAG));
        }
      }
    }
  }

  if (2 < Ops.size())
    return SDValue();

41318 SDValue BC1 = BC[BC.size() - 1];
41319 if (Mask.size() == VT0.getVectorNumElements()) {
41320 // Canonicalize binary shuffles of horizontal ops that use the
41321 // same sources to an unary shuffle.
41322 // TODO: Try to perform this fold even if the shuffle remains.
41323 if (Ops.size() == 2) {
41324 auto ContainsOps = [](SDValue HOp, SDValue Op) {
41325 return Op == HOp.getOperand(0) || Op == HOp.getOperand(1);
41327 // Commute if all BC0's ops are contained in BC1.
41328 if (ContainsOps(BC1, BC0.getOperand(0)) &&
41329 ContainsOps(BC1, BC0.getOperand(1))) {
41330 ShuffleVectorSDNode::commuteMask(Mask);
41331 std::swap(Ops[0], Ops[1]);
41332 std::swap(BC0, BC1);
41335 // If BC1 can be represented by BC0, then convert to unary shuffle.
41336 if (ContainsOps(BC0, BC1.getOperand(0)) &&
41337 ContainsOps(BC0, BC1.getOperand(1))) {
41338 for (int &M : Mask) {
41339 if (M < NumElts) // BC0 element or UNDEF/Zero sentinel.
41341 int SubLane = ((M % NumEltsPerLane) >= NumHalfEltsPerLane) ? 1 : 0;
41342 M -= NumElts + (SubLane * NumHalfEltsPerLane);
41343 if (BC1.getOperand(SubLane) != BC0.getOperand(0))
41344 M += NumHalfEltsPerLane;
41349 // Canonicalize unary horizontal ops to only refer to lower halves.
41350 for (int i = 0; i != NumElts; ++i) {
41352 if (isUndefOrZero(M))
41354 if (M < NumElts && BC0.getOperand(0) == BC0.getOperand(1) &&
41355 (M % NumEltsPerLane) >= NumHalfEltsPerLane)
41356 M -= NumHalfEltsPerLane;
41357 if (NumElts <= M && BC1.getOperand(0) == BC1.getOperand(1) &&
41358 (M % NumEltsPerLane) >= NumHalfEltsPerLane)
41359       M -= NumHalfEltsPerLane;
41360   }
41363 // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
41364 // single instruction. Attempt to match a v2X64 repeating shuffle pattern that
41365 // represents the LHS/RHS inputs for the lower/upper halves.
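  // For example (illustrative only): shuffle(hadd(a,b),hadd(c,d)) with a wide
  // v2i64-style mask picking the LHS half of each hop resolves to hadd(a,c).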
41366 SmallVector<int, 16> TargetMask128, WideMask128;
41367 if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, TargetMask128) &&
41368 scaleShuffleElements(TargetMask128, 2, WideMask128)) {
41369 assert(isUndefOrZeroOrInRange(WideMask128, 0, 4) && "Illegal shuffle");
41370 bool SingleOp = (Ops.size() == 1);
41371 if (isPack || OneUseOps ||
41372 shouldUseHorizontalOp(SingleOp, DAG, Subtarget)) {
41373 SDValue Lo = isInRange(WideMask128[0], 0, 2) ? BC0 : BC1;
41374 SDValue Hi = isInRange(WideMask128[1], 0, 2) ? BC0 : BC1;
41375 Lo = Lo.getOperand(WideMask128[0] & 1);
41376 Hi = Hi.getOperand(WideMask128[1] & 1);
41378 SDValue Undef = DAG.getUNDEF(SrcVT);
41379 SDValue Zero = getZeroVector(SrcVT, Subtarget, DAG, DL);
41380 Lo = (WideMask128[0] == SM_SentinelZero ? Zero : Lo);
41381 Hi = (WideMask128[1] == SM_SentinelZero ? Zero : Hi);
41382 Lo = (WideMask128[0] == SM_SentinelUndef ? Undef : Lo);
41383 Hi = (WideMask128[1] == SM_SentinelUndef ? Undef : Hi);
41385       return DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
41386     }
41387   }
41389 // If we are post-shuffling a 256-bit hop and not requiring the upper
41390 // elements, then try to narrow to a 128-bit hop directly.
41391 SmallVector<int, 16> WideMask64;
41392 if (Ops.size() == 1 && NumLanes == 2 &&
41393 scaleShuffleElements(Mask, 4, WideMask64) &&
41394 isUndefInRange(WideMask64, 2, 2)) {
41395 int M0 = WideMask64[0];
41396 int M1 = WideMask64[1];
41397 if (isInRange(M0, 0, 4) && isInRange(M1, 0, 4)) {
41398 MVT HalfVT = VT0.getSimpleVT().getHalfNumVectorElementsVT();
41399 unsigned Idx0 = (M0 & 2) ? (SrcVT.getVectorNumElements() / 2) : 0;
41400 unsigned Idx1 = (M1 & 2) ? (SrcVT.getVectorNumElements() / 2) : 0;
41401 SDValue V0 = extract128BitVector(BC[0].getOperand(M0 & 1), Idx0, DAG, DL);
41402 SDValue V1 = extract128BitVector(BC[0].getOperand(M1 & 1), Idx1, DAG, DL);
41403 SDValue Res = DAG.getNode(Opcode0, DL, HalfVT, V0, V1);
41404       return widenSubVector(Res, false, Subtarget, DAG, DL, 256);
41405     }
41406   }
41408   return SDValue();
41409 }
41411 // Attempt to constant fold all of the constant source ops.
41412 // Returns true if the entire shuffle is folded to a constant.
41413 // TODO: Extend this to merge multiple constant Ops and update the mask.
41414 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
41415 ArrayRef<int> Mask, SDValue Root,
41416                                            bool HasVariableMask,
41417                                            SelectionDAG &DAG,
41418 const X86Subtarget &Subtarget) {
41419 MVT VT = Root.getSimpleValueType();
41421 unsigned SizeInBits = VT.getSizeInBits();
41422 unsigned NumMaskElts = Mask.size();
41423 unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
41424 unsigned NumOps = Ops.size();
41426 // Extract constant bits from each source op.
41427 SmallVector<APInt, 16> UndefEltsOps(NumOps);
41428 SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
41429 for (unsigned I = 0; I != NumOps; ++I)
41430     if (!getTargetConstantBitsFromNode(Ops[I], MaskSizeInBits, UndefEltsOps[I],
41431                                        RawBitsOps[I]))
41432       return SDValue();
41434 // If we're optimizing for size, only fold if at least one of the constants is
41435 // only used once or the combined shuffle has included a variable mask
41436   // shuffle; this is to avoid constant pool bloat.
41437 bool IsOptimizingSize = DAG.shouldOptForSize();
41438 if (IsOptimizingSize && !HasVariableMask &&
41439       llvm::none_of(Ops, [](SDValue SrcOp) { return SrcOp->hasOneUse(); }))
41440     return SDValue();
41442 // Shuffle the constant bits according to the mask.
41444 APInt UndefElts(NumMaskElts, 0);
41445 APInt ZeroElts(NumMaskElts, 0);
41446 APInt ConstantElts(NumMaskElts, 0);
41447 SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
41448 APInt::getZero(MaskSizeInBits));
41449   for (unsigned i = 0; i != NumMaskElts; ++i) {
41450     int M = Mask[i];
41451 if (M == SM_SentinelUndef) {
41452       UndefElts.setBit(i);
41453       continue;
41454 } else if (M == SM_SentinelZero) {
41455       ZeroElts.setBit(i);
41456       continue;
41457     }
41458 assert(0 <= M && M < (int)(NumMaskElts * NumOps));
41460 unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
41461 unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
41463 auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
41464 if (SrcUndefElts[SrcMaskIdx]) {
41465       UndefElts.setBit(i);
41466       continue;
41467     }
41469 auto &SrcEltBits = RawBitsOps[SrcOpIdx];
41470     APInt &Bits = SrcEltBits[SrcMaskIdx];
41471     if (Bits.isZero()) {
41472       ZeroElts.setBit(i);
41473       continue;
41474     }
41476     ConstantElts.setBit(i);
41477     ConstantBitData[i] = Bits;
41478   }
41479 assert((UndefElts | ZeroElts | ConstantElts).isAllOnes());
41481 // Attempt to create a zero vector.
41482 if ((UndefElts | ZeroElts).isAllOnes())
41483 return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
41485   // Create the constant data.
41486   MVT MaskSVT;
41487   if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
41488     MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
41489   else
41490     MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
41492 MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
41493   if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
41494     return SDValue();
41496 SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
41497 return DAG.getBitcast(VT, CstOp);
41498 }
41500 namespace llvm {
41501 namespace X86 {
41502 enum {
41503   MaxShuffleCombineDepth = 8
41504 };
41505 } // namespace X86
41506 } // namespace llvm
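// For illustration only: a minimal, self-contained sketch of the constant fold
// above, using plain 64-bit values in place of per-element APInt bit vectors
// and folding undef/zero lanes to zero. The helper name and its simplified
// semantics are hypothetical, not part of the lowering implementation.
static void sketchShuffleConstantFold(ArrayRef<int> Mask,
                                      ArrayRef<uint64_t> LHS,
                                      ArrayRef<uint64_t> RHS,
                                      SmallVectorImpl<uint64_t> &Out) {
  unsigned NumElts = Mask.size();
  assert(LHS.size() == NumElts && RHS.size() == NumElts && "Size mismatch");
  Out.assign(NumElts, 0);
  for (unsigned I = 0; I != NumElts; ++I) {
    int M = Mask[I];
    if (M == SM_SentinelUndef || M == SM_SentinelZero)
      continue; // Model undef lanes as zero for simplicity.
    Out[I] = M < (int)NumElts ? LHS[M]              // First source op.
                              : RHS[M - NumElts];   // Second source op.
  }
}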
41508 /// Fully generic combining of x86 shuffle instructions.
41510 /// This should be the last combine run over the x86 shuffle instructions. Once
41511 /// they have been fully optimized, this will recursively consider all chains
41512 /// of single-use shuffle instructions, build a generic model of the cumulative
41513 /// shuffle operation, and check for simpler instructions which implement this
41514 /// operation. We use this primarily for two purposes:
41516 /// 1) Collapse generic shuffles to specialized single instructions when
41517 /// equivalent. In most cases, this is just an encoding size win, but
41518 /// sometimes we will collapse multiple generic shuffles into a single
41519 /// special-purpose shuffle.
41520 /// 2) Look for sequences of shuffle instructions with 3 or more total
41521 /// instructions, and replace them with the slightly more expensive SSSE3
41522 /// PSHUFB instruction if available. We do this as the last combining step
41523 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
41524 /// a suitable short sequence of other instructions. The PSHUFB will either
41525 /// use a register or have to read from memory and so is slightly (but only
41526 /// slightly) more expensive than the other shuffle instructions.
41528 /// Because this is inherently a quadratic operation (for each shuffle in
41529 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
41530 /// This should never be an issue in practice as the shuffle lowering doesn't
41531 /// produce sequences of more than 8 instructions.
41533 /// FIXME: We will currently miss some cases where the redundant shuffling
41534 /// would simplify under the threshold for PSHUFB formation because of
41535 /// combine-ordering. To fix this, we should do the redundant instruction
41536 /// combining in this recursive walk.
41537 static SDValue combineX86ShufflesRecursively(
41538 ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
41539 ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
41540 unsigned MaxDepth, bool HasVariableMask, bool AllowVariableCrossLaneMask,
41541 bool AllowVariablePerLaneMask, SelectionDAG &DAG,
41542 const X86Subtarget &Subtarget) {
41543 assert(!RootMask.empty() &&
41544 (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
41545 "Illegal shuffle root mask");
41546 MVT RootVT = Root.getSimpleValueType();
41547 assert(RootVT.isVector() && "Shuffles operate on vector types!");
41548 unsigned RootSizeInBits = RootVT.getSizeInBits();
41550 // Bound the depth of our recursive combine because this is ultimately
41551 // quadratic in nature.
41552   if (Depth >= MaxDepth)
41553     return SDValue();
41555 // Directly rip through bitcasts to find the underlying operand.
41556 SDValue Op = SrcOps[SrcOpIndex];
41557 Op = peekThroughOneUseBitcasts(Op);
41559 EVT VT = Op.getValueType();
41560 if (!VT.isVector() || !VT.isSimple())
41561 return SDValue(); // Bail if we hit a non-simple non-vector.
41563 // FIXME: Just bail on f16 for now.
41564   if (VT.getVectorElementType() == MVT::f16)
41565     return SDValue();
41567 assert((RootSizeInBits % VT.getSizeInBits()) == 0 &&
41568 "Can only combine shuffles upto size of the root op.");
41570 // Create a demanded elts mask from the referenced elements of Op.
41571 APInt OpDemandedElts = APInt::getZero(RootMask.size());
41572 for (int M : RootMask) {
41573 int BaseIdx = RootMask.size() * SrcOpIndex;
41574 if (isInRange(M, BaseIdx, BaseIdx + RootMask.size()))
41575       OpDemandedElts.setBit(M - BaseIdx);
41576   }
41577 if (RootSizeInBits != VT.getSizeInBits()) {
41578 // Op is smaller than Root - extract the demanded elts for the subvector.
41579 unsigned Scale = RootSizeInBits / VT.getSizeInBits();
41580 unsigned NumOpMaskElts = RootMask.size() / Scale;
41581 assert((RootMask.size() % Scale) == 0 && "Root mask size mismatch");
41582 assert(OpDemandedElts
41583                .extractBits(RootMask.size() - NumOpMaskElts, NumOpMaskElts)
41584                .isZero() &&
41585 "Out of range elements referenced in root mask");
41586     OpDemandedElts = OpDemandedElts.extractBits(NumOpMaskElts, 0);
41587   }
41588   OpDemandedElts =
41589 APIntOps::ScaleBitMask(OpDemandedElts, VT.getVectorNumElements());
41591 // Extract target shuffle mask and resolve sentinels and inputs.
41592 SmallVector<int, 64> OpMask;
41593 SmallVector<SDValue, 2> OpInputs;
41594 APInt OpUndef, OpZero;
41595 bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
41596 if (getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
41597 OpZero, DAG, Depth, false)) {
41598 // Shuffle inputs must not be larger than the shuffle result.
41599 // TODO: Relax this for single input faux shuffles (e.g. trunc).
41600 if (llvm::any_of(OpInputs, [VT](SDValue OpInput) {
41601           return OpInput.getValueSizeInBits() > VT.getSizeInBits();
41602         }))
41603       return SDValue();
41604 } else if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
41605 (RootSizeInBits % Op.getOperand(0).getValueSizeInBits()) == 0 &&
41606 !isNullConstant(Op.getOperand(1))) {
41607 SDValue SrcVec = Op.getOperand(0);
41608 int ExtractIdx = Op.getConstantOperandVal(1);
41609 unsigned NumElts = VT.getVectorNumElements();
41610 OpInputs.assign({SrcVec});
41611 OpMask.assign(NumElts, SM_SentinelUndef);
41612 std::iota(OpMask.begin(), OpMask.end(), ExtractIdx);
41613     OpZero = OpUndef = APInt::getZero(NumElts);
41614   } else {
41615     return SDValue();
41616   }
41618 // If the shuffle result was smaller than the root, we need to adjust the
41619 // mask indices and pad the mask with undefs.
41620 if (RootSizeInBits > VT.getSizeInBits()) {
41621 unsigned NumSubVecs = RootSizeInBits / VT.getSizeInBits();
41622 unsigned OpMaskSize = OpMask.size();
41623 if (OpInputs.size() > 1) {
41624 unsigned PaddedMaskSize = NumSubVecs * OpMaskSize;
41625       for (int &M : OpMask) {
41626         if (M < 0)
41627           continue;
41628 int EltIdx = M % OpMaskSize;
41629 int OpIdx = M / OpMaskSize;
41630         M = (PaddedMaskSize * OpIdx) + EltIdx;
41631       }
41632     }
41633 OpZero = OpZero.zext(NumSubVecs * OpMaskSize);
41634 OpUndef = OpUndef.zext(NumSubVecs * OpMaskSize);
41635     OpMask.append((NumSubVecs - 1) * OpMaskSize, SM_SentinelUndef);
41636   }
41638 SmallVector<int, 64> Mask;
41639 SmallVector<SDValue, 16> Ops;
41641 // We don't need to merge masks if the root is empty.
41642   bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
41643   if (EmptyRoot) {
41644 // Only resolve zeros if it will remove an input, otherwise we might end
41645 // up in an infinite loop.
41646 bool ResolveKnownZeros = true;
41647 if (!OpZero.isZero()) {
41648 APInt UsedInputs = APInt::getZero(OpInputs.size());
41649       for (int i = 0, e = OpMask.size(); i != e; ++i) {
41650         int M = OpMask[i];
41651         if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
41652           continue;
41653         UsedInputs.setBit(M / OpMask.size());
41654 if (UsedInputs.isAllOnes()) {
41655           ResolveKnownZeros = false;
41656           break;
41657         }
41658       }
41659     }
41660 resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
41661                                       ResolveKnownZeros);
41663     Mask = OpMask;
41664     Ops.append(OpInputs.begin(), OpInputs.end());
41665   } else {
41666 resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
41668 // Add the inputs to the Ops list, avoiding duplicates.
41669 Ops.append(SrcOps.begin(), SrcOps.end());
41671 auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
41672 // Attempt to find an existing match.
41673 SDValue InputBC = peekThroughBitcasts(Input);
41674 for (int i = 0, e = Ops.size(); i < e; ++i)
41675       if (InputBC == peekThroughBitcasts(Ops[i]))
41676         return i;
41677 // Match failed - should we replace an existing Op?
41678 if (InsertionPoint >= 0) {
41679 Ops[InsertionPoint] = Input;
41680 return InsertionPoint;
41682 // Add to the end of the Ops list.
41683 Ops.push_back(Input);
41684       return Ops.size() - 1;
41685     };
41687 SmallVector<int, 2> OpInputIdx;
41688 for (SDValue OpInput : OpInputs)
41689 OpInputIdx.push_back(
41690 AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
41692 assert(((RootMask.size() > OpMask.size() &&
41693 RootMask.size() % OpMask.size() == 0) ||
41694 (OpMask.size() > RootMask.size() &&
41695 OpMask.size() % RootMask.size() == 0) ||
41696 OpMask.size() == RootMask.size()) &&
41697 "The smaller number of elements must divide the larger.");
41699 // This function can be performance-critical, so we rely on the power-of-2
41700 // knowledge that we have about the mask sizes to replace div/rem ops with
41701 // bit-masks and shifts.
41702 assert(llvm::has_single_bit<uint32_t>(RootMask.size()) &&
41703 "Non-power-of-2 shuffle mask sizes");
41704 assert(llvm::has_single_bit<uint32_t>(OpMask.size()) &&
41705 "Non-power-of-2 shuffle mask sizes");
41706 unsigned RootMaskSizeLog2 = llvm::countr_zero(RootMask.size());
41707 unsigned OpMaskSizeLog2 = llvm::countr_zero(OpMask.size());
41709 unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
41710 unsigned RootRatio =
41711 std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
41712 unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
41713 assert((RootRatio == 1 || OpRatio == 1) &&
41714 "Must not have a ratio for both incoming and op masks!");
41716 assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
41717 assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
41718 assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
41719 unsigned RootRatioLog2 = llvm::countr_zero(RootRatio);
41720 unsigned OpRatioLog2 = llvm::countr_zero(OpRatio);
41722 Mask.resize(MaskWidth, SM_SentinelUndef);
41724 // Merge this shuffle operation's mask into our accumulated mask. Note that
41725 // this shuffle's mask will be the first applied to the input, followed by
41726 // the root mask to get us all the way to the root value arrangement. The
41727 // reason for this order is that we are recursing up the operation chain.
41728 for (unsigned i = 0; i < MaskWidth; ++i) {
41729 unsigned RootIdx = i >> RootRatioLog2;
41730 if (RootMask[RootIdx] < 0) {
41731 // This is a zero or undef lane, we're done.
41732       Mask[i] = RootMask[RootIdx];
41733       continue;
41734     }
41736     unsigned RootMaskedIdx =
41737         RootRatio == 1
41738 ? RootMask[RootIdx]
41739 : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
41741 // Just insert the scaled root mask value if it references an input other
41742 // than the SrcOp we're currently inserting.
41743 if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
41744 (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
41745       Mask[i] = RootMaskedIdx;
41746       continue;
41747     }
41749 RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
41750 unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
41751 if (OpMask[OpIdx] < 0) {
41752       // The incoming lanes are zero or undef, it doesn't matter which ones we
41753       // are actually using.
41754       Mask[i] = OpMask[OpIdx];
41755       continue;
41756     }
41758 // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
41759 unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
41760 : (OpMask[OpIdx] << OpRatioLog2) +
41761 (RootMaskedIdx & (OpRatio - 1));
41763 OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
41764 int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
41765 assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
41766 OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
41768     Mask[i] = OpMaskedIdx;
41769     }
41770   }
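    // e.g. a 4-element RootMask over an 8-element OpMask gives RootRatio == 2:
    // output lane i reads RootMask[i / 2], rescales it to
    // 2 * RootMask[i / 2] + (i & 1), then maps that index through OpMask.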
41772 // Peek through vector widenings and set out of bounds mask indices to undef.
41773 // TODO: Can resolveTargetShuffleInputsAndMask do some of this?
41774 for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
41775 SDValue &Op = Ops[I];
41776 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op.getOperand(0).isUndef() &&
41777 isNullConstant(Op.getOperand(2))) {
41778 Op = Op.getOperand(1);
41779 unsigned Scale = RootSizeInBits / Op.getValueSizeInBits();
41780 int Lo = I * Mask.size();
41781 int Hi = (I + 1) * Mask.size();
41782 int NewHi = Lo + (Mask.size() / Scale);
41783 for (int &M : Mask) {
41784 if (Lo <= M && NewHi <= M && M < Hi)
41785           M = SM_SentinelUndef;
41786       }
41787     }
41788   }
41790 // Peek through any free extract_subvector nodes back to root size.
41791 for (SDValue &Op : Ops)
41792 while (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
41793 (RootSizeInBits % Op.getOperand(0).getValueSizeInBits()) == 0 &&
41794 isNullConstant(Op.getOperand(1)))
41795 Op = Op.getOperand(0);
41797 // Remove unused/repeated shuffle source ops.
41798 resolveTargetShuffleInputsAndMask(Ops, Mask);
41800 // Handle the all undef/zero/ones cases early.
41801 if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
41802 return DAG.getUNDEF(RootVT);
41803 if (all_of(Mask, [](int Idx) { return Idx < 0; }))
41804 return getZeroVector(RootVT, Subtarget, DAG, SDLoc(Root));
41805 if (Ops.size() == 1 && ISD::isBuildVectorAllOnes(Ops[0].getNode()) &&
41806 !llvm::is_contained(Mask, SM_SentinelZero))
41807 return getOnesVector(RootVT, DAG, SDLoc(Root));
41809 assert(!Ops.empty() && "Shuffle with no inputs detected");
41810 HasVariableMask |= IsOpVariableMask;
41812 // Update the list of shuffle nodes that have been combined so far.
41813   SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
41814                                                 SrcNodes.end());
41815 CombinedNodes.push_back(Op.getNode());
41817 // See if we can recurse into each shuffle source op (if it's a target
41818 // shuffle). The source op should only be generally combined if it either has
41819   // a single use (i.e. current Op) or all its users have already been combined;
41820   // if not, we can still combine but should prevent generation of variable
41821 // shuffles to avoid constant pool bloat.
41822 // Don't recurse if we already have more source ops than we can combine in
41823 // the remaining recursion depth.
41824 if (Ops.size() < (MaxDepth - Depth)) {
41825 for (int i = 0, e = Ops.size(); i < e; ++i) {
41826 // For empty roots, we need to resolve zeroable elements before combining
41827 // them with other shuffles.
41828       SmallVector<int, 64> ResolvedMask = Mask;
41829       if (EmptyRoot)
41830 resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
41831 bool AllowCrossLaneVar = false;
41832 bool AllowPerLaneVar = false;
41833 if (Ops[i].getNode()->hasOneUse() ||
41834 SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode())) {
41835 AllowCrossLaneVar = AllowVariableCrossLaneMask;
41836         AllowPerLaneVar = AllowVariablePerLaneMask;
41837       }
41838 if (SDValue Res = combineX86ShufflesRecursively(
41839 Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1, MaxDepth,
41840               HasVariableMask, AllowCrossLaneVar, AllowPerLaneVar, DAG,
41841               Subtarget))
41842         return Res;
41843     }
41844   }
41846 // Attempt to constant fold all of the constant source ops.
41847 if (SDValue Cst = combineX86ShufflesConstants(
41848           Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
41849     return Cst;
41851 // If constant fold failed and we only have constants - then we have
41852 // multiple uses by a single non-variable shuffle - just bail.
41853   if (Depth == 0 && llvm::all_of(Ops, [&](SDValue Op) {
41854         APInt UndefElts;
41855         SmallVector<APInt> RawBits;
41856         unsigned EltSizeInBits = RootSizeInBits / Mask.size();
41857         return getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
41858                                              RawBits);
41859       }))
41860     return SDValue();
41863 // Canonicalize the combined shuffle mask chain with horizontal ops.
41864 // NOTE: This will update the Ops and Mask.
41865 if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
41866 Ops, Mask, RootSizeInBits, SDLoc(Root), DAG, Subtarget))
41867 return DAG.getBitcast(RootVT, HOp);
41869 // Try to refine our inputs given our knowledge of target shuffle mask.
41870 for (auto I : enumerate(Ops)) {
41871 int OpIdx = I.index();
41872 SDValue &Op = I.value();
41874 // What range of shuffle mask element values results in picking from Op?
41875 int Lo = OpIdx * Mask.size();
41876 int Hi = Lo + Mask.size();
41878 // Which elements of Op do we demand, given the mask's granularity?
41879 APInt OpDemandedElts(Mask.size(), 0);
41880 for (int MaskElt : Mask) {
41881 if (isInRange(MaskElt, Lo, Hi)) { // Picks from Op?
41882 int OpEltIdx = MaskElt - Lo;
41883         OpDemandedElts.setBit(OpEltIdx);
41884       }
41885     }
41887 // Is the shuffle result smaller than the root?
41888 if (Op.getValueSizeInBits() < RootSizeInBits) {
41889 // We padded the mask with undefs. But we now need to undo that.
41890 unsigned NumExpectedVectorElts = Mask.size();
41891 unsigned EltSizeInBits = RootSizeInBits / NumExpectedVectorElts;
41892 unsigned NumOpVectorElts = Op.getValueSizeInBits() / EltSizeInBits;
41893 assert(!OpDemandedElts.extractBits(
41894 NumExpectedVectorElts - NumOpVectorElts, NumOpVectorElts) &&
41895 "Demanding the virtual undef widening padding?");
41896       OpDemandedElts = OpDemandedElts.trunc(NumOpVectorElts); // NUW
41897     }
41899 // The Op itself may be of different VT, so we need to scale the mask.
41900 unsigned NumOpElts = Op.getValueType().getVectorNumElements();
41901 APInt OpScaledDemandedElts = APIntOps::ScaleBitMask(OpDemandedElts, NumOpElts);
41903     // Can this operand be simplified any further, given its demanded elements?
41904 if (SDValue NewOp =
41905 DAG.getTargetLoweringInfo().SimplifyMultipleUseDemandedVectorElts(
41906                 Op, OpScaledDemandedElts, DAG))
41907       Op = NewOp;
41908   }
41909 // FIXME: should we rerun resolveTargetShuffleInputsAndMask() now?
41911 // Widen any subvector shuffle inputs we've collected.
41912 // TODO: Remove this to avoid generating temporary nodes, we should only
41913 // widen once combineX86ShuffleChain has found a match.
41914 if (any_of(Ops, [RootSizeInBits](SDValue Op) {
41915         return Op.getValueSizeInBits() < RootSizeInBits;
41916       })) {
41917 for (SDValue &Op : Ops)
41918 if (Op.getValueSizeInBits() < RootSizeInBits)
41919         Op = widenSubVector(Op, false, Subtarget, DAG, SDLoc(Op),
41920                             RootSizeInBits);
41921 // Reresolve - we might have repeated subvector sources.
41922     resolveTargetShuffleInputsAndMask(Ops, Mask);
41923   }
41925 // We can only combine unary and binary shuffle mask cases.
41926 if (Ops.size() <= 2) {
41927 // Minor canonicalization of the accumulated shuffle mask to make it easier
41928 // to match below. All this does is detect masks with sequential pairs of
41929 // elements, and shrink them to the half-width mask. It does this in a loop
41930 // so it will reduce the size of the mask to the minimal width mask which
41931 // performs an equivalent shuffle.
41932 while (Mask.size() > 1) {
41933 SmallVector<int, 64> WidenedMask;
41934       if (!canWidenShuffleElements(Mask, WidenedMask))
41935         break;
41936       Mask = std::move(WidenedMask);
41937     }
41939 // Canonicalization of binary shuffle masks to improve pattern matching by
41940 // commuting the inputs.
41941 if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
41942 ShuffleVectorSDNode::commuteMask(Mask);
41943       std::swap(Ops[0], Ops[1]);
41944     }
41946 // Try to combine into a single shuffle instruction.
41947 if (SDValue Shuffle = combineX86ShuffleChain(
41948 Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
41949             AllowVariablePerLaneMask, DAG, Subtarget))
41950       return Shuffle;
41952 // If all the operands come from the same larger vector, fallthrough and try
41953 // to use combineX86ShuffleChainWithExtract.
41954 SDValue LHS = peekThroughBitcasts(Ops.front());
41955 SDValue RHS = peekThroughBitcasts(Ops.back());
41956 if (Ops.size() != 2 || !Subtarget.hasAVX2() || RootSizeInBits != 128 ||
41957 (RootSizeInBits / Mask.size()) != 64 ||
41958 LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
41959 RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
41960         LHS.getOperand(0) != RHS.getOperand(0))
41961       return SDValue();
41962   }
41964 // If that failed and any input is extracted then try to combine as a
41965 // shuffle with the larger type.
41966 return combineX86ShuffleChainWithExtract(
41967 Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
41968       AllowVariablePerLaneMask, DAG, Subtarget);
41969 }
41971 /// Helper entry wrapper to combineX86ShufflesRecursively.
41972 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
41973 const X86Subtarget &Subtarget) {
41974 return combineX86ShufflesRecursively(
41975 {Op}, 0, Op, {0}, {}, /*Depth*/ 0, X86::MaxShuffleCombineDepth,
41976 /*HasVarMask*/ false,
41977       /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, DAG,
41978       Subtarget);
41979 }
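// A self-contained sketch of the mask-merge arithmetic performed inside
// combineX86ShufflesRecursively for the single-source case, written with
// div/mod instead of the equivalent shift/mask forms (the sizes are powers of
// two). The helper name is hypothetical and exists only for exposition.
static int sketchMergeMaskIndex(ArrayRef<int> RootMask, ArrayRef<int> OpMask,
                                unsigned I) {
  unsigned MaskWidth = unsigned(std::max(RootMask.size(), OpMask.size()));
  unsigned RootRatio = MaskWidth / RootMask.size();
  unsigned OpRatio = MaskWidth / OpMask.size();
  int RootM = RootMask[I / RootRatio];
  if (RootM < 0)
    return RootM; // Undef/zero sentinels propagate unchanged.
  unsigned RootMaskedIdx = (RootM * RootRatio) + (I % RootRatio);
  int OpM = OpMask[RootMaskedIdx / OpRatio];
  if (OpM < 0)
    return OpM;
  return (OpM * OpRatio) + (RootMaskedIdx % OpRatio);
}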
41981 /// Get the PSHUF-style mask from a PSHUF node.
41982 ///
41983 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
41984 /// PSHUF-style masks that can be reused with such instructions.
41985 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
41986 MVT VT = N.getSimpleValueType();
41987 SmallVector<int, 4> Mask;
41988 SmallVector<SDValue, 2> Ops;
41989   bool HaveMask =
41990       getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask);
41991   (void)HaveMask;
41992   assert(HaveMask);
41994 // If we have more than 128-bits, only the low 128-bits of shuffle mask
41995 // matter. Check that the upper masks are repeats and remove them.
41996 if (VT.getSizeInBits() > 128) {
41997     int LaneElts = 128 / VT.getScalarSizeInBits();
41998 #ifndef NDEBUG
41999 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
42000 for (int j = 0; j < LaneElts; ++j)
42001 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
42002 "Mask doesn't repeat in high 128-bit lanes!");
42004     Mask.resize(LaneElts);
42005   }
42007   switch (N.getOpcode()) {
42008   case X86ISD::PSHUFD:
42009     return Mask;
42010   case X86ISD::PSHUFLW:
42011     Mask.resize(4);
42012     return Mask;
42013   case X86ISD::PSHUFHW:
42014     Mask.erase(Mask.begin(), Mask.begin() + 4);
42015     for (int &M : Mask)
42016       M -= 4;
42017     return Mask;
42018   }
42019   llvm_unreachable("No valid shuffle instruction found!");
42020 }
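// For example, a PSHUFD with immediate 0x1B (binary 00'01'10'11) yields the
// mask <3,2,1,0>; for PSHUFHW only the high-half lanes are returned, rebased
// to 0..3 by the erase/adjust loop above.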
42023 /// Search for a combinable shuffle across a chain ending in pshufd.
42025 /// We walk up the chain and look for a combinable shuffle, skipping over
42026 /// shuffles that we could hoist this shuffle's transformation past without
42027 /// altering anything.
42028 static SDValue
42029 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
42030 SelectionDAG &DAG) {
42031 assert(N.getOpcode() == X86ISD::PSHUFD &&
42032 "Called with something other than an x86 128-bit half shuffle!");
42035 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
42036 // of the shuffles in the chain so that we can form a fresh chain to replace
42038 SmallVector<SDValue, 8> Chain;
42039 SDValue V = N.getOperand(0);
42040 for (; V.hasOneUse(); V = V.getOperand(0)) {
42041     switch (V.getOpcode()) {
42042     default:
42043       return SDValue(); // Nothing combined!
42045     case ISD::BITCAST:
42046       // Skip bitcasts as we always know the type for the target specific
42047       // instructions.
42048       continue;
42050     case X86ISD::PSHUFD:
42051       // Found another dword shuffle.
42052       break;
42054 case X86ISD::PSHUFLW:
42055 // Check that the low words (being shuffled) are the identity in the
42056 // dword shuffle, and the high words are self-contained.
42057 if (Mask[0] != 0 || Mask[1] != 1 ||
42058           !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
42059         return SDValue();
42061       Chain.push_back(V);
42062       continue;
42064 case X86ISD::PSHUFHW:
42065 // Check that the high words (being shuffled) are the identity in the
42066 // dword shuffle, and the low words are self-contained.
42067 if (Mask[2] != 2 || Mask[3] != 3 ||
42068           !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
42069         return SDValue();
42071       Chain.push_back(V);
42072       continue;
42074 case X86ISD::UNPCKL:
42075 case X86ISD::UNPCKH:
42076 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
42077 // shuffle into a preceding word shuffle.
42078 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
42079           V.getSimpleValueType().getVectorElementType() != MVT::i16)
42080         return SDValue();
42082 // Search for a half-shuffle which we can combine with.
42083 unsigned CombineOp =
42084 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
42085 if (V.getOperand(0) != V.getOperand(1) ||
42086           !V->isOnlyUserOf(V.getOperand(0).getNode()))
42087         return SDValue();
42088 Chain.push_back(V);
42089       V = V.getOperand(0);
42090       do {
42091         switch (V.getOpcode()) {
42092         default:
42093 return SDValue(); // Nothing to combine.
42095 case X86ISD::PSHUFLW:
42096 case X86ISD::PSHUFHW:
42097           if (V.getOpcode() == CombineOp)
42098             break;
42100           Chain.push_back(V);
42102           [[fallthrough]];
42103         case ISD::BITCAST:
42104           V = V.getOperand(0);
42105           continue;
42106         }
42107         break;
42108       } while (V.hasOneUse());
42109     }
42111     // Break out of the loop if we break out of the switch.
42112     break;
42113   }
42115 if (!V.hasOneUse())
42116     // We fell out of the loop without finding a viable combining instruction.
42117     return SDValue();
42119 // Merge this node's mask and our incoming mask.
42120 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
42121   for (int &M : Mask)
42122     M = VMask[M];
42123 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
42124 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
42126 // Rebuild the chain around this new shuffle.
42127 while (!Chain.empty()) {
42128 SDValue W = Chain.pop_back_val();
42130 if (V.getValueType() != W.getOperand(0).getValueType())
42131 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
42133     switch (W.getOpcode()) {
42134     default:
42135 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
42137 case X86ISD::UNPCKL:
42138 case X86ISD::UNPCKH:
42139       V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
42140       break;
42142 case X86ISD::PSHUFD:
42143 case X86ISD::PSHUFLW:
42144 case X86ISD::PSHUFHW:
42145       V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
42146       break;
42147     }
42148   }
42149 if (V.getValueType() != N.getValueType())
42150 V = DAG.getBitcast(N.getValueType(), V);
42152   // Return the new chain to replace N.
42153   return V;
42154 }
42156 // Attempt to commute shufps LHS loads:
42157 // permilps(shufps(load(),x)) --> permilps(shufps(x,load()))
42158 static SDValue combineCommutableSHUFP(SDValue N, MVT VT, const SDLoc &DL,
42159 SelectionDAG &DAG) {
42160 // TODO: Add vXf64 support.
42161   if (VT != MVT::v4f32 && VT != MVT::v8f32 && VT != MVT::v16f32)
42162     return SDValue();
42164 // SHUFP(LHS, RHS) -> SHUFP(RHS, LHS) iff LHS is foldable + RHS is not.
42165 auto commuteSHUFP = [&VT, &DL, &DAG](SDValue Parent, SDValue V) {
42166     if (V.getOpcode() != X86ISD::SHUFP || !Parent->isOnlyUserOf(V.getNode()))
42167       return SDValue();
42168 SDValue N0 = V.getOperand(0);
42169 SDValue N1 = V.getOperand(1);
42170 unsigned Imm = V.getConstantOperandVal(2);
42171 const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
42172 if (!X86::mayFoldLoad(peekThroughOneUseBitcasts(N0), Subtarget) ||
42173         X86::mayFoldLoad(peekThroughOneUseBitcasts(N1), Subtarget))
42174       return SDValue();
42175 Imm = ((Imm & 0x0F) << 4) | ((Imm & 0xF0) >> 4);
42176 return DAG.getNode(X86ISD::SHUFP, DL, VT, N1, N0,
42177                        DAG.getTargetConstant(Imm, DL, MVT::i8));
42178   };
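  // Worked example: commuting SHUFPS with Imm = 0xB1 gives Imm = 0x1B; the low
  // nibble (element selects from the old LHS) and the high nibble (from the
  // old RHS) swap places along with the operands.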
42180 switch (N.getOpcode()) {
42181 case X86ISD::VPERMILPI:
42182 if (SDValue NewSHUFP = commuteSHUFP(N, N.getOperand(0))) {
42183 unsigned Imm = N.getConstantOperandVal(1);
42184 return DAG.getNode(X86ISD::VPERMILPI, DL, VT, NewSHUFP,
42185                          DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
42186     }
42187     break;
42188 case X86ISD::SHUFP: {
42189 SDValue N0 = N.getOperand(0);
42190 SDValue N1 = N.getOperand(1);
42191     unsigned Imm = N.getConstantOperandVal(2);
42192     if (N0 == N1) {
42193 if (SDValue NewSHUFP = commuteSHUFP(N, N0))
42194 return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, NewSHUFP,
42195 DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
42196 } else if (SDValue NewSHUFP = commuteSHUFP(N, N0)) {
42197 return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, N1,
42198 DAG.getTargetConstant(Imm ^ 0x0A, DL, MVT::i8));
42199 } else if (SDValue NewSHUFP = commuteSHUFP(N, N1)) {
42200 return DAG.getNode(X86ISD::SHUFP, DL, VT, N0, NewSHUFP,
42201                          DAG.getTargetConstant(Imm ^ 0xA0, DL, MVT::i8));
42202     }
42203     break;
42204   }
42205   }
42207   return SDValue();
42208 }
42210 // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
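// This pays off when the shuffled operands are free to shuffle, e.g.
// pshufd(xor(x, splat)) -> xor(pshufd(x), pshufd(splat)), where the shuffle of
// the splat is subsequently removed.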
42211 static SDValue canonicalizeShuffleWithBinOps(SDValue N, SelectionDAG &DAG,
42212                                              const SDLoc &DL) {
42213 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42214 EVT ShuffleVT = N.getValueType();
42216 auto IsMergeableWithShuffle = [&DAG](SDValue Op, bool FoldLoad = false) {
42217 // AllZeros/AllOnes constants are freely shuffled and will peek through
42218 // bitcasts. Other constant build vectors do not peek through bitcasts. Only
42219 // merge with target shuffles if it has one use so shuffle combining is
42220 // likely to kick in. Shuffles of splats are expected to be removed.
42221 return ISD::isBuildVectorAllOnes(Op.getNode()) ||
42222 ISD::isBuildVectorAllZeros(Op.getNode()) ||
42223 ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
42224 ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()) ||
42225 (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op->hasOneUse()) ||
42226 (isTargetShuffle(Op.getOpcode()) && Op->hasOneUse()) ||
42227 (FoldLoad && isShuffleFoldableLoad(Op)) ||
42228            DAG.isSplatValue(Op, /*AllowUndefs*/ false);
42229   };
42230 auto IsSafeToMoveShuffle = [ShuffleVT](SDValue Op, unsigned BinOp) {
42231     // Ensure we only shuffle whole vector src elements, unless it's a logical
42232     // binop where we can more aggressively move shuffles from dst to src.
42233 return BinOp == ISD::AND || BinOp == ISD::OR || BinOp == ISD::XOR ||
42234 BinOp == X86ISD::ANDNP ||
42235            (Op.getScalarValueSizeInBits() <= ShuffleVT.getScalarSizeInBits());
42236   };
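  // e.g. a v4f32 SHUFP may be hoisted through a v2i64 XOR, since bitwise logic
  // is insensitive to element boundaries; for arithmetic binops the shuffle
  // must move whole source elements instead.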
42238   unsigned Opc = N.getOpcode();
42239   switch (Opc) {
42240 // Unary and Unary+Permute Shuffles.
42241 case X86ISD::PSHUFB: {
42242 // Don't merge PSHUFB if it contains zero'd elements.
42243 SmallVector<int> Mask;
42244 SmallVector<SDValue> Ops;
42245     if (!getTargetShuffleMask(N.getNode(), ShuffleVT.getSimpleVT(), false, Ops,
42246                               Mask) ||
42247         any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
42248       return SDValue();
42249     [[fallthrough]];
  }
42250 case X86ISD::VBROADCAST:
42251 case X86ISD::MOVDDUP:
42252 case X86ISD::PSHUFD:
42253 case X86ISD::PSHUFHW:
42254 case X86ISD::PSHUFLW:
42255 case X86ISD::VPERMI:
42256 case X86ISD::VPERMILPI: {
42257 if (N.getOperand(0).getValueType() == ShuffleVT &&
42258 N->isOnlyUserOf(N.getOperand(0).getNode())) {
42259 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
42260 unsigned SrcOpcode = N0.getOpcode();
42261 if (TLI.isBinOp(SrcOpcode) && IsSafeToMoveShuffle(N0, SrcOpcode)) {
42262 SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
42263 SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
42264 if (IsMergeableWithShuffle(Op00, Opc != X86ISD::PSHUFB) ||
42265           IsMergeableWithShuffle(Op01, Opc != X86ISD::PSHUFB)) {
42266         SDValue LHS, RHS;
42267 Op00 = DAG.getBitcast(ShuffleVT, Op00);
42268 Op01 = DAG.getBitcast(ShuffleVT, Op01);
42269 if (N.getNumOperands() == 2) {
42270 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, N.getOperand(1));
42271           RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, N.getOperand(1));
42272         } else {
42273 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00);
42274           RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01);
42275         }
42276 EVT OpVT = N0.getValueType();
42277 return DAG.getBitcast(ShuffleVT,
42278 DAG.getNode(SrcOpcode, DL, OpVT,
42279 DAG.getBitcast(OpVT, LHS),
42280                                           DAG.getBitcast(OpVT, RHS)));
42281         }
42282       }
42283     }
42284     break;
42285   }
42286 // Binary and Binary+Permute Shuffles.
42287 case X86ISD::INSERTPS: {
42288 // Don't merge INSERTPS if it contains zero'd elements.
42289 unsigned InsertPSMask = N.getConstantOperandVal(2);
42290     unsigned ZeroMask = InsertPSMask & 0xF;
42291     if (ZeroMask != 0)
42292       return SDValue();
42293     [[fallthrough]];
42294   }
42295 case X86ISD::MOVSD:
42296 case X86ISD::MOVSS:
42297 case X86ISD::BLENDI:
42298 case X86ISD::SHUFP:
42299 case X86ISD::UNPCKH:
42300 case X86ISD::UNPCKL: {
42301 if (N->isOnlyUserOf(N.getOperand(0).getNode()) &&
42302 N->isOnlyUserOf(N.getOperand(1).getNode())) {
42303 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
42304 SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
42305 unsigned SrcOpcode = N0.getOpcode();
42306 if (TLI.isBinOp(SrcOpcode) && N1.getOpcode() == SrcOpcode &&
42307 N0.getValueType() == N1.getValueType() &&
42308 IsSafeToMoveShuffle(N0, SrcOpcode) &&
42309 IsSafeToMoveShuffle(N1, SrcOpcode)) {
42310 SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
42311 SDValue Op10 = peekThroughOneUseBitcasts(N1.getOperand(0));
42312 SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
42313 SDValue Op11 = peekThroughOneUseBitcasts(N1.getOperand(1));
42314 // Ensure the total number of shuffles doesn't increase by folding this
42315 // shuffle through to the source ops.
42316 if (((IsMergeableWithShuffle(Op00) && IsMergeableWithShuffle(Op10)) ||
42317 (IsMergeableWithShuffle(Op01) && IsMergeableWithShuffle(Op11))) ||
42318 ((IsMergeableWithShuffle(Op00) || IsMergeableWithShuffle(Op10)) &&
42319 (IsMergeableWithShuffle(Op01) || IsMergeableWithShuffle(Op11)))) {
42321 Op00 = DAG.getBitcast(ShuffleVT, Op00);
42322 Op10 = DAG.getBitcast(ShuffleVT, Op10);
42323 Op01 = DAG.getBitcast(ShuffleVT, Op01);
42324 Op11 = DAG.getBitcast(ShuffleVT, Op11);
42325 if (N.getNumOperands() == 3) {
42326 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10, N.getOperand(2));
42327 RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11, N.getOperand(2));
42329 LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10);
42330 RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11);
42332 EVT OpVT = N0.getValueType();
42333 return DAG.getBitcast(ShuffleVT,
42334 DAG.getNode(SrcOpcode, DL, OpVT,
42335 DAG.getBitcast(OpVT, LHS),
42336                                             DAG.getBitcast(OpVT, RHS)));
42337         }
42338       }
42339     }
42340     break;
42341   }
42342   }
42344   return SDValue();
42345 }
42346 /// Attempt to fold vpermf128(op(),op()) -> op(vpermf128(),vpermf128()).
42347 static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
42348                                                       SelectionDAG &DAG,
42349                                                       const SDLoc &DL) {
42350 assert(V.getOpcode() == X86ISD::VPERM2X128 && "Unknown lane shuffle");
42352 MVT VT = V.getSimpleValueType();
42353 SDValue Src0 = peekThroughBitcasts(V.getOperand(0));
42354 SDValue Src1 = peekThroughBitcasts(V.getOperand(1));
42355 unsigned SrcOpc0 = Src0.getOpcode();
42356 unsigned SrcOpc1 = Src1.getOpcode();
42357 EVT SrcVT0 = Src0.getValueType();
42358 EVT SrcVT1 = Src1.getValueType();
42360   if (!Src1.isUndef() && (SrcVT0 != SrcVT1 || SrcOpc0 != SrcOpc1))
42361     return SDValue();
42363   switch (SrcOpc0) {
42364 case X86ISD::MOVDDUP: {
42365 SDValue LHS = Src0.getOperand(0);
42366     SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
42367     SDValue Res =
42368 DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS, V.getOperand(2));
42369 Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res);
42370     return DAG.getBitcast(VT, Res);
42371   }
42372 case X86ISD::VPERMILPI:
42373 // TODO: Handle v4f64 permutes with different low/high lane masks.
42374 if (SrcVT0 == MVT::v4f64) {
42375 uint64_t Mask = Src0.getConstantOperandVal(1);
42376       if ((Mask & 0x3) != ((Mask >> 2) & 0x3))
42377         break;
42378     }
42379     [[fallthrough]];
42380 case X86ISD::VSHLI:
42381 case X86ISD::VSRLI:
42382 case X86ISD::VSRAI:
42383 case X86ISD::PSHUFD:
42384 if (Src1.isUndef() || Src0.getOperand(1) == Src1.getOperand(1)) {
42385 SDValue LHS = Src0.getOperand(0);
42386 SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
42387 SDValue Res = DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS,
42389 Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res, Src0.getOperand(1));
42390       return DAG.getBitcast(VT, Res);
42391     }
42392     break;
42393   }
42395   return SDValue();
42396 }
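// e.g. vperm2f128(movddup(x), movddup(y), imm)
//        --> movddup(vperm2f128(x, y, imm)),
// so the repeated op is performed once on the pre-shuffled source.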
42398 /// Try to combine x86 target specific shuffles.
42399 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
42400 TargetLowering::DAGCombinerInfo &DCI,
42401                                     const X86Subtarget &Subtarget) {
42402   SDLoc DL(N);
42403 MVT VT = N.getSimpleValueType();
42404 SmallVector<int, 4> Mask;
42405 unsigned Opcode = N.getOpcode();
42407   if (SDValue R = combineCommutableSHUFP(N, VT, DL, DAG))
42408     return R;
42410   // Handle specific target shuffles.
42411   switch (Opcode) {
42412 case X86ISD::MOVDDUP: {
42413 SDValue Src = N.getOperand(0);
42414 // Turn a 128-bit MOVDDUP of a full vector load into movddup+vzload.
42415 if (VT == MVT::v2f64 && Src.hasOneUse() &&
42416 ISD::isNormalLoad(Src.getNode())) {
42417 LoadSDNode *LN = cast<LoadSDNode>(Src);
42418 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::f64, MVT::v2f64, DAG)) {
42419 SDValue Movddup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, VZLoad);
42420 DCI.CombineTo(N.getNode(), Movddup);
42421 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
42422 DCI.recursivelyDeleteUnusedNodes(LN);
42423         return N; // Return N so it doesn't get rechecked!
42424       }
42425     }
42427     return SDValue();
42428   }
42429 case X86ISD::VBROADCAST: {
42430 SDValue Src = N.getOperand(0);
42431 SDValue BC = peekThroughBitcasts(Src);
42432 EVT SrcVT = Src.getValueType();
42433 EVT BCVT = BC.getValueType();
42435 // If broadcasting from another shuffle, attempt to simplify it.
42436 // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
42437 if (isTargetShuffle(BC.getOpcode()) &&
42438 VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
42439 unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
42440       SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
42441                                         SM_SentinelUndef);
42442 for (unsigned i = 0; i != Scale; ++i)
42443 DemandedMask[i] = i;
42444 if (SDValue Res = combineX86ShufflesRecursively(
42445 {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
42446 X86::MaxShuffleCombineDepth,
42447 /*HasVarMask*/ false, /*AllowCrossLaneVarMask*/ true,
42448 /*AllowPerLaneVarMask*/ true, DAG, Subtarget))
42449 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
42450                            DAG.getBitcast(SrcVT, Res));
42451     }
42453 // broadcast(bitcast(src)) -> bitcast(broadcast(src))
42454 // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
42455 if (Src.getOpcode() == ISD::BITCAST &&
42456 SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits() &&
42457 DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
42458 FixedVectorType::isValidElementType(
42459 BCVT.getScalarType().getTypeForEVT(*DAG.getContext()))) {
42460 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
42461 VT.getVectorNumElements());
42462       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
42463     }
42465 // vbroadcast(bitcast(vbroadcast(src))) -> bitcast(vbroadcast(src))
42466     // If we're re-broadcasting a smaller type then broadcast with that type and
42467     // bitcast.
42468 // TODO: Do this for any splat?
42469 if (Src.getOpcode() == ISD::BITCAST &&
42470 (BC.getOpcode() == X86ISD::VBROADCAST ||
42471 BC.getOpcode() == X86ISD::VBROADCAST_LOAD) &&
42472 (VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits()) == 0 &&
42473         (VT.getSizeInBits() % BCVT.getSizeInBits()) == 0) {
42474       MVT NewVT =
42475 MVT::getVectorVT(BCVT.getSimpleVT().getScalarType(),
42476 VT.getSizeInBits() / BCVT.getScalarSizeInBits());
42477       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
42478     }
42480 // Reduce broadcast source vector to lowest 128-bits.
42481 if (SrcVT.getSizeInBits() > 128)
42482 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
42483 extract128BitVector(Src, 0, DAG, DL));
42485 // broadcast(scalar_to_vector(x)) -> broadcast(x).
42486 if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
42487 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
42489 // broadcast(extract_vector_elt(x, 0)) -> broadcast(x).
42490 if (Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
42491 isNullConstant(Src.getOperand(1)) &&
42492 DAG.getTargetLoweringInfo().isTypeLegal(
42493 Src.getOperand(0).getValueType()))
42494 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
42496 // Share broadcast with the longest vector and extract low subvector (free).
42497 // Ensure the same SDValue from the SDNode use is being used.
42498 for (SDNode *User : Src->uses())
42499 if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
42500 Src == User->getOperand(0) &&
42501 User->getValueSizeInBits(0).getFixedValue() >
42502 VT.getFixedSizeInBits()) {
42503 return extractSubVector(SDValue(User, 0), 0, DAG, DL,
42504                                  VT.getSizeInBits());
42505       }
42507 // vbroadcast(scalarload X) -> vbroadcast_load X
42508 // For float loads, extract other uses of the scalar from the broadcast.
42509 if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
42510 ISD::isNormalLoad(Src.getNode())) {
42511 LoadSDNode *LN = cast<LoadSDNode>(Src);
42512 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
42513       SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
42514       SDValue BcastLd =
42515 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
42516 LN->getMemoryVT(), LN->getMemOperand());
42517 // If the load value is used only by N, replace it via CombineTo N.
42518 bool NoReplaceExtract = Src.hasOneUse();
42519 DCI.CombineTo(N.getNode(), BcastLd);
42520 if (NoReplaceExtract) {
42521 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
42522         DCI.recursivelyDeleteUnusedNodes(LN);
42523       } else {
42524 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
42525 DAG.getIntPtrConstant(0, DL));
42526         DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
42527       }
42528       return N; // Return N so it doesn't get rechecked!
42529     }
42531 // Due to isTypeDesirableForOp, we won't always shrink a load truncated to
42532 // i16. So shrink it ourselves if we can make a broadcast_load.
42533 if (SrcVT == MVT::i16 && Src.getOpcode() == ISD::TRUNCATE &&
42534 Src.hasOneUse() && Src.getOperand(0).hasOneUse()) {
42535 assert(Subtarget.hasAVX2() && "Expected AVX2");
42536 SDValue TruncIn = Src.getOperand(0);
42538 // If this is a truncate of a non extending load we can just narrow it to
42539 // use a broadcast_load.
42540 if (ISD::isNormalLoad(TruncIn.getNode())) {
42541 LoadSDNode *LN = cast<LoadSDNode>(TruncIn);
42542         // Unless it's volatile or atomic.
42543 if (LN->isSimple()) {
42544 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
42545 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
42546 SDValue BcastLd = DAG.getMemIntrinsicNode(
42547 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
42548 LN->getPointerInfo(), LN->getOriginalAlign(),
42549 LN->getMemOperand()->getFlags());
42550 DCI.CombineTo(N.getNode(), BcastLd);
42551 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
42552 DCI.recursivelyDeleteUnusedNodes(Src.getNode());
42553           return N; // Return N so it doesn't get rechecked!
42554         }
42555       }
42557 // If this is a truncate of an i16 extload, we can directly replace it.
42558 if (ISD::isUNINDEXEDLoad(Src.getOperand(0).getNode()) &&
42559 ISD::isEXTLoad(Src.getOperand(0).getNode())) {
42560 LoadSDNode *LN = cast<LoadSDNode>(Src.getOperand(0));
42561 if (LN->getMemoryVT().getSizeInBits() == 16) {
42562 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
42563 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
42565 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
42566 LN->getMemoryVT(), LN->getMemOperand());
42567 DCI.CombineTo(N.getNode(), BcastLd);
42568 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
42569 DCI.recursivelyDeleteUnusedNodes(Src.getNode());
42570           return N; // Return N so it doesn't get rechecked!
42571         }
42572       }
42574       // If this is a truncate of a load that has been shifted right, we can
42575 // offset the pointer and use a narrower load.
42576 if (TruncIn.getOpcode() == ISD::SRL &&
42577 TruncIn.getOperand(0).hasOneUse() &&
42578 isa<ConstantSDNode>(TruncIn.getOperand(1)) &&
42579 ISD::isNormalLoad(TruncIn.getOperand(0).getNode())) {
42580 LoadSDNode *LN = cast<LoadSDNode>(TruncIn.getOperand(0));
42581 unsigned ShiftAmt = TruncIn.getConstantOperandVal(1);
42582 // Make sure the shift amount and the load size are divisible by 16.
42583 // Don't do this if the load is volatile or atomic.
42584         if (ShiftAmt % 16 == 0 && TruncIn.getValueSizeInBits() % 16 == 0 &&
42585             LN->isSimple()) {
42586 unsigned Offset = ShiftAmt / 8;
42587 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
42588 SDValue Ptr = DAG.getMemBasePlusOffset(LN->getBasePtr(),
42589 TypeSize::Fixed(Offset), DL);
42590 SDValue Ops[] = { LN->getChain(), Ptr };
42591 SDValue BcastLd = DAG.getMemIntrinsicNode(
42592 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
42593 LN->getPointerInfo().getWithOffset(Offset),
42594 LN->getOriginalAlign(),
42595 LN->getMemOperand()->getFlags());
42596 DCI.CombineTo(N.getNode(), BcastLd);
42597 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
42598 DCI.recursivelyDeleteUnusedNodes(Src.getNode());
42599           return N; // Return N so it doesn't get rechecked!
42600         }
42601       }
42602     }
42604 // vbroadcast(vzload X) -> vbroadcast_load X
42605 if (Src.getOpcode() == X86ISD::VZEXT_LOAD && Src.hasOneUse()) {
42606 MemSDNode *LN = cast<MemIntrinsicSDNode>(Src);
42607 if (LN->getMemoryVT().getSizeInBits() == VT.getScalarSizeInBits()) {
42608 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
42609         SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
42610         SDValue BcastLd =
42611 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
42612 LN->getMemoryVT(), LN->getMemOperand());
42613 DCI.CombineTo(N.getNode(), BcastLd);
42614 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
42615 DCI.recursivelyDeleteUnusedNodes(LN);
42616         return N; // Return N so it doesn't get rechecked!
42617       }
42618     }
42620 // vbroadcast(vector load X) -> vbroadcast_load
42621 if ((SrcVT == MVT::v2f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v2i64 ||
42622 SrcVT == MVT::v4i32) &&
42623 Src.hasOneUse() && ISD::isNormalLoad(Src.getNode())) {
42624 LoadSDNode *LN = cast<LoadSDNode>(Src);
42625 // Unless the load is volatile or atomic.
42626 if (LN->isSimple()) {
42627 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
42628 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
42629 SDValue BcastLd = DAG.getMemIntrinsicNode(
42630 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SrcVT.getScalarType(),
42631 LN->getPointerInfo(), LN->getOriginalAlign(),
42632 LN->getMemOperand()->getFlags());
42633 DCI.CombineTo(N.getNode(), BcastLd);
42634 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
42635 DCI.recursivelyDeleteUnusedNodes(LN);
42636         return N; // Return N so it doesn't get rechecked!
42637       }
42638     }
42640     return SDValue();
42641   }
42642 case X86ISD::VZEXT_MOVL: {
42643 SDValue N0 = N.getOperand(0);
42645     // If this is a vzmovl of a full vector load, replace it with a vzload, unless
42646 // the load is volatile.
42647 if (N0.hasOneUse() && ISD::isNormalLoad(N0.getNode())) {
42648 auto *LN = cast<LoadSDNode>(N0);
42649 if (SDValue VZLoad =
42650 narrowLoadToVZLoad(LN, VT.getVectorElementType(), VT, DAG)) {
42651 DCI.CombineTo(N.getNode(), VZLoad);
42652 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
42653         DCI.recursivelyDeleteUnusedNodes(LN);
42654         return N; // Return N so it doesn't get rechecked!
42655       }
42656     }
42658     // If this is a VZEXT_MOVL of a VBROADCAST_LOAD, we don't need the broadcast
42659 // and can just use a VZEXT_LOAD.
42660 // FIXME: Is there some way to do this with SimplifyDemandedVectorElts?
42661 if (N0.hasOneUse() && N0.getOpcode() == X86ISD::VBROADCAST_LOAD) {
42662 auto *LN = cast<MemSDNode>(N0);
42663 if (VT.getScalarSizeInBits() == LN->getMemoryVT().getSizeInBits()) {
42664 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
42665 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
42667 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops,
42668 LN->getMemoryVT(), LN->getMemOperand());
42669 DCI.CombineTo(N.getNode(), VZLoad);
42670 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
42671         DCI.recursivelyDeleteUnusedNodes(LN);
42672         return N; // Return N so it doesn't get rechecked!
42673       }
42674     }
42676 // Turn (v2i64 (vzext_movl (scalar_to_vector (i64 X)))) into
42677 // (v2i64 (bitcast (v4i32 (vzext_movl (scalar_to_vector (i32 (trunc X)))))))
42678 // if the upper bits of the i64 are zero.
42679 if (N0.hasOneUse() && N0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
42680 N0.getOperand(0).hasOneUse() &&
42681 N0.getOperand(0).getValueType() == MVT::i64) {
42682 SDValue In = N0.getOperand(0);
42683 APInt Mask = APInt::getHighBitsSet(64, 32);
42684 if (DAG.MaskedValueIsZero(In, Mask)) {
42685 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, In);
42686 MVT VecVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
42687 SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Trunc);
42688 SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, SclVec);
42689         return DAG.getBitcast(VT, Movl);
42690       }
42691     }
42693 // Load a scalar integer constant directly to XMM instead of transferring an
42694 // immediate value from GPR.
42695 // vzext_movl (scalar_to_vector C) --> load [C,0...]
42696 if (N0.getOpcode() == ISD::SCALAR_TO_VECTOR) {
42697 if (auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
42698 // Create a vector constant - scalar constant followed by zeros.
42699 EVT ScalarVT = N0.getOperand(0).getValueType();
42700 Type *ScalarTy = ScalarVT.getTypeForEVT(*DAG.getContext());
42701 unsigned NumElts = VT.getVectorNumElements();
42702 Constant *Zero = ConstantInt::getNullValue(ScalarTy);
42703 SmallVector<Constant *, 32> ConstantVec(NumElts, Zero);
42704 ConstantVec[0] = const_cast<ConstantInt *>(C->getConstantIntValue());
42706 // Load the vector constant from constant pool.
42707 MVT PVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
42708 SDValue CP = DAG.getConstantPool(ConstantVector::get(ConstantVec), PVT);
42709 MachinePointerInfo MPI =
42710 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
42711 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
42712 return DAG.getLoad(VT, DL, DAG.getEntryNode(), CP, MPI, Alignment,
42713                            MachineMemOperand::MOLoad);
42714       }
42715     }
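      // e.g. (v4i32 vzext_movl (scalar_to_vector (i32 42))) loads the pool
      // constant <42, 0, 0, 0> directly instead of a GPR->XMM transfer.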
42717 // Pull subvector inserts into undef through VZEXT_MOVL by making it an
42718 // insert into a zero vector. This helps get VZEXT_MOVL closer to
42719 // scalar_to_vectors where 256/512 are canonicalized to an insert and a
42720 // 128-bit scalar_to_vector. This reduces the number of isel patterns.
42721 if (!DCI.isBeforeLegalizeOps() && N0.hasOneUse()) {
42722 SDValue V = peekThroughOneUseBitcasts(N0);
42724 if (V.getOpcode() == ISD::INSERT_SUBVECTOR && V.getOperand(0).isUndef() &&
42725 isNullConstant(V.getOperand(2))) {
42726 SDValue In = V.getOperand(1);
42727 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
42728 In.getValueSizeInBits() /
42729 VT.getScalarSizeInBits());
42730 In = DAG.getBitcast(SubVT, In);
42731 SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, SubVT, In);
42732 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
42733 getZeroVector(VT, Subtarget, DAG, DL), Movl,
42740 case X86ISD::BLENDI: {
42741 SDValue N0 = N.getOperand(0);
42742 SDValue N1 = N.getOperand(1);
42744 // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
42745 // TODO: Handle MVT::v16i16 repeated blend mask.
42746 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
42747 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
42748 MVT SrcVT = N0.getOperand(0).getSimpleValueType();
42749 if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
42750 SrcVT.getScalarSizeInBits() >= 32) {
42751 unsigned BlendMask = N.getConstantOperandVal(2);
42752 unsigned Size = VT.getVectorNumElements();
42753 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
42754 BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
42755 return DAG.getBitcast(
42756             VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
42757                             N1.getOperand(0),
42758                             DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
42759       }
42760     }
42761     return SDValue();
42762   }
42763 case X86ISD::SHUFP: {
42764 // Fold shufps(shuffle(x),shuffle(y)) -> shufps(x,y).
42765 // This is a more relaxed shuffle combiner that can ignore oneuse limits.
42766 // TODO: Support types other than v4f32.
42767 if (VT == MVT::v4f32) {
42768 bool Updated = false;
42769 SmallVector<int> Mask;
42770 SmallVector<SDValue> Ops;
42771 if (getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask) &&
42773 for (int i = 0; i != 2; ++i) {
42774 SmallVector<SDValue> SubOps;
42775 SmallVector<int> SubMask, SubScaledMask;
42776 SDValue Sub = peekThroughBitcasts(Ops[i]);
42777 // TODO: Scaling might be easier if we specify the demanded elts.
42778 if (getTargetShuffleInputs(Sub, SubOps, SubMask, DAG, 0, false) &&
42779 scaleShuffleElements(SubMask, 4, SubScaledMask) &&
42780               SubOps.size() == 1 && isUndefOrInRange(SubScaledMask, 0, 4)) {
42781             int Ofs = i * 2;
42782 Mask[Ofs + 0] = SubScaledMask[Mask[Ofs + 0] % 4] + (i * 4);
42783 Mask[Ofs + 1] = SubScaledMask[Mask[Ofs + 1] % 4] + (i * 4);
42784             Ops[i] = DAG.getBitcast(VT, SubOps[0]);
42785             Updated = true;
42786           }
42787         }
42789         if (Updated) {
42790           for (int &M : Mask)
42791             M %= 4;
42792 Ops.push_back(getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
42793           return DAG.getNode(X86ISD::SHUFP, DL, VT, Ops);
42794         }
42795       }
42796     }
42797     return SDValue();
  }
42798 case X86ISD::VPERMI: {
42799 // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
42800 // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
42801 SDValue N0 = N.getOperand(0);
42802 SDValue N1 = N.getOperand(1);
42803 unsigned EltSizeInBits = VT.getScalarSizeInBits();
42804 if (N0.getOpcode() == ISD::BITCAST &&
42805 N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
42806 SDValue Src = N0.getOperand(0);
42807 EVT SrcVT = Src.getValueType();
42808 SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
42809       return DAG.getBitcast(VT, Res);
42810     }
42811     return SDValue();
42812   }
42813 case X86ISD::VPERM2X128: {
42814 // Fold vperm2x128(bitcast(x),bitcast(y),c) -> bitcast(vperm2x128(x,y,c)).
42815 SDValue LHS = N->getOperand(0);
42816 SDValue RHS = N->getOperand(1);
42817 if (LHS.getOpcode() == ISD::BITCAST &&
42818 (RHS.getOpcode() == ISD::BITCAST || RHS.isUndef())) {
42819 EVT SrcVT = LHS.getOperand(0).getValueType();
42820 if (RHS.isUndef() || SrcVT == RHS.getOperand(0).getValueType()) {
42821 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT,
42822 DAG.getBitcast(SrcVT, LHS),
42823 DAG.getBitcast(SrcVT, RHS),
42824 N->getOperand(2)));
42828 // Fold vperm2x128(op(),op()) -> op(vperm2x128(),vperm2x128()).
42829 if (SDValue Res = canonicalizeLaneShuffleWithRepeatedOps(N, DAG, DL))
42832 // Fold vperm2x128 subvector shuffle with an inner concat pattern.
42833 // vperm2x128(concat(X,Y),concat(Z,W)) --> concat X,Y etc.
42834 auto FindSubVector128 = [&](unsigned Idx) {
42835 if (Idx > 3)
42836 return SDValue();
42837 SDValue Src = peekThroughBitcasts(N.getOperand(Idx < 2 ? 0 : 1));
42838 SmallVector<SDValue> SubOps;
42839 if (collectConcatOps(Src.getNode(), SubOps, DAG) && SubOps.size() == 2)
42840 return SubOps[Idx & 1];
42841 unsigned NumElts = Src.getValueType().getVectorNumElements();
42842 if ((Idx & 1) == 1 && Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
42843 Src.getOperand(1).getValueSizeInBits() == 128 &&
42844 Src.getConstantOperandAPInt(2) == (NumElts / 2)) {
42845 return Src.getOperand(1);
42849 unsigned Imm = N.getConstantOperandVal(2);
42850 if (SDValue SubLo = FindSubVector128(Imm & 0x0F)) {
42851 if (SDValue SubHi = FindSubVector128((Imm & 0xF0) >> 4)) {
42852 MVT SubVT = VT.getHalfNumVectorElementsVT();
42853 SubLo = DAG.getBitcast(SubVT, SubLo);
42854 SubHi = DAG.getBitcast(SubVT, SubHi);
42855 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, SubLo, SubHi);
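// e.g. vperm2x128(concat(X,Y),concat(Z,W),0x31): the low imm nibble (1)
// selects subvector Y and the high nibble (3) selects subvector W, so
// the node folds to concat(Y,W) with no cross-lane shuffle left.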
42860 case X86ISD::PSHUFD:
42861 case X86ISD::PSHUFLW:
42862 case X86ISD::PSHUFHW: {
42863 SDValue N0 = N.getOperand(0);
42864 SDValue N1 = N.getOperand(1);
42865 if (N0->hasOneUse()) {
42866 SDValue V = peekThroughOneUseBitcasts(N0);
42867 switch (V.getOpcode()) {
42871 case X86ISD::VSHLI:
42872 case X86ISD::VSRLI:
42873 case X86ISD::VSRAI:
42874 case X86ISD::VROTLI:
42875 case X86ISD::VROTRI: {
42876 MVT InnerVT = V.getSimpleValueType();
42877 if (InnerVT.getScalarSizeInBits() <= VT.getScalarSizeInBits()) {
42878 SDValue Res = DAG.getNode(Opcode, DL, VT,
42879 DAG.getBitcast(VT, V.getOperand(0)), N1);
42880 Res = DAG.getBitcast(InnerVT, Res);
42881 Res = DAG.getNode(V.getOpcode(), DL, InnerVT, Res, V.getOperand(1));
42882 return DAG.getBitcast(VT, Res);
42889 Mask = getPSHUFShuffleMask(N);
42890 assert(Mask.size() == 4);
42893 case X86ISD::MOVSD:
42894 case X86ISD::MOVSH:
42895 case X86ISD::MOVSS: {
42896 SDValue N0 = N.getOperand(0);
42897 SDValue N1 = N.getOperand(1);
42899 // Canonicalize scalar FPOps:
42900 // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
42901 // If commutable, allow OP(N1[0], N0[0]).
42902 unsigned Opcode1 = N1.getOpcode();
42903 if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
42904 Opcode1 == ISD::FDIV) {
42905 SDValue N10 = N1.getOperand(0);
42906 SDValue N11 = N1.getOperand(1);
42907 if (N10 == N0 ||
42908 (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
42909 if (N10 != N0)
42910 std::swap(N10, N11);
42911 MVT SVT = VT.getVectorElementType();
42912 SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
42913 N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
42914 N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
42915 SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
42916 SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
42917 return DAG.getNode(Opcode, DL, VT, N0, SclVec);
42923 case X86ISD::INSERTPS: {
42924 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
42925 SDValue Op0 = N.getOperand(0);
42926 SDValue Op1 = N.getOperand(1);
42927 unsigned InsertPSMask = N.getConstantOperandVal(2);
42928 unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
42929 unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
42930 unsigned ZeroMask = InsertPSMask & 0xF;
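// INSERTPS imm8 layout: bits [7:6] = SrcIdx (element read from Op1),
// bits [5:4] = DstIdx (element written in the result), bits [3:0] =
// ZeroMask. e.g. imm 0x8A (0b10'00'1010) writes Op1[2] to element 0,
// zeroes elements 1 and 3, and passes element 2 through from Op0.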
42932 // If we zero out all elements from Op0 then we don't need to reference it.
42933 if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
42934 return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
42935 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
42937 // If we zero out the element from Op1 then we don't need to reference it.
42938 if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
42939 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
42940 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
42942 // Attempt to merge insertps Op1 with an inner target shuffle node.
42943 SmallVector<int, 8> TargetMask1;
42944 SmallVector<SDValue, 2> Ops1;
42945 APInt KnownUndef1, KnownZero1;
42946 if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
42947 KnownZero1)) {
42948 if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
42949 // Zero/UNDEF insertion - zero out element and remove dependency.
42950 InsertPSMask |= (1u << DstIdx);
42951 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
42952 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
42954 // Update insertps mask srcidx and reference the source input directly.
42955 int M = TargetMask1[SrcIdx];
42956 assert(0 <= M && M < 8 && "Shuffle index out of range");
42957 InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
42958 Op1 = Ops1[M < 4 ? 0 : 1];
42959 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
42960 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
42963 // Attempt to merge insertps Op0 with an inner target shuffle node.
42964 SmallVector<int, 8> TargetMask0;
42965 SmallVector<SDValue, 2> Ops0;
42966 APInt KnownUndef0, KnownZero0;
42967 if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
42968 KnownZero0)) {
42969 bool Updated = false;
42970 bool UseInput00 = false;
42971 bool UseInput01 = false;
42972 for (int i = 0; i != 4; ++i) {
42973 if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
42974 // No change if element is already zero or the inserted element.
42978 if (KnownUndef0[i] || KnownZero0[i]) {
42979 // If the target mask is undef/zero then we must zero the element.
42980 InsertPSMask |= (1u << i);
42985 // The input vector element must be inline.
42986 int M = TargetMask0[i];
42987 if (M != i && M != (i + 4))
42990 // Determine which inputs of the target shuffle we're using.
42991 UseInput00 |= (0 <= M && M < 4);
42992 UseInput01 |= (4 <= M);
42995 // If we're not using both inputs of the target shuffle then use the
42996 // referenced input directly.
42997 if (UseInput00 && !UseInput01) {
42998 Updated = true;
42999 Op0 = Ops0[0];
43000 } else if (!UseInput00 && UseInput01) {
43001 Updated = true;
43002 Op0 = Ops0[1];
43003 }
43004 if (Updated)
43006 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
43007 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
43010 // If we're inserting an element from a vbroadcast load, fold the
43011 // load into the X86insertps instruction. We need to convert the scalar
43012 // load to a vector and clear the source lane of the INSERTPS control.
43013 if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
43014 auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
43015 if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
43016 SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
43017 MemIntr->getBasePtr(),
43018 MemIntr->getMemOperand());
43019 SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
43020 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
43021 Load),
43022 DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
43023 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
43024 return Insert;
43034 // Nuke no-op shuffles that show up after combining.
43035 if (isNoopShuffleMask(Mask))
43036 return N.getOperand(0);
43038 // Look for simplifications involving one or two shuffle instructions.
43039 SDValue V = N.getOperand(0);
43040 switch (N.getOpcode()) {
43043 case X86ISD::PSHUFLW:
43044 case X86ISD::PSHUFHW:
43045 assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
43047 // See if this reduces to a PSHUFD which is no more expensive and can
43048 // combine with more operations. Note that it has to at least flip the
43049 // dwords as otherwise it would have been removed as a no-op.
43050 if (ArrayRef(Mask).equals({2, 3, 0, 1})) {
43051 int DMask[] = {0, 1, 2, 3};
43052 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
43053 DMask[DOffset + 0] = DOffset + 1;
43054 DMask[DOffset + 1] = DOffset + 0;
43055 MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
43056 V = DAG.getBitcast(DVT, V);
43057 V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
43058 getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
43059 return DAG.getBitcast(VT, V);
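// e.g. PSHUFLW <2,3,0,1> swaps words 0..1 with words 2..3, i.e. it swaps
// the two dwords of the low half: bitcast to v4i32, PSHUFD <1,0,2,3>,
// bitcast back. The PSHUFD form combines with neighboring shuffles more
// readily.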
43062 // Look for shuffle patterns which can be implemented as a single unpack.
43063 // FIXME: This doesn't handle the location of the PSHUFD generically, and
43064 // only works when we have a PSHUFD followed by two half-shuffles.
43065 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
43066 (V.getOpcode() == X86ISD::PSHUFLW ||
43067 V.getOpcode() == X86ISD::PSHUFHW) &&
43068 V.getOpcode() != N.getOpcode() &&
43069 V.hasOneUse() && V.getOperand(0).hasOneUse()) {
43070 SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
43071 if (D.getOpcode() == X86ISD::PSHUFD) {
43072 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
43073 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
43074 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
43075 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
43076 int WordMask[8] = {0, 1, 2, 3, 4, 5, 6, 7};
43077 for (int i = 0; i < 4; ++i) {
43078 WordMask[i + NOffset] = Mask[i] + NOffset;
43079 WordMask[i + VOffset] = VMask[i] + VOffset;
43080 }
43081 // Map the word mask through the DWord mask.
43082 int MappedMask[8];
43083 for (int i = 0; i < 8; ++i)
43084 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
43085 if (ArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
43086 ArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
43087 // We can replace all three shuffles with an unpack.
43088 V = DAG.getBitcast(VT, D.getOperand(0));
43089 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
43090 : X86ISD::UNPCKH,
43091 DL, VT, V, V);
43098 case X86ISD::PSHUFD:
43099 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
43108 /// Checks if the shuffle mask takes subsequent elements
43109 /// alternately from two vectors.
43110 /// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
43111 static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
43113 int ParitySrc[2] = {-1, -1};
43114 unsigned Size = Mask.size();
43115 for (unsigned i = 0; i != Size; ++i) {
43120 // Make sure we are using the matching element from the input.
43121 if ((M % Size) != i)
43124 // Make sure we use the same input for all elements of the same parity.
43125 int Src = M / Size;
43126 if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
43128 ParitySrc[i % 2] = Src;
43131 // Make sure each input is used.
43132 if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
43135 Op0Even = ParitySrc[0] == 0;
43139 /// Returns true iff the shuffle node \p N can be replaced with an ADDSUB
43140 /// (SUBADD) operation. If true is returned then the operands of the ADDSUB
43141 /// (SUBADD) operation are written to the parameters \p Opnd0 and \p Opnd1.
43143 /// We combine shuffles to ADDSUB(SUBADD) directly on the abstract vector
43144 /// shuffle nodes, as this is easier to match generically. We also insert
43145 /// dummy vector shuffle nodes for the operands which explicitly discard the
43146 /// lanes which are unused by this operation, so that the rest of the
43147 /// combiner can see that they're unused.
43148 static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
43149 SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
43152 EVT VT = N->getValueType(0);
43153 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43154 if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
43155 !VT.getSimpleVT().isFloatingPoint())
43158 // We only handle target-independent shuffles.
43159 // FIXME: It would be easy and harmless to use the target shuffle mask
43160 // extraction tool to support more.
43161 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
43164 SDValue V1 = N->getOperand(0);
43165 SDValue V2 = N->getOperand(1);
43167 // Make sure we have an FADD and an FSUB.
43168 if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
43169 (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
43170 V1.getOpcode() == V2.getOpcode())
43173 // If there are other uses of these operations we can't fold them.
43174 if (!V1->hasOneUse() || !V2->hasOneUse())
43177 // Ensure that both operations have the same operands. Note that we can
43178 // commute the FADD operands.
43180 if (V1.getOpcode() == ISD::FSUB) {
43181 LHS = V1->getOperand(0); RHS = V1->getOperand(1);
43182 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
43183 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
43186 assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
43187 LHS = V2->getOperand(0); RHS = V2->getOperand(1);
43188 if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
43189 (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
43193 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
43194 bool Op0Even;
43195 if (!isAddSubOrSubAddMask(Mask, Op0Even))
43196 return false;
43198 // It's a subadd if the vector in the even parity is an FADD.
43199 IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
43200 : V2->getOpcode() == ISD::FADD;
43207 /// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
43208 static SDValue combineShuffleToFMAddSub(SDNode *N,
43209 const X86Subtarget &Subtarget,
43210 SelectionDAG &DAG) {
43211 // We only handle target-independent shuffles.
43212 // FIXME: It would be easy and harmless to use the target shuffle mask
43213 // extraction tool to support more.
43214 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
43217 MVT VT = N->getSimpleValueType(0);
43218 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43219 if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
43222 // We're trying to match (shuffle fma(a, b, c), X86ISD::FMSUB(a, b, c)).
43223 SDValue Op0 = N->getOperand(0);
43224 SDValue Op1 = N->getOperand(1);
43225 SDValue FMAdd = Op0, FMSub = Op1;
43226 if (FMSub.getOpcode() != X86ISD::FMSUB)
43227 std::swap(FMAdd, FMSub);
43229 if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
43230 FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
43231 FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
43232 FMAdd.getOperand(2) != FMSub.getOperand(2))
43235 // Check for correct shuffle mask.
43236 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
43237 bool Op0Even;
43238 if (!isAddSubOrSubAddMask(Mask, Op0Even))
43239 return SDValue();
43241 // FMAddSub takes zeroth operand from FMSub node.
43242 SDLoc DL(N);
43243 bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
43244 unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
43245 return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
43246 FMAdd.getOperand(2));
43249 /// Try to combine a shuffle into a target-specific add-sub or
43250 /// mul-add-sub node.
43251 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
43252 const X86Subtarget &Subtarget,
43253 SelectionDAG &DAG) {
43254 if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
43257 SDValue Opnd0, Opnd1;
43258 bool IsSubAdd;
43259 if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
43260 return SDValue();
43262 MVT VT = N->getSimpleValueType(0);
43263 SDLoc DL(N);
43265 // Try to generate X86ISD::FMADDSUB node here.
43266 SDValue Opnd2;
43267 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
43268 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
43269 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
43275 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
43276 // the ADDSUB idiom has been successfully recognized. There are no known
43277 // X86 targets with 512-bit ADDSUB instructions!
43278 if (VT.is512BitVector())
43281 // Do not generate X86ISD::ADDSUB node for FP16's vector types even though
43282 // the ADDSUB idiom has been successfully recognized. There are no known
43283 // X86 targets with FP16 ADDSUB instructions!
43284 if (VT.getVectorElementType() == MVT::f16)
43287 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
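// e.g. shuffle<0,5,2,7>(fsub(X,Y), fadd(X,Y)) takes the FSUB result in
// the even lanes and the FADD result in the odd lanes, which is exactly
// v4f32 X86ISD::ADDSUB(X,Y) (addsubps on SSE3).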
43290 // We are looking for a shuffle where both sources are concatenated with undef
43291 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
43292 // if we can express this as a single-source shuffle, that's preferable.
43293 static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
43294 const X86Subtarget &Subtarget) {
43295 if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
43298 EVT VT = N->getValueType(0);
43300 // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
43301 if (!VT.is128BitVector() && !VT.is256BitVector())
43304 if (VT.getVectorElementType() != MVT::i32 &&
43305 VT.getVectorElementType() != MVT::i64 &&
43306 VT.getVectorElementType() != MVT::f32 &&
43307 VT.getVectorElementType() != MVT::f64)
43310 SDValue N0 = N->getOperand(0);
43311 SDValue N1 = N->getOperand(1);
43313 // Check that both sources are concats with undef.
43314 if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
43315 N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
43316 N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
43317 !N1.getOperand(1).isUndef())
43320 // Construct the new shuffle mask. Elements from the first source retain their
43321 // index, but elements from the second source no longer need to skip an undef.
43322 SmallVector<int, 8> Mask;
43323 int NumElts = VT.getVectorNumElements();
43325 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
43326 for (int Elt : SVOp->getMask())
43327 Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
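// e.g. for v8i32 each concat source holds 4 live elements, so mask entry
// 9 (element 1 of t2) lands at position 4 + 1 = 5 in concat(t1,t2),
// i.e. at 9 - NumElts/2.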
43330 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
43332 return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
43335 /// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
43336 /// low half of each source vector and does not set any high half elements in
43337 /// the destination vector, narrow the shuffle to half its original size.
43338 static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
43339 EVT VT = Shuf->getValueType(0);
43340 if (!DAG.getTargetLoweringInfo().isTypeLegal(Shuf->getValueType(0)))
43342 if (!VT.is256BitVector() && !VT.is512BitVector())
43345 // See if we can ignore all of the high elements of the shuffle.
43346 ArrayRef<int> Mask = Shuf->getMask();
43347 if (!isUndefUpperHalf(Mask))
43350 // Check if the shuffle mask accesses only the low half of each input vector
43351 // (half-index output is 0 or 2).
43352 int HalfIdx1, HalfIdx2;
43353 SmallVector<int, 8> HalfMask(Mask.size() / 2);
43354 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
43355 (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
43358 // Create a half-width shuffle to replace the unnecessarily wide shuffle.
43359 // The trick is knowing that all of the insert/extract are actually free
43360 // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
43361 // of narrow inputs into a narrow output, and that is always cheaper than
43362 // the wide shuffle that we started with.
43363 return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
43364 Shuf->getOperand(1), HalfMask, HalfIdx1,
43365 HalfIdx2, false, DAG, /*UseConcat*/ true);
43368 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
43369 TargetLowering::DAGCombinerInfo &DCI,
43370 const X86Subtarget &Subtarget) {
43371 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
43372 if (SDValue V = narrowShuffle(Shuf, DAG))
43373 return V;
43375 // If we have legalized the vector types, look for blends of FADD and FSUB
43376 // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
43377 SDLoc dl(N);
43378 EVT VT = N->getValueType(0);
43379 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43380 if (TLI.isTypeLegal(VT))
43381 if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
43382 return AddSub;
43384 // Attempt to combine into a vector load/broadcast.
43385 if (SDValue LD = combineToConsecutiveLoads(
43386 VT, SDValue(N, 0), dl, DAG, Subtarget, /*IsAfterLegalize*/ true))
43387 return LD;
43389 // For AVX2, we sometimes want to combine
43390 // (vector_shuffle <mask> (concat_vectors t1, undef)
43391 // (concat_vectors t2, undef))
43393 // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
43394 // Since the latter can be efficiently lowered with VPERMD/VPERMQ
43395 if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
43396 return ShufConcat;
43398 if (isTargetShuffle(N->getOpcode())) {
43399 SDValue Op(N, 0);
43400 if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
43401 return Shuffle;
43403 // Try recursively combining arbitrary sequences of x86 shuffle
43404 // instructions into higher-order shuffles. We do this after combining
43405 // specific PSHUF instruction sequences into their minimal form so that we
43406 // can evaluate how many specialized shuffle instructions are involved in
43407 // a particular chain.
43408 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
43409 return Res;
43411 // Simplify source operands based on shuffle mask.
43412 // TODO - merge this into combineX86ShufflesRecursively.
43413 APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
43414 if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, DCI))
43415 return SDValue(N, 0);
43417 // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
43418 // Perform this after other shuffle combines to allow inner shuffles to be
43419 // combined away first.
43420 if (SDValue BinOp = canonicalizeShuffleWithBinOps(Op, DAG, dl))
43421 return BinOp;
43427 // Simplify variable target shuffle masks based on the demanded elements.
43428 // TODO: Handle DemandedBits in mask indices as well?
43429 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetShuffle(
43430 SDValue Op, const APInt &DemandedElts, unsigned MaskIndex,
43431 TargetLowering::TargetLoweringOpt &TLO, unsigned Depth) const {
43432 // If we're demanding all elements, don't bother trying to simplify the mask.
43433 unsigned NumElts = DemandedElts.getBitWidth();
43434 if (DemandedElts.isAllOnes())
43437 SDValue Mask = Op.getOperand(MaskIndex);
43438 if (!Mask.hasOneUse())
43441 // Attempt to generically simplify the variable shuffle mask.
43442 APInt MaskUndef, MaskZero;
43443 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
43444 Depth + 1))
43445 return true;
43447 // Attempt to extract+simplify a (constant pool load) shuffle mask.
43448 // TODO: Support other types from getTargetShuffleMaskIndices?
43449 SDValue BC = peekThroughOneUseBitcasts(Mask);
43450 EVT BCVT = BC.getValueType();
43451 auto *Load = dyn_cast<LoadSDNode>(BC);
43455 const Constant *C = getTargetConstantFromNode(Load);
43459 Type *CTy = C->getType();
43460 if (!CTy->isVectorTy() ||
43461 CTy->getPrimitiveSizeInBits() != Mask.getValueSizeInBits())
43464 // Handle scaling for i64 elements on 32-bit targets.
43465 unsigned NumCstElts = cast<FixedVectorType>(CTy)->getNumElements();
43466 if (NumCstElts != NumElts && NumCstElts != (NumElts * 2))
43468 unsigned Scale = NumCstElts / NumElts;
43470 // Simplify mask if we have an undemanded element that is not undef.
43471 bool Simplified = false;
43472 SmallVector<Constant *, 32> ConstVecOps;
43473 for (unsigned i = 0; i != NumCstElts; ++i) {
43474 Constant *Elt = C->getAggregateElement(i);
43475 if (!DemandedElts[i / Scale] && !isa<UndefValue>(Elt)) {
43476 ConstVecOps.push_back(UndefValue::get(Elt->getType()));
43480 ConstVecOps.push_back(Elt);
43485 // Generate new constant pool entry + legalize immediately for the load.
43487 SDValue CV = TLO.DAG.getConstantPool(ConstantVector::get(ConstVecOps), BCVT);
43488 SDValue LegalCV = LowerConstantPool(CV, TLO.DAG);
43489 SDValue NewMask = TLO.DAG.getLoad(
43490 BCVT, DL, TLO.DAG.getEntryNode(), LegalCV,
43491 MachinePointerInfo::getConstantPool(TLO.DAG.getMachineFunction()),
43493 return TLO.CombineTo(Mask, TLO.DAG.getBitcast(Mask.getValueType(), NewMask));
43496 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
43497 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
43498 TargetLoweringOpt &TLO, unsigned Depth) const {
43499 int NumElts = DemandedElts.getBitWidth();
43500 unsigned Opc = Op.getOpcode();
43501 EVT VT = Op.getValueType();
43503 // Handle special case opcodes.
43505 case X86ISD::PMULDQ:
43506 case X86ISD::PMULUDQ: {
43507 APInt LHSUndef, LHSZero;
43508 APInt RHSUndef, RHSZero;
43509 SDValue LHS = Op.getOperand(0);
43510 SDValue RHS = Op.getOperand(1);
43511 if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
43512 Depth + 1))
43513 return true;
43514 if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
43515 Depth + 1))
43516 return true;
43517 // Multiply by zero.
43518 KnownZero = LHSZero | RHSZero;
43521 case X86ISD::VPMADDWD: {
43522 APInt LHSUndef, LHSZero;
43523 APInt RHSUndef, RHSZero;
43524 SDValue LHS = Op.getOperand(0);
43525 SDValue RHS = Op.getOperand(1);
43526 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, 2 * NumElts);
43528 if (SimplifyDemandedVectorElts(LHS, DemandedSrcElts, LHSUndef, LHSZero, TLO,
43531 if (SimplifyDemandedVectorElts(RHS, DemandedSrcElts, RHSUndef, RHSZero, TLO,
43535 // TODO: Multiply by zero.
43537 // If RHS/LHS elements are known zero then we don't need the LHS/RHS equivalent.
43538 APInt DemandedLHSElts = DemandedSrcElts & ~RHSZero;
43539 if (SimplifyDemandedVectorElts(LHS, DemandedLHSElts, LHSUndef, LHSZero, TLO,
43542 APInt DemandedRHSElts = DemandedSrcElts & ~LHSZero;
43543 if (SimplifyDemandedVectorElts(RHS, DemandedRHSElts, RHSUndef, RHSZero, TLO,
43548 case X86ISD::PSADBW: {
43549 SDValue LHS = Op.getOperand(0);
43550 SDValue RHS = Op.getOperand(1);
43551 assert(VT.getScalarType() == MVT::i64 &&
43552 LHS.getValueType() == RHS.getValueType() &&
43553 LHS.getValueType().getScalarType() == MVT::i8 &&
43554 "Unexpected PSADBW types");
43556 // Aggressively peek through ops to get at the demanded elts.
43557 if (!DemandedElts.isAllOnes()) {
43558 unsigned NumSrcElts = LHS.getValueType().getVectorNumElements();
43559 APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
43560 SDValue NewLHS = SimplifyMultipleUseDemandedVectorElts(
43561 LHS, DemandedSrcElts, TLO.DAG, Depth + 1);
43562 SDValue NewRHS = SimplifyMultipleUseDemandedVectorElts(
43563 RHS, DemandedSrcElts, TLO.DAG, Depth + 1);
43564 if (NewLHS || NewRHS) {
43565 NewLHS = NewLHS ? NewLHS : LHS;
43566 NewRHS = NewRHS ? NewRHS : RHS;
43567 return TLO.CombineTo(
43568 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
43575 case X86ISD::VSRA: {
43576 // We only need the bottom 64-bits of the (128-bit) shift amount.
43577 SDValue Amt = Op.getOperand(1);
43578 MVT AmtVT = Amt.getSimpleValueType();
43579 assert(AmtVT.is128BitVector() && "Unexpected value type");
43581 // If we reuse the shift amount just for SSE shift amounts then we know
43582 // that only the bottom 64-bits are ever used.
43583 bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
43584 unsigned UseOpc = Use->getOpcode();
43585 return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
43586 UseOpc == X86ISD::VSRA) &&
43587 Use->getOperand(0) != Amt;
43590 APInt AmtUndef, AmtZero;
43591 unsigned NumAmtElts = AmtVT.getVectorNumElements();
43592 APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
43593 if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
43594 Depth + 1, AssumeSingleUse))
43598 case X86ISD::VSHLI:
43599 case X86ISD::VSRLI:
43600 case X86ISD::VSRAI: {
43601 SDValue Src = Op.getOperand(0);
43602 APInt SrcUndef;
43603 if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
43604 Depth + 1))
43605 return true;
43607 // Fold shift(0,x) -> 0
43608 if (DemandedElts.isSubsetOf(KnownZero))
43609 return TLO.CombineTo(
43610 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
43612 // Aggressively peek through ops to get at the demanded elts.
43613 if (!DemandedElts.isAllOnes())
43614 if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
43615 Src, DemandedElts, TLO.DAG, Depth + 1))
43616 return TLO.CombineTo(
43617 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc, Op.getOperand(1)));
43620 case X86ISD::VPSHA:
43621 case X86ISD::VPSHL:
43622 case X86ISD::VSHLV:
43623 case X86ISD::VSRLV:
43624 case X86ISD::VSRAV: {
43625 APInt LHSUndef, LHSZero;
43626 APInt RHSUndef, RHSZero;
43627 SDValue LHS = Op.getOperand(0);
43628 SDValue RHS = Op.getOperand(1);
43629 if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
43633 // Fold shift(0,x) -> 0
43634 if (DemandedElts.isSubsetOf(LHSZero))
43635 return TLO.CombineTo(
43636 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
43638 if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
43642 KnownZero = LHSZero;
43645 case X86ISD::KSHIFTL: {
43646 SDValue Src = Op.getOperand(0);
43647 auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
43648 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
43649 unsigned ShiftAmt = Amt->getZExtValue();
43651 if (ShiftAmt == 0)
43652 return TLO.CombineTo(Op, Src);
43654 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
43655 // single shift. We can do this if the bottom bits (which are shifted
43656 // out) are never demanded.
43657 if (Src.getOpcode() == X86ISD::KSHIFTR) {
43658 if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
43659 unsigned C1 = Src.getConstantOperandVal(1);
43660 unsigned NewOpc = X86ISD::KSHIFTL;
43661 int Diff = ShiftAmt - C1;
43662 if (Diff < 0) {
43663 Diff = -Diff;
43664 NewOpc = X86ISD::KSHIFTR;
43665 }
43668 SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
43669 return TLO.CombineTo(
43670 Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
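// e.g. kshiftl(kshiftr(X,3),2) with the low 2 elements undemanded:
// Diff = 2 - 3 = -1, so the pair collapses to a single kshiftr(X,1).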
43674 APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
43675 if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
43679 KnownUndef <<= ShiftAmt;
43680 KnownZero <<= ShiftAmt;
43681 KnownZero.setLowBits(ShiftAmt);
43684 case X86ISD::KSHIFTR: {
43685 SDValue Src = Op.getOperand(0);
43686 auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
43687 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
43688 unsigned ShiftAmt = Amt->getZExtValue();
43690 if (ShiftAmt == 0)
43691 return TLO.CombineTo(Op, Src);
43693 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
43694 // single shift. We can do this if the top bits (which are shifted
43695 // out) are never demanded.
43696 if (Src.getOpcode() == X86ISD::KSHIFTL) {
43697 if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
43698 unsigned C1 = Src.getConstantOperandVal(1);
43699 unsigned NewOpc = X86ISD::KSHIFTR;
43700 int Diff = ShiftAmt - C1;
43701 if (Diff < 0) {
43702 Diff = -Diff;
43703 NewOpc = X86ISD::KSHIFTL;
43704 }
43707 SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
43708 return TLO.CombineTo(
43709 Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
43713 APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
43714 if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
43718 KnownUndef.lshrInPlace(ShiftAmt);
43719 KnownZero.lshrInPlace(ShiftAmt);
43720 KnownZero.setHighBits(ShiftAmt);
43723 case X86ISD::ANDNP: {
43724 // ANDNP = (~LHS & RHS);
43725 SDValue LHS = Op.getOperand(0);
43726 SDValue RHS = Op.getOperand(1);
43728 auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
43730 SmallVector<APInt> EltBits;
43731 int NumElts = VT.getVectorNumElements();
43732 int EltSizeInBits = VT.getScalarSizeInBits();
43733 APInt OpBits = APInt::getAllOnes(EltSizeInBits);
43734 APInt OpElts = DemandedElts;
43735 if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
43737 OpBits.clearAllBits();
43738 OpElts.clearAllBits();
43739 for (int I = 0; I != NumElts; ++I) {
43740 if (!DemandedElts[I])
43742 if (UndefElts[I]) {
43743 // We can't assume an undef src element gives an undef dst - the
43744 // other src might be zero.
43745 OpBits.setAllBits();
43747 } else if ((Invert && !EltBits[I].isAllOnes()) ||
43748 (!Invert && !EltBits[I].isZero())) {
43749 OpBits |= Invert ? ~EltBits[I] : EltBits[I];
43754 return std::make_pair(OpBits, OpElts);
43756 APInt BitsLHS, EltsLHS;
43757 APInt BitsRHS, EltsRHS;
43758 std::tie(BitsLHS, EltsLHS) = GetDemandedMasks(RHS);
43759 std::tie(BitsRHS, EltsRHS) = GetDemandedMasks(LHS, true);
43761 APInt LHSUndef, LHSZero;
43762 APInt RHSUndef, RHSZero;
43763 if (SimplifyDemandedVectorElts(LHS, EltsLHS, LHSUndef, LHSZero, TLO,
43766 if (SimplifyDemandedVectorElts(RHS, EltsRHS, RHSUndef, RHSZero, TLO,
43770 if (!DemandedElts.isAllOnes()) {
43771 SDValue NewLHS = SimplifyMultipleUseDemandedBits(LHS, BitsLHS, EltsLHS,
43772 TLO.DAG, Depth + 1);
43773 SDValue NewRHS = SimplifyMultipleUseDemandedBits(RHS, BitsRHS, EltsRHS,
43774 TLO.DAG, Depth + 1);
43775 if (NewLHS || NewRHS) {
43776 NewLHS = NewLHS ? NewLHS : LHS;
43777 NewRHS = NewRHS ? NewRHS : RHS;
43778 return TLO.CombineTo(
43779 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
43784 case X86ISD::CVTSI2P:
43785 case X86ISD::CVTUI2P: {
43786 SDValue Src = Op.getOperand(0);
43787 MVT SrcVT = Src.getSimpleValueType();
43788 APInt SrcUndef, SrcZero;
43789 APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
43790 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
43795 case X86ISD::PACKSS:
43796 case X86ISD::PACKUS: {
43797 SDValue N0 = Op.getOperand(0);
43798 SDValue N1 = Op.getOperand(1);
43800 APInt DemandedLHS, DemandedRHS;
43801 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
43803 APInt LHSUndef, LHSZero;
43804 if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
43807 APInt RHSUndef, RHSZero;
43808 if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
43812 // TODO - pass on known zero/undef.
43814 // Aggressively peek through ops to get at the demanded elts.
43815 // TODO - we should do this for all target/faux shuffles ops.
43816 if (!DemandedElts.isAllOnes()) {
43817 SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
43818 TLO.DAG, Depth + 1);
43819 SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
43820 TLO.DAG, Depth + 1);
43821 if (NewN0 || NewN1) {
43822 NewN0 = NewN0 ? NewN0 : N0;
43823 NewN1 = NewN1 ? NewN1 : N1;
43824 return TLO.CombineTo(Op,
43825 TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
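// e.g. for a v16i8 PACKSS of two v8i16 sources, result bytes 0-7 come
// from N0 and bytes 8-15 from N1 (per 128-bit lane), so demanding only
// the low 8 bytes lets N1 be simplified independently.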
43832 case X86ISD::FHADD:
43833 case X86ISD::FHSUB: {
43834 SDValue N0 = Op.getOperand(0);
43835 SDValue N1 = Op.getOperand(1);
43837 APInt DemandedLHS, DemandedRHS;
43838 getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
43840 APInt LHSUndef, LHSZero;
43841 if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
43844 APInt RHSUndef, RHSZero;
43845 if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
43849 // TODO - pass on known zero/undef.
43851 // Aggressively peek through ops to get at the demanded elts.
43852 // TODO: Handle repeated operands.
43853 if (N0 != N1 && !DemandedElts.isAllOnes()) {
43854 SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
43855 TLO.DAG, Depth + 1);
43856 SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
43857 TLO.DAG, Depth + 1);
43858 if (NewN0 || NewN1) {
43859 NewN0 = NewN0 ? NewN0 : N0;
43860 NewN1 = NewN1 ? NewN1 : N1;
43861 return TLO.CombineTo(Op,
43862 TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
43867 case X86ISD::VTRUNC:
43868 case X86ISD::VTRUNCS:
43869 case X86ISD::VTRUNCUS: {
43870 SDValue Src = Op.getOperand(0);
43871 MVT SrcVT = Src.getSimpleValueType();
43872 APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
43873 APInt SrcUndef, SrcZero;
43874 if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
43877 KnownZero = SrcZero.zextOrTrunc(NumElts);
43878 KnownUndef = SrcUndef.zextOrTrunc(NumElts);
43881 case X86ISD::BLENDV: {
43882 APInt SelUndef, SelZero;
43883 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
43884 SelZero, TLO, Depth + 1))
43887 // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
43888 APInt LHSUndef, LHSZero;
43889 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
43890 LHSZero, TLO, Depth + 1))
43893 APInt RHSUndef, RHSZero;
43894 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
43895 RHSZero, TLO, Depth + 1))
43898 KnownZero = LHSZero & RHSZero;
43899 KnownUndef = LHSUndef & RHSUndef;
43902 case X86ISD::VZEXT_MOVL: {
43903 // If upper demanded elements are already zero then we have nothing to do.
43904 SDValue Src = Op.getOperand(0);
43905 APInt DemandedUpperElts = DemandedElts;
43906 DemandedUpperElts.clearLowBits(1);
43907 if (TLO.DAG.MaskedVectorIsZero(Src, DemandedUpperElts, Depth + 1))
43908 return TLO.CombineTo(Op, Src);
43911 case X86ISD::VBROADCAST: {
43912 SDValue Src = Op.getOperand(0);
43913 MVT SrcVT = Src.getSimpleValueType();
43914 if (!SrcVT.isVector())
43916 // Don't bother broadcasting if we just need the 0'th element.
43917 if (DemandedElts == 1) {
43918 if (Src.getValueType() != VT)
43919 Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
43921 return TLO.CombineTo(Op, Src);
43923 APInt SrcUndef, SrcZero;
43924 APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
43925 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
43928 // Aggressively peek through src to get at the demanded elt.
43929 // TODO - we should do this for all target/faux shuffles ops.
43930 if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
43931 Src, SrcElts, TLO.DAG, Depth + 1))
43932 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
43935 case X86ISD::VPERMV:
43936 if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 0, TLO,
43940 case X86ISD::PSHUFB:
43941 case X86ISD::VPERMV3:
43942 case X86ISD::VPERMILPV:
43943 if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 1, TLO,
43947 case X86ISD::VPPERM:
43948 case X86ISD::VPERMIL2:
43949 if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 2, TLO,
43955 // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
43956 // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
43957 // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
43958 if ((VT.is256BitVector() || VT.is512BitVector()) &&
43959 DemandedElts.lshr(NumElts / 2) == 0) {
43960 unsigned SizeInBits = VT.getSizeInBits();
43961 unsigned ExtSizeInBits = SizeInBits / 2;
43963 // See if 512-bit ops only use the bottom 128-bits.
43964 if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
43965 ExtSizeInBits = SizeInBits / 4;
43967 switch (Opc) {
43968 // Scalar broadcast.
43969 case X86ISD::VBROADCAST: {
43970 SDLoc DL(Op);
43971 SDValue Src = Op.getOperand(0);
43972 if (Src.getValueSizeInBits() > ExtSizeInBits)
43973 Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
43974 EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
43975 ExtSizeInBits / VT.getScalarSizeInBits());
43976 SDValue Bcst = TLO.DAG.getNode(X86ISD::VBROADCAST, DL, BcstVT, Src);
43977 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
43978 TLO.DAG, DL, ExtSizeInBits));
43980 case X86ISD::VBROADCAST_LOAD: {
43981 SDLoc DL(Op);
43982 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
43983 EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
43984 ExtSizeInBits / VT.getScalarSizeInBits());
43985 SDVTList Tys = TLO.DAG.getVTList(BcstVT, MVT::Other);
43986 SDValue Ops[] = {MemIntr->getOperand(0), MemIntr->getOperand(1)};
43987 SDValue Bcst = TLO.DAG.getMemIntrinsicNode(
43988 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MemIntr->getMemoryVT(),
43989 MemIntr->getMemOperand());
43990 TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
43991 Bcst.getValue(1));
43992 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
43993 TLO.DAG, DL, ExtSizeInBits));
43995 // Subvector broadcast.
43996 case X86ISD::SUBV_BROADCAST_LOAD: {
43997 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
43998 EVT MemVT = MemIntr->getMemoryVT();
43999 if (ExtSizeInBits == MemVT.getStoreSizeInBits()) {
44000 SDLoc DL(Op);
44001 SDValue Ld =
44002 TLO.DAG.getLoad(MemVT, DL, MemIntr->getChain(),
44003 MemIntr->getBasePtr(), MemIntr->getMemOperand());
44004 TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
44005 Ld.getValue(1));
44006 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Ld, 0,
44007 TLO.DAG, DL, ExtSizeInBits));
44008 } else if ((ExtSizeInBits % MemVT.getStoreSizeInBits()) == 0) {
44010 EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
44011 ExtSizeInBits / VT.getScalarSizeInBits());
44012 if (SDValue BcstLd =
44013 getBROADCAST_LOAD(Opc, DL, BcstVT, MemVT, MemIntr, 0, TLO.DAG))
44014 return TLO.CombineTo(Op,
44015 insertSubVector(TLO.DAG.getUNDEF(VT), BcstLd, 0,
44016 TLO.DAG, DL, ExtSizeInBits));
44020 // Byte shifts by immediate.
44021 case X86ISD::VSHLDQ:
44022 case X86ISD::VSRLDQ:
44023 // Shift by uniform.
44027 // Shift by immediate.
44028 case X86ISD::VSHLI:
44029 case X86ISD::VSRLI:
44030 case X86ISD::VSRAI: {
44031 SDLoc DL(Op);
44032 SDValue Ext0 =
44033 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
44034 SDValue ExtOp =
44035 TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
44036 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
44037 SDValue Insert =
44038 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
44039 return TLO.CombineTo(Op, Insert);
44041 case X86ISD::VPERMI: {
44042 // Simplify PERMPD/PERMQ to extract_subvector.
44043 // TODO: This should be done in shuffle combining.
44044 if (VT == MVT::v4f64 || VT == MVT::v4i64) {
44045 SmallVector<int, 4> Mask;
44046 DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
44047 if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
44049 SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
44050 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
44051 SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
44052 return TLO.CombineTo(Op, Insert);
44057 case X86ISD::VPERM2X128: {
44058 // Simplify VPERM2F128/VPERM2I128 to extract_subvector.
44059 SDLoc DL(Op);
44060 unsigned LoMask = Op.getConstantOperandVal(2) & 0xF;
44061 if (LoMask & 0x8)
44062 return TLO.CombineTo(
44063 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, DL));
44064 unsigned EltIdx = (LoMask & 0x1) * (NumElts / 2);
44065 unsigned SrcIdx = (LoMask & 0x2) >> 1;
44066 SDValue ExtOp =
44067 extractSubVector(Op.getOperand(SrcIdx), EltIdx, TLO.DAG, DL, 128);
44068 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
44069 SDValue Insert =
44070 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
44071 return TLO.CombineTo(Op, Insert);
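// e.g. if only the low 128 bits of the result are demanded and the low
// imm nibble is 3, the answer is the high half of the second source:
// extract_subvector(Op1, NumElts/2) reinserted at element 0.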
44073 // Zero upper elements.
44074 case X86ISD::VZEXT_MOVL:
44075 // Target unary shuffles by immediate:
44076 case X86ISD::PSHUFD:
44077 case X86ISD::PSHUFLW:
44078 case X86ISD::PSHUFHW:
44079 case X86ISD::VPERMILPI:
44080 // (Non-Lane Crossing) Target Shuffles.
44081 case X86ISD::VPERMILPV:
44082 case X86ISD::VPERMIL2:
44083 case X86ISD::PSHUFB:
44084 case X86ISD::UNPCKL:
44085 case X86ISD::UNPCKH:
44086 case X86ISD::BLENDI:
44088 case X86ISD::PACKSS:
44089 case X86ISD::PACKUS:
44093 case X86ISD::FHADD:
44094 case X86ISD::FHSUB: {
44095 SDLoc DL(Op);
44096 SmallVector<SDValue, 4> Ops;
44097 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
44098 SDValue SrcOp = Op.getOperand(i);
44099 EVT SrcVT = SrcOp.getValueType();
44100 assert((!SrcVT.isVector() || SrcVT.getSizeInBits() == SizeInBits) &&
44101 "Unsupported vector size");
44102 Ops.push_back(SrcVT.isVector() ? extractSubVector(SrcOp, 0, TLO.DAG, DL,
44103 ExtSizeInBits)
44104 : SrcOp);
44106 MVT ExtVT = VT.getSimpleVT();
44107 ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
44108 ExtSizeInBits / ExtVT.getScalarSizeInBits());
44109 SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ops);
44110 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
44111 SDValue Insert =
44112 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
44113 return TLO.CombineTo(Op, Insert);
44118 // For splats, unless we *only* demand the 0'th element, stop attempts at
44119 // simplification here; we aren't going to improve things and this is
44120 // better than any potential shuffle.
44121 if (!DemandedElts.isOne() && TLO.DAG.isSplatValue(Op, /*AllowUndefs*/false))
44124 // Get target/faux shuffle mask.
44125 APInt OpUndef, OpZero;
44126 SmallVector<int, 64> OpMask;
44127 SmallVector<SDValue, 2> OpInputs;
44128 if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
44129 OpZero, TLO.DAG, Depth, false))
44132 // Shuffle inputs must be the same size as the result.
44133 if (OpMask.size() != (unsigned)NumElts ||
44134 llvm::any_of(OpInputs, [VT](SDValue V) {
44135 return VT.getSizeInBits() != V.getValueSizeInBits() ||
44136 !V.getValueType().isVector();
44140 KnownZero = OpZero;
44141 KnownUndef = OpUndef;
44143 // Check if shuffle mask can be simplified to undef/zero/identity.
44144 int NumSrcs = OpInputs.size();
44145 for (int i = 0; i != NumElts; ++i)
44146 if (!DemandedElts[i])
44147 OpMask[i] = SM_SentinelUndef;
44149 if (isUndefInRange(OpMask, 0, NumElts)) {
44150 KnownUndef.setAllBits();
44151 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
44153 if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
44154 KnownZero.setAllBits();
44155 return TLO.CombineTo(
44156 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
44158 for (int Src = 0; Src != NumSrcs; ++Src)
44159 if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
44160 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
44162 // Attempt to simplify inputs.
44163 for (int Src = 0; Src != NumSrcs; ++Src) {
44164 // TODO: Support inputs of different types.
44165 if (OpInputs[Src].getValueType() != VT)
44168 int Lo = Src * NumElts;
44169 APInt SrcElts = APInt::getZero(NumElts);
44170 for (int i = 0; i != NumElts; ++i)
44171 if (DemandedElts[i]) {
44172 int M = OpMask[i] - Lo;
44173 if (0 <= M && M < NumElts)
44174 SrcElts.setBit(M);
44175 }
44177 // TODO - Propagate input undef/zero elts.
44178 APInt SrcUndef, SrcZero;
44179 if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
44184 // If we don't demand all elements, then attempt to combine to a simpler
44186 // We need to convert the depth to something combineX86ShufflesRecursively
44187 // can handle - so pretend it's Depth == 0 again, and reduce the max depth
44188 // to match. This prevents combineX86ShuffleChain from returning a
44189 // combined shuffle that's the same as the original root, causing an
44191 if (!DemandedElts.isAllOnes()) {
44192 assert(Depth < X86::MaxShuffleCombineDepth && "Depth out of range");
44194 SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
44195 for (int i = 0; i != NumElts; ++i)
44196 if (DemandedElts[i])
44197 DemandedMask[i] = i;
44199 SDValue NewShuffle = combineX86ShufflesRecursively(
44200 {Op}, 0, Op, DemandedMask, {}, 0, X86::MaxShuffleCombineDepth - Depth,
44201 /*HasVarMask*/ false,
44202 /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, TLO.DAG,
44203 Subtarget);
44204 if (NewShuffle)
44205 return TLO.CombineTo(Op, NewShuffle);
44211 bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
44212 SDValue Op, const APInt &OriginalDemandedBits,
44213 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
44214 unsigned Depth) const {
44215 EVT VT = Op.getValueType();
44216 unsigned BitWidth = OriginalDemandedBits.getBitWidth();
44217 unsigned Opc = Op.getOpcode();
44219 case X86ISD::VTRUNC: {
44220 KnownBits KnownOp;
44221 SDValue Src = Op.getOperand(0);
44222 MVT SrcVT = Src.getSimpleValueType();
44224 // Simplify the input, using demanded bit information.
44225 APInt TruncMask = OriginalDemandedBits.zext(SrcVT.getScalarSizeInBits());
44226 APInt DemandedElts = OriginalDemandedElts.trunc(SrcVT.getVectorNumElements());
44227 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, KnownOp, TLO, Depth + 1))
44228 return true;
44231 case X86ISD::PMULDQ:
44232 case X86ISD::PMULUDQ: {
44233 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
44234 KnownBits KnownLHS, KnownRHS;
44235 SDValue LHS = Op.getOperand(0);
44236 SDValue RHS = Op.getOperand(1);
44238 // Don't mask bits on 32-bit AVX512 targets which might lose a broadcast.
44239 // FIXME: Can we bound this better?
44240 APInt DemandedMask = APInt::getLowBitsSet(64, 32);
44241 APInt DemandedMaskLHS = APInt::getAllOnes(64);
44242 APInt DemandedMaskRHS = APInt::getAllOnes(64);
44244 bool Is32BitAVX512 = !Subtarget.is64Bit() && Subtarget.hasAVX512();
44245 if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(LHS))
44246 DemandedMaskLHS = DemandedMask;
44247 if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(RHS))
44248 DemandedMaskRHS = DemandedMask;
44250 if (SimplifyDemandedBits(LHS, DemandedMaskLHS, OriginalDemandedElts,
44251 KnownLHS, TLO, Depth + 1))
44253 if (SimplifyDemandedBits(RHS, DemandedMaskRHS, OriginalDemandedElts,
44254 KnownRHS, TLO, Depth + 1))
44257 // PMULUDQ(X,1) -> AND(X,(1<<32)-1) 'getZeroExtendInReg'.
44258 KnownRHS = KnownRHS.trunc(32);
44259 if (Opc == X86ISD::PMULUDQ && KnownRHS.isConstant() &&
44260 KnownRHS.getConstant().isOne()) {
44261 SDLoc DL(Op);
44262 SDValue Mask = TLO.DAG.getConstant(DemandedMask, DL, VT);
44263 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, DL, VT, LHS, Mask));
44266 // Aggressively peek through ops to get at the demanded low bits.
44267 SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
44268 LHS, DemandedMaskLHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
44269 SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
44270 RHS, DemandedMaskRHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
44271 if (DemandedLHS || DemandedRHS) {
44272 DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
44273 DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
44274 return TLO.CombineTo(
44275 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
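// e.g. since only the low 32 bits of each i64 element feed the multiply,
// PMULUDQ(X, splat(1)) folds to AND(X, 0xFFFFFFFF), a zero extend of the
// low half of each element done in-register.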
44279 case X86ISD::ANDNP: {
44281 SDValue Op0 = Op.getOperand(0);
44282 SDValue Op1 = Op.getOperand(1);
44284 if (SimplifyDemandedBits(Op1, OriginalDemandedBits, OriginalDemandedElts,
44285 Known, TLO, Depth + 1))
44286 return true;
44287 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
44288 KnownBits Known2;
44289 if (SimplifyDemandedBits(Op0, ~Known.Zero & OriginalDemandedBits,
44290 OriginalDemandedElts, Known2, TLO, Depth + 1))
44291 return true;
44292 assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
44294 // If the RHS is a constant, see if we can simplify it.
44295 if (ShrinkDemandedConstant(Op, ~Known2.One & OriginalDemandedBits,
44296 OriginalDemandedElts, TLO))
44299 // ANDNP = (~Op0 & Op1);
44300 Known.One &= Known2.Zero;
44301 Known.Zero |= Known2.One;
44304 case X86ISD::VSHLI: {
44305 SDValue Op0 = Op.getOperand(0);
44307 unsigned ShAmt = Op.getConstantOperandVal(1);
44308 if (ShAmt >= BitWidth)
44311 APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
44313 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
44314 // single shift. We can do this if the bottom bits (which are shifted
44315 // out) are never demanded.
44316 if (Op0.getOpcode() == X86ISD::VSRLI &&
44317 OriginalDemandedBits.countr_zero() >= ShAmt) {
44318 unsigned Shift2Amt = Op0.getConstantOperandVal(1);
44319 if (Shift2Amt < BitWidth) {
44320 int Diff = ShAmt - Shift2Amt;
44322 return TLO.CombineTo(Op, Op0.getOperand(0));
44324 unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
44325 SDValue NewShift = TLO.DAG.getNode(
44326 NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
44327 TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
44328 return TLO.CombineTo(Op, NewShift);
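// e.g. vshli(vsrli(X,4),6) when the low 6 result bits are undemanded:
// Diff = 6 - 4 = 2, so the pair collapses to a single vshli(X,2).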
44332 // If we are only demanding sign bits then we can use the shift source directly.
44333 unsigned NumSignBits =
44334 TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1);
44335 unsigned UpperDemandedBits = BitWidth - OriginalDemandedBits.countr_zero();
44336 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
44337 return TLO.CombineTo(Op, Op0);
44339 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
44340 TLO, Depth + 1))
44341 return true;
44343 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
44344 Known.Zero <<= ShAmt;
44345 Known.One <<= ShAmt;
44347 // Low bits known zero.
44348 Known.Zero.setLowBits(ShAmt);
44351 case X86ISD::VSRLI: {
44352 unsigned ShAmt = Op.getConstantOperandVal(1);
44353 if (ShAmt >= BitWidth)
44356 APInt DemandedMask = OriginalDemandedBits << ShAmt;
44358 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
44359 OriginalDemandedElts, Known, TLO, Depth + 1))
44360 return true;
44362 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
44363 Known.Zero.lshrInPlace(ShAmt);
44364 Known.One.lshrInPlace(ShAmt);
44366 // High bits known zero.
44367 Known.Zero.setHighBits(ShAmt);
44370 case X86ISD::VSRAI: {
44371 SDValue Op0 = Op.getOperand(0);
44372 SDValue Op1 = Op.getOperand(1);
44374 unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
44375 if (ShAmt >= BitWidth)
44378 APInt DemandedMask = OriginalDemandedBits << ShAmt;
44380 // If we just want the sign bit then we don't need to shift it.
44381 if (OriginalDemandedBits.isSignMask())
44382 return TLO.CombineTo(Op, Op0);
44384 // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
44385 if (Op0.getOpcode() == X86ISD::VSHLI &&
44386 Op.getOperand(1) == Op0.getOperand(1)) {
44387 SDValue Op00 = Op0.getOperand(0);
44388 unsigned NumSignBits =
44389 TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
44390 if (ShAmt < NumSignBits)
44391 return TLO.CombineTo(Op, Op00);
44394 // If any of the demanded bits are produced by the sign extension, we also
44395 // demand the input sign bit.
44396 if (OriginalDemandedBits.countl_zero() < ShAmt)
44397 DemandedMask.setSignBit();
44399 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
44400 TLO, Depth + 1))
44401 return true;
44403 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
44404 Known.Zero.lshrInPlace(ShAmt);
44405 Known.One.lshrInPlace(ShAmt);
44407 // If the input sign bit is known to be zero, or if none of the top bits
44408 // are demanded, turn this into an unsigned shift right.
44409 if (Known.Zero[BitWidth - ShAmt - 1] ||
44410 OriginalDemandedBits.countl_zero() >= ShAmt)
44411 return TLO.CombineTo(
44412 Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
44414 // High bits are known one.
44415 if (Known.One[BitWidth - ShAmt - 1])
44416 Known.One.setHighBits(ShAmt);
44419 case X86ISD::BLENDV: {
44420 SDValue Sel = Op.getOperand(0);
44421 SDValue LHS = Op.getOperand(1);
44422 SDValue RHS = Op.getOperand(2);
44424 APInt SignMask = APInt::getSignMask(BitWidth);
44425 SDValue NewSel = SimplifyMultipleUseDemandedBits(
44426 Sel, SignMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
44427 SDValue NewLHS = SimplifyMultipleUseDemandedBits(
44428 LHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
44429 SDValue NewRHS = SimplifyMultipleUseDemandedBits(
44430 RHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
44432 if (NewSel || NewLHS || NewRHS) {
44433 NewSel = NewSel ? NewSel : Sel;
44434 NewLHS = NewLHS ? NewLHS : LHS;
44435 NewRHS = NewRHS ? NewRHS : RHS;
44436 return TLO.CombineTo(Op, TLO.DAG.getNode(X86ISD::BLENDV, SDLoc(Op), VT,
44437 NewSel, NewLHS, NewRHS));
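// e.g. BLENDV only tests the sign bit of each selector element, so any
// computation feeding Sel that can't change the sign bits (such as an
// 'or' with a constant whose sign bits are clear) can be looked through
// here.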
44441 case X86ISD::PEXTRB:
44442 case X86ISD::PEXTRW: {
44443 SDValue Vec = Op.getOperand(0);
44444 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
44445 MVT VecVT = Vec.getSimpleValueType();
44446 unsigned NumVecElts = VecVT.getVectorNumElements();
44448 if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
44449 unsigned Idx = CIdx->getZExtValue();
44450 unsigned VecBitWidth = VecVT.getScalarSizeInBits();
44452 // If we demand no bits from the vector then we must have demanded
44453 // bits from the implicit zext - simplify to zero.
44454 APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
44455 if (DemandedVecBits == 0)
44456 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
44458 APInt KnownUndef, KnownZero;
44459 APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
44460 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
44461 KnownZero, TLO, Depth + 1))
44464 KnownBits KnownVec;
44465 if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
44466 KnownVec, TLO, Depth + 1))
44469 if (SDValue V = SimplifyMultipleUseDemandedBits(
44470 Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
44471 return TLO.CombineTo(
44472 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
44474 Known = KnownVec.zext(BitWidth);
44479 case X86ISD::PINSRB:
44480 case X86ISD::PINSRW: {
44481 SDValue Vec = Op.getOperand(0);
44482 SDValue Scl = Op.getOperand(1);
44483 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
44484 MVT VecVT = Vec.getSimpleValueType();
44486 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
44487 unsigned Idx = CIdx->getZExtValue();
44488 if (!OriginalDemandedElts[Idx])
44489 return TLO.CombineTo(Op, Vec);
44491 KnownBits KnownVec;
44492 APInt DemandedVecElts(OriginalDemandedElts);
44493 DemandedVecElts.clearBit(Idx);
44494 if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
44495 KnownVec, TLO, Depth + 1))
44498 KnownBits KnownScl;
44499 unsigned NumSclBits = Scl.getScalarValueSizeInBits();
44500 APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
44501 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
44502 return true;
44504 KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
44505 Known = KnownVec.intersectWith(KnownScl);
44506 return false;
44507 }
44508 break;
44509 }
44510 case X86ISD::PACKSS:
44511 // PACKSS saturates to MIN/MAX integer values. So if we just want the
44512 // sign bit then we can just ask for the source operands' sign bits.
44513 // TODO - add known bits handling.
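// e.g. PACKSSWB clamps each i16 to [-128, 127], which preserves the sign,
// so each i8 sign bit equals the corresponding source i16 sign bit.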
44514 if (OriginalDemandedBits.isSignMask()) {
44515 APInt DemandedLHS, DemandedRHS;
44516 getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
44518 KnownBits KnownLHS, KnownRHS;
44519 APInt SignMask = APInt::getSignMask(BitWidth * 2);
44520 if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
44521 KnownLHS, TLO, Depth + 1))
44522 return true;
44523 if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
44524 KnownRHS, TLO, Depth + 1))
44525 return true;
44527 // Attempt to avoid multi-use ops if we don't need anything from them.
44528 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
44529 Op.getOperand(0), SignMask, DemandedLHS, TLO.DAG, Depth + 1);
44530 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
44531 Op.getOperand(1), SignMask, DemandedRHS, TLO.DAG, Depth + 1);
44532 if (DemandedOp0 || DemandedOp1) {
44533 SDValue Op0 = DemandedOp0 ? DemandedOp0 : Op.getOperand(0);
44534 SDValue Op1 = DemandedOp1 ? DemandedOp1 : Op.getOperand(1);
44535 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, Op0, Op1));
44536 }
44537 }
44538 // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
44539 break;
44540 case X86ISD::VBROADCAST: {
44541 SDValue Src = Op.getOperand(0);
44542 MVT SrcVT = Src.getSimpleValueType();
44543 APInt DemandedElts = APInt::getOneBitSet(
44544 SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1, 0);
44545 if (SimplifyDemandedBits(Src, OriginalDemandedBits, DemandedElts, Known,
44546 TLO, Depth + 1))
44547 return true;
44548 // If we don't need the upper bits, attempt to narrow the broadcast source.
44549 // Don't attempt this on AVX512 as it might affect broadcast folding.
44550 // TODO: Should we attempt this for i32/i16 splats? They tend to be slower.
44551 if ((BitWidth == 64) && SrcVT.isScalarInteger() && !Subtarget.hasAVX512() &&
44552 OriginalDemandedBits.countl_zero() >= (BitWidth / 2) &&
44553 Src->hasOneUse()) {
44554 MVT NewSrcVT = MVT::getIntegerVT(BitWidth / 2);
44555 SDValue NewSrc =
44556 TLO.DAG.getNode(ISD::TRUNCATE, SDLoc(Src), NewSrcVT, Src);
44557 MVT NewVT = MVT::getVectorVT(NewSrcVT, VT.getVectorNumElements() * 2);
44558 SDValue NewBcst =
44559 TLO.DAG.getNode(X86ISD::VBROADCAST, SDLoc(Op), NewVT, NewSrc);
44560 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, NewBcst));
44561 }
44562 break;
44563 }
44564 case X86ISD::PCMPGT:
44565 // icmp sgt(0, R) == ashr(R, BitWidth-1).
44566 // iff we only need the sign bit then we can use R directly.
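// e.g. pcmpgt(0, X) is all-ones exactly when X is negative, so its sign bit
// always matches the sign bit of X.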
44567 if (OriginalDemandedBits.isSignMask() &&
44568 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
44569 return TLO.CombineTo(Op, Op.getOperand(1));
44570 break;
44571 case X86ISD::MOVMSK: {
44572 SDValue Src = Op.getOperand(0);
44573 MVT SrcVT = Src.getSimpleValueType();
44574 unsigned SrcBits = SrcVT.getScalarSizeInBits();
44575 unsigned NumElts = SrcVT.getVectorNumElements();
44577 // If we don't need the sign bits at all just return zero.
44578 if (OriginalDemandedBits.countr_zero() >= NumElts)
44579 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
44581 // See if we only demand bits from the lower 128-bit vector.
44582 if (SrcVT.is256BitVector() &&
44583 OriginalDemandedBits.getActiveBits() <= (NumElts / 2)) {
44584 SDValue NewSrc = extract128BitVector(Src, 0, TLO.DAG, SDLoc(Src));
44585 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
44586 }
44588 // Only demand the vector elements of the sign bits we need.
44589 APInt KnownUndef, KnownZero;
44590 APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
44591 if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
44592 TLO, Depth + 1))
44593 return true;
44595 Known.Zero = KnownZero.zext(BitWidth);
44596 Known.Zero.setHighBits(BitWidth - NumElts);
44598 // MOVMSK only uses the MSB from each vector element.
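// e.g. for v4f32 MOVMSKPS the result bits [3:0] are the four lane sign bits
// and bits [31:4] are always zero.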
44599 KnownBits KnownSrc;
44600 APInt DemandedSrcBits = APInt::getSignMask(SrcBits);
44601 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, KnownSrc, TLO,
44602 Depth + 1))
44603 return true;
44605 if (KnownSrc.One[SrcBits - 1])
44606 Known.One.setLowBits(NumElts);
44607 else if (KnownSrc.Zero[SrcBits - 1])
44608 Known.Zero.setLowBits(NumElts);
44610 // Attempt to avoid multi-use ops if we don't need anything from them.
44611 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
44612 Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
44613 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
44614 return false;
44615 }
44616 case X86ISD::TESTP: {
44617 SDValue Op0 = Op.getOperand(0);
44618 SDValue Op1 = Op.getOperand(1);
44619 MVT OpVT = Op0.getSimpleValueType();
44620 assert((OpVT.getVectorElementType() == MVT::f32 ||
44621 OpVT.getVectorElementType() == MVT::f64) &&
44622 "Illegal vector type for X86ISD::TESTP");
44624 // TESTPS/TESTPD only demands the sign bits of ALL the elements.
44625 KnownBits KnownSrc;
44626 APInt SignMask = APInt::getSignMask(OpVT.getScalarSizeInBits());
44627 bool AssumeSingleUse = (Op0 == Op1) && Op->isOnlyUserOf(Op0.getNode());
44628 return SimplifyDemandedBits(Op0, SignMask, KnownSrc, TLO, Depth + 1,
44629 AssumeSingleUse) ||
44630 SimplifyDemandedBits(Op1, SignMask, KnownSrc, TLO, Depth + 1,
44631 AssumeSingleUse);
44632 }
44633 case X86ISD::BEXTR:
44634 case X86ISD::BEXTRI: {
44635 SDValue Op0 = Op.getOperand(0);
44636 SDValue Op1 = Op.getOperand(1);
44638 // Only bottom 16-bits of the control bits are required.
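// The BEXTR control operand encodes the start bit offset in bits [7:0] and
// the number of bits to extract in bits [15:8]; the instruction ignores the
// upper control bits.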
44639 if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
44640 // NOTE: SimplifyDemandedBits won't do this for constants.
44641 uint64_t Val1 = Cst1->getZExtValue();
44642 uint64_t MaskedVal1 = Val1 & 0xFFFF;
44643 if (Opc == X86ISD::BEXTR && MaskedVal1 != Val1) {
44644 SDLoc DL(Op);
44645 return TLO.CombineTo(
44646 Op, TLO.DAG.getNode(X86ISD::BEXTR, DL, VT, Op0,
44647 TLO.DAG.getConstant(MaskedVal1, DL, VT)));
44648 }
44650 unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
44651 unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
44653 // If the length is 0, the result is 0.
44654 if (Length == 0) {
44655 Known.setAllZero();
44656 return false;
44657 }
44659 if ((Shift + Length) <= BitWidth) {
44660 APInt DemandedMask = APInt::getBitsSet(BitWidth, Shift, Shift + Length);
44661 if (SimplifyDemandedBits(Op0, DemandedMask, Known, TLO, Depth + 1))
44662 return true;
44664 Known = Known.extractBits(Length, Shift);
44665 Known = Known.zextOrTrunc(BitWidth);
44666 return false;
44667 }
44668 } else {
44669 assert(Opc == X86ISD::BEXTR && "Unexpected opcode!");
44670 KnownBits Known1;
44671 APInt DemandedMask(APInt::getLowBitsSet(BitWidth, 16));
44672 if (SimplifyDemandedBits(Op1, DemandedMask, Known1, TLO, Depth + 1))
44673 return true;
44675 // If the length is 0, replace with 0.
44676 KnownBits LengthBits = Known1.extractBits(8, 8);
44677 if (LengthBits.isZero())
44678 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
44679 }
44681 break;
44682 }
44683 case X86ISD::PDEP: {
44684 SDValue Op0 = Op.getOperand(0);
44685 SDValue Op1 = Op.getOperand(1);
44687 unsigned DemandedBitsLZ = OriginalDemandedBits.countl_zero();
44688 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
44690 // If the demanded bits have leading zeroes, we don't demand those from the
44691 // mask.
44692 if (SimplifyDemandedBits(Op1, LoMask, Known, TLO, Depth + 1))
44693 return true;
44695 // The number of possible 1s in the mask determines the number of LSBs of
44696 // operand 0 used. Undemanded bits from the mask don't matter so filter
44697 // them before counting.
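// e.g. (illustrative) with mask 0b0110, PDEP deposits only the low 2 bits of
// operand 0 into the two set mask positions, so demanding more than
// popcount(mask) low bits of operand 0 is unnecessary.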
44698 KnownBits Known2;
44699 uint64_t Count = (~Known.Zero & LoMask).popcount();
44700 APInt DemandedMask(APInt::getLowBitsSet(BitWidth, Count));
44701 if (SimplifyDemandedBits(Op0, DemandedMask, Known2, TLO, Depth + 1))
44702 return true;
44704 // Zeroes are retained from the mask, but not ones.
44705 Known.One.clearAllBits();
44706 // The result will have at least as many trailing zeros as the non-mask
44707 // operand since bits can only map to the same or higher bit position.
44708 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
44709 return false;
44710 }
44711 }
44713 return TargetLowering::SimplifyDemandedBitsForTargetNode(
44714 Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
44715 }
44717 SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
44718 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
44719 SelectionDAG &DAG, unsigned Depth) const {
44720 int NumElts = DemandedElts.getBitWidth();
44721 unsigned Opc = Op.getOpcode();
44722 EVT VT = Op.getValueType();
44724 switch (Opc) {
44725 case X86ISD::PINSRB:
44726 case X86ISD::PINSRW: {
44727 // If we don't demand the inserted element, return the base vector.
44728 SDValue Vec = Op.getOperand(0);
44729 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
44730 MVT VecVT = Vec.getSimpleValueType();
44731 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
44732 !DemandedElts[CIdx->getZExtValue()])
44733 return Vec;
44734 break;
44735 }
44736 case X86ISD::VSHLI: {
44737 // If we are only demanding sign bits then we can use the shift source
44738 // directly.
44739 SDValue Op0 = Op.getOperand(0);
44740 unsigned ShAmt = Op.getConstantOperandVal(1);
44741 unsigned BitWidth = DemandedBits.getBitWidth();
44742 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
44743 unsigned UpperDemandedBits = BitWidth - DemandedBits.countr_zero();
44744 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
44745 return Op0;
44746 break;
44747 }
44748 case X86ISD::VSRAI:
44749 // iff we only need the sign bit then we can use the source directly.
44750 // TODO: generalize where we only demand extended signbits.
44751 if (DemandedBits.isSignMask())
44752 return Op.getOperand(0);
44753 break;
44754 case X86ISD::PCMPGT:
44755 // icmp sgt(0, R) == ashr(R, BitWidth-1).
44756 // iff we only need the sign bit then we can use R directly.
44757 if (DemandedBits.isSignMask() &&
44758 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
44759 return Op.getOperand(1);
44760 break;
44761 case X86ISD::ANDNP: {
44762 // ANDNP = (~LHS & RHS);
44763 SDValue LHS = Op.getOperand(0);
44764 SDValue RHS = Op.getOperand(1);
44766 KnownBits LHSKnown = DAG.computeKnownBits(LHS, DemandedElts, Depth + 1);
44767 KnownBits RHSKnown = DAG.computeKnownBits(RHS, DemandedElts, Depth + 1);
44769 // If all of the demanded bits are known 0 on LHS and known 0 on RHS, then
44770 // the (inverted) LHS bits cannot contribute to the result of the 'andn' in
44771 // this context, so return RHS.
44772 if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero))
44773 return RHS;
44774 break;
44775 }
44776 }
44778 APInt ShuffleUndef, ShuffleZero;
44779 SmallVector<int, 16> ShuffleMask;
44780 SmallVector<SDValue, 2> ShuffleOps;
44781 if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
44782 ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
44783 // If all the demanded elts are from one operand and are inline,
44784 // then we can use the operand directly.
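// ("Inline" here means demanded element i of the shuffle result comes from
// element i of a single source operand, i.e. an identity mapping.)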
44785 int NumOps = ShuffleOps.size();
44786 if (ShuffleMask.size() == (unsigned)NumElts &&
44787 llvm::all_of(ShuffleOps, [VT](SDValue V) {
44788 return VT.getSizeInBits() == V.getValueSizeInBits();
44789 })) {
44791 if (DemandedElts.isSubsetOf(ShuffleUndef))
44792 return DAG.getUNDEF(VT);
44793 if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
44794 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
44796 // Bitmask that indicates which ops have only been accessed 'inline'.
44797 APInt IdentityOp = APInt::getAllOnes(NumOps);
44798 for (int i = 0; i != NumElts; ++i) {
44799 int M = ShuffleMask[i];
44800 if (!DemandedElts[i] || ShuffleUndef[i])
44801 continue;
44802 int OpIdx = M / NumElts;
44803 int EltIdx = M % NumElts;
44804 if (M < 0 || EltIdx != i) {
44805 IdentityOp.clearAllBits();
44806 break;
44807 }
44808 IdentityOp &= APInt::getOneBitSet(NumOps, OpIdx);
44809 if (IdentityOp == 0)
44810 break;
44811 }
44812 assert((IdentityOp == 0 || IdentityOp.popcount() == 1) &&
44813 "Multiple identity shuffles detected");
44815 if (IdentityOp != 0)
44816 return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countr_zero()]);
44817 }
44818 }
44820 return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
44821 Op, DemandedBits, DemandedElts, DAG, Depth);
44822 }
44824 bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
44825 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
44826 bool PoisonOnly, unsigned Depth) const {
44827 unsigned EltsBits = Op.getScalarValueSizeInBits();
44828 unsigned NumElts = DemandedElts.getBitWidth();
44830 // TODO: Add more target shuffles.
44831 switch (Op.getOpcode()) {
44832 case X86ISD::PSHUFD:
44833 case X86ISD::VPERMILPI: {
44834 SmallVector<int, 8> Mask;
44835 DecodePSHUFMask(NumElts, EltsBits, Op.getConstantOperandVal(1), Mask);
44837 APInt DemandedSrcElts = APInt::getZero(NumElts);
44838 for (unsigned I = 0; I != NumElts; ++I)
44839 if (DemandedElts[I])
44840 DemandedSrcElts.setBit(Mask[I]);
44842 return DAG.isGuaranteedNotToBeUndefOrPoison(
44843 Op.getOperand(0), DemandedSrcElts, PoisonOnly, Depth + 1);
44844 }
44845 }
44846 return TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
44847 Op, DemandedElts, DAG, PoisonOnly, Depth);
44848 }
44850 bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode(
44851 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
44852 bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const {
44854 // TODO: Add more target shuffles.
44855 switch (Op.getOpcode()) {
44856 case X86ISD::PSHUFD:
44857 case X86ISD::VPERMILPI:
44858 return false;
44859 }
44860 return TargetLowering::canCreateUndefOrPoisonForTargetNode(
44861 Op, DemandedElts, DAG, PoisonOnly, ConsiderFlags, Depth);
44862 }
44864 bool X86TargetLowering::isSplatValueForTargetNode(SDValue Op,
44865 const APInt &DemandedElts,
44866 APInt &UndefElts,
44867 const SelectionDAG &DAG,
44868 unsigned Depth) const {
44869 unsigned NumElts = DemandedElts.getBitWidth();
44870 unsigned Opc = Op.getOpcode();
44872 switch (Opc) {
44873 case X86ISD::VBROADCAST:
44874 case X86ISD::VBROADCAST_LOAD:
44875 UndefElts = APInt::getZero(NumElts);
44876 return true;
44877 }
44879 return TargetLowering::isSplatValueForTargetNode(Op, DemandedElts, UndefElts,
44880 DAG, Depth);
44881 }
44883 // Helper to peek through bitops/trunc/setcc to determine size of source vector.
44884 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
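// e.g. a v8i1 result truncated from a v8i32 bitop/setcc chain reports a
// 256-bit source vector.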
44885 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size,
44886 bool AllowTruncate) {
44887 switch (Src.getOpcode()) {
44888 case ISD::TRUNCATE:
44889 if (!AllowTruncate)
44890 return false;
44891 [[fallthrough]];
44892 case ISD::SETCC:
44893 return Src.getOperand(0).getValueSizeInBits() == Size;
44894 case ISD::AND:
44895 case ISD::XOR:
44896 case ISD::OR:
44897 return checkBitcastSrcVectorSize(Src.getOperand(0), Size, AllowTruncate) &&
44898 checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate);
44899 case ISD::SELECT:
44900 case ISD::VSELECT:
44901 return Src.getOperand(0).getScalarValueSizeInBits() == 1 &&
44902 checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate) &&
44903 checkBitcastSrcVectorSize(Src.getOperand(2), Size, AllowTruncate);
44904 case ISD::BUILD_VECTOR:
44905 return ISD::isBuildVectorAllZeros(Src.getNode()) ||
44906 ISD::isBuildVectorAllOnes(Src.getNode());
44907 }
44908 return false;
44909 }
44911 // Helper to flip between AND/OR/XOR opcodes and their X86ISD FP equivalents.
44912 static unsigned getAltBitOpcode(unsigned Opcode) {
44913 switch (Opcode) {
44914 case ISD::AND: return X86ISD::FAND;
44915 case ISD::OR: return X86ISD::FOR;
44916 case ISD::XOR: return X86ISD::FXOR;
44917 case X86ISD::ANDNP: return X86ISD::FANDN;
44918 }
44919 llvm_unreachable("Unknown bitwise opcode");
44920 }
44922 // Helper to adjust v4i32 MOVMSK expansion to work with SSE1-only targets.
44923 static SDValue adjustBitcastSrcVectorSSE1(SelectionDAG &DAG, SDValue Src,
44924 const SDLoc &DL) {
44925 EVT SrcVT = Src.getValueType();
44926 if (SrcVT != MVT::v4i1)
44927 return SDValue();
44929 switch (Src.getOpcode()) {
44930 case ISD::SETCC:
44931 if (Src.getOperand(0).getValueType() == MVT::v4i32 &&
44932 ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode()) &&
44933 cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT) {
44934 SDValue Op0 = Src.getOperand(0);
44935 if (ISD::isNormalLoad(Op0.getNode()))
44936 return DAG.getBitcast(MVT::v4f32, Op0);
44937 if (Op0.getOpcode() == ISD::BITCAST &&
44938 Op0.getOperand(0).getValueType() == MVT::v4f32)
44939 return Op0.getOperand(0);
44940 }
44941 break;
44942 case ISD::AND:
44943 case ISD::XOR:
44944 case ISD::OR: {
44945 SDValue Op0 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(0), DL);
44946 SDValue Op1 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(1), DL);
44947 if (Op0 && Op1)
44948 return DAG.getNode(getAltBitOpcode(Src.getOpcode()), DL, MVT::v4f32, Op0,
44949 Op1);
44950 break;
44951 }
44952 }
44953 return SDValue();
44954 }
44956 // Helper to push sign extension of vXi1 SETCC result through bitops.
44957 static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
44958 SDValue Src, const SDLoc &DL) {
44959 switch (Src.getOpcode()) {
44960 case ISD::SETCC:
44961 case ISD::TRUNCATE:
44962 case ISD::BUILD_VECTOR:
44963 return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
44964 case ISD::AND:
44965 case ISD::XOR:
44966 case ISD::OR:
44967 return DAG.getNode(
44968 Src.getOpcode(), DL, SExtVT,
44969 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
44970 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
44971 case ISD::SELECT:
44972 case ISD::VSELECT:
44973 return DAG.getSelect(
44974 DL, SExtVT, Src.getOperand(0),
44975 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL),
44976 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(2), DL));
44977 }
44978 llvm_unreachable("Unexpected node type for vXi1 sign extension");
44979 }
44981 // Try to match patterns such as
44982 // (i16 bitcast (v16i1 x))
44983 // ->
44984 // (i16 movmsk (v16i8 sext (v16i1 x)))
44985 // before the illegal vector is scalarized on subtargets that don't have legal
44986 // vxi1 types.
44987 static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
44988 const SDLoc &DL,
44989 const X86Subtarget &Subtarget) {
44990 EVT SrcVT = Src.getValueType();
44991 if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
44992 return SDValue();
44994 // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
44995 // legalization destroys the v4i32 type.
44996 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2()) {
44997 if (SDValue V = adjustBitcastSrcVectorSSE1(DAG, Src, DL)) {
44998 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32,
44999 DAG.getBitcast(MVT::v4f32, V));
45000 return DAG.getZExtOrTrunc(V, DL, VT);
45001 }
45002 return SDValue();
45003 }
45004 // If the input is a truncate from v16i8 or v32i8 go ahead and use a
45005 // movmskb even with avx512. This will be better than truncating to vXi1 and
45006 // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
45007 // vpcmpeqb/vpcmpgtb.
45008 bool PreferMovMsk = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
45009 (Src.getOperand(0).getValueType() == MVT::v16i8 ||
45010 Src.getOperand(0).getValueType() == MVT::v32i8 ||
45011 Src.getOperand(0).getValueType() == MVT::v64i8);
45013 // Prefer movmsk for AVX512 for (bitcast (setlt X, 0)) which can be handled
45014 // directly with vpmovmskb/vmovmskps/vmovmskpd.
45015 if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse() &&
45016 cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT &&
45017 ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
45018 EVT CmpVT = Src.getOperand(0).getValueType();
45019 EVT EltVT = CmpVT.getVectorElementType();
45020 if (CmpVT.getSizeInBits() <= 256 &&
45021 (EltVT == MVT::i8 || EltVT == MVT::i32 || EltVT == MVT::i64))
45022 PreferMovMsk = true;
45023 }
45025 // With AVX512 vxi1 types are legal and we prefer using k-regs.
45026 // MOVMSK is supported in SSE2 or later.
45027 if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !PreferMovMsk))
45028 return SDValue();
45030 // If the upper ops of a concatenation are undef, then try to bitcast the
45031 // lower op and extend.
45032 SmallVector<SDValue, 4> SubSrcOps;
45033 if (collectConcatOps(Src.getNode(), SubSrcOps, DAG) &&
45034 SubSrcOps.size() >= 2) {
45035 SDValue LowerOp = SubSrcOps[0];
45036 ArrayRef<SDValue> UpperOps(std::next(SubSrcOps.begin()), SubSrcOps.end());
45037 if (LowerOp.getOpcode() == ISD::SETCC &&
45038 all_of(UpperOps, [](SDValue Op) { return Op.isUndef(); })) {
45039 EVT SubVT = VT.getIntegerVT(
45040 *DAG.getContext(), LowerOp.getValueType().getVectorMinNumElements());
45041 if (SDValue V = combineBitcastvxi1(DAG, SubVT, LowerOp, DL, Subtarget)) {
45042 EVT IntVT = VT.getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
45043 return DAG.getBitcast(VT, DAG.getNode(ISD::ANY_EXTEND, DL, IntVT, V));
45044 }
45045 }
45046 }
45048 // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
45049 // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
45050 // v8i16 and v16i16.
45051 // For these two cases, we can shuffle the upper element bytes to a
45052 // consecutive sequence at the start of the vector and treat the results as
45053 // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
45054 // for v16i16 this is not the case, because the shuffle is expensive, so we
45055 // avoid sign-extending to this type entirely.
45056 // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
45057 // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
45058 MVT SExtVT;
45059 bool PropagateSExt = false;
45060 switch (SrcVT.getSimpleVT().SimpleTy) {
45061 default:
45062 return SDValue();
45063 case MVT::v2i1:
45064 SExtVT = MVT::v2i64;
45065 break;
45066 case MVT::v4i1:
45067 SExtVT = MVT::v4i32;
45068 // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
45069 // sign-extend to a 256-bit operation to avoid truncation.
45070 if (Subtarget.hasAVX() &&
45071 checkBitcastSrcVectorSize(Src, 256, Subtarget.hasAVX2())) {
45072 SExtVT = MVT::v4i64;
45073 PropagateSExt = true;
45074 }
45075 break;
45076 case MVT::v8i1:
45077 SExtVT = MVT::v8i16;
45078 // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
45079 // sign-extend to a 256-bit operation to match the compare.
45080 // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
45081 // 256-bit because the shuffle is cheaper than sign extending the result of
45082 // the compare.
45083 if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256, true) ||
45084 checkBitcastSrcVectorSize(Src, 512, true))) {
45085 SExtVT = MVT::v8i32;
45086 PropagateSExt = true;
45087 }
45088 break;
45089 case MVT::v16i1:
45090 SExtVT = MVT::v16i8;
45091 // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
45092 // it is not profitable to sign-extend to 256-bit because this will
45093 // require an extra cross-lane shuffle which is more expensive than
45094 // truncating the result of the compare to 128-bits.
45095 break;
45096 case MVT::v32i1:
45097 SExtVT = MVT::v32i8;
45098 break;
45099 case MVT::v64i1:
45100 // If we have AVX512F but not AVX512BW, and the input is truncated from
45101 // v64i8 (checked earlier), then split the input and make two pmovmskbs.
45102 if (Subtarget.hasAVX512()) {
45103 if (Subtarget.hasBWI())
45104 return SDValue();
45105 SExtVT = MVT::v64i8;
45106 break;
45107 }
45108 // Split if this is a <64 x i8> comparison result.
45109 if (checkBitcastSrcVectorSize(Src, 512, false)) {
45110 SExtVT = MVT::v64i8;
45111 break;
45112 }
45113 return SDValue();
45114 };
45116 SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
45117 : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
45119 if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
45120 V = getPMOVMSKB(DL, V, DAG, Subtarget);
45121 } else {
45122 if (SExtVT == MVT::v8i16) {
45123 V = widenSubVector(V, false, Subtarget, DAG, DL, 256);
45124 V = DAG.getNode(ISD::TRUNCATE, DL, MVT::v16i8, V);
45125 }
45126 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
45127 }
45129 EVT IntVT =
45130 EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
45131 V = DAG.getZExtOrTrunc(V, DL, IntVT);
45132 return DAG.getBitcast(VT, V);
45133 }
45135 // Convert a vXi1 constant build vector to the same width scalar integer.
45136 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
45137 EVT SrcVT = Op.getValueType();
45138 assert(SrcVT.getVectorElementType() == MVT::i1 &&
45139 "Expected a vXi1 vector");
45140 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
45141 "Expected a constant build vector");
45143 APInt Imm(SrcVT.getVectorNumElements(), 0);
45144 for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
45145 SDValue In = Op.getOperand(Idx);
45146 if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
45147 Imm.setBit(Idx);
45148 }
45149 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
45150 return DAG.getConstant(Imm, SDLoc(Op), IntVT);
45151 }
45153 static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
45154 TargetLowering::DAGCombinerInfo &DCI,
45155 const X86Subtarget &Subtarget) {
45156 assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
45158 if (!DCI.isBeforeLegalizeOps())
45159 return SDValue();
45161 // Only do this if we have k-registers.
45162 if (!Subtarget.hasAVX512())
45163 return SDValue();
45165 EVT DstVT = N->getValueType(0);
45166 SDValue Op = N->getOperand(0);
45167 EVT SrcVT = Op.getValueType();
45169 if (!Op.hasOneUse())
45170 return SDValue();
45172 // Look for logic ops.
45173 if (Op.getOpcode() != ISD::AND &&
45174 Op.getOpcode() != ISD::OR &&
45175 Op.getOpcode() != ISD::XOR)
45176 return SDValue();
45178 // Make sure we have a bitcast between mask registers and a scalar type.
45179 if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
45180 DstVT.isScalarInteger()) &&
45181 !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
45182 SrcVT.isScalarInteger()))
45183 return SDValue();
45185 SDValue LHS = Op.getOperand(0);
45186 SDValue RHS = Op.getOperand(1);
45188 if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
45189 LHS.getOperand(0).getValueType() == DstVT)
45190 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
45191 DAG.getBitcast(DstVT, RHS));
45193 if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
45194 RHS.getOperand(0).getValueType() == DstVT)
45195 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
45196 DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
45198 // If the RHS is a vXi1 build vector, this is a good reason to flip too.
45199 // Most of these have to move a constant from the scalar domain anyway.
45200 if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
45201 RHS = combinevXi1ConstantToInteger(RHS, DAG);
45202 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
45203 DAG.getBitcast(DstVT, LHS), RHS);
45204 }
45206 return SDValue();
45207 }
45209 static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
45210 const X86Subtarget &Subtarget) {
45211 SDLoc DL(BV);
45212 unsigned NumElts = BV->getNumOperands();
45213 SDValue Splat = BV->getSplatValue();
45215 // Build MMX element from integer GPR or SSE float values.
45216 auto CreateMMXElement = [&](SDValue V) {
45217 if (V.isUndef())
45218 return DAG.getUNDEF(MVT::x86mmx);
45219 if (V.getValueType().isFloatingPoint()) {
45220 if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
45221 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
45222 V = DAG.getBitcast(MVT::v2i64, V);
45223 return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
45224 }
45225 V = DAG.getBitcast(MVT::i32, V);
45226 } else {
45227 V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
45228 }
45229 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
45230 };
45232 // Convert build vector ops to MMX data in the bottom elements.
45233 SmallVector<SDValue, 8> Ops;
45235 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45237 // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
45238 if (Splat) {
45239 if (Splat.isUndef())
45240 return DAG.getUNDEF(MVT::x86mmx);
45242 Splat = CreateMMXElement(Splat);
45244 if (Subtarget.hasSSE1()) {
45245 // Unpack v8i8 to splat i8 elements to lowest 16-bits.
45246 if (NumElts == 8)
45247 Splat = DAG.getNode(
45248 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
45249 DAG.getTargetConstant(Intrinsic::x86_mmx_punpcklbw, DL,
45250 TLI.getPointerTy(DAG.getDataLayout())),
45251 Splat, Splat);
45253 // Use PSHUFW to repeat 16-bit elements.
45254 unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
45255 return DAG.getNode(
45256 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
45257 DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL,
45258 TLI.getPointerTy(DAG.getDataLayout())),
45259 Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
45260 }
45261 Ops.append(NumElts, Splat);
45262 } else {
45263 for (unsigned i = 0; i != NumElts; ++i)
45264 Ops.push_back(CreateMMXElement(BV->getOperand(i)));
45265 }
45267 // Use tree of PUNPCKLs to build up general MMX vector.
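// e.g. (illustrative) for v8i8: punpcklbw pairs the bytes, punpcklwd pairs
// the resulting words, and punpckldq merges the final two dwords.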
45268 while (Ops.size() > 1) {
45269 unsigned NumOps = Ops.size();
45270 unsigned IntrinOp =
45271 (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
45272 : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
45273 : Intrinsic::x86_mmx_punpcklbw));
45274 SDValue Intrin = DAG.getTargetConstant(
45275 IntrinOp, DL, TLI.getPointerTy(DAG.getDataLayout()));
45276 for (unsigned i = 0; i != NumOps; i += 2)
45277 Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
45278 Ops[i], Ops[i + 1]);
45279 Ops.resize(NumOps / 2);
45280 }
45282 return Ops[0];
45283 }
45285 // Recursive function that attempts to find if a bool vector node was originally
45286 // a vector/float/double that got truncated/extended/bitcast to/from a scalar
45287 // integer. If so, replace the scalar ops with bool vector equivalents back down
45288 // the chain.
45289 static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL,
45290 SelectionDAG &DAG,
45291 const X86Subtarget &Subtarget) {
45292 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45293 unsigned Opc = V.getOpcode();
45294 switch (Opc) {
45295 case ISD::BITCAST: {
45296 // Bitcast from a vector/float/double, we can cheaply bitcast to VT.
45297 SDValue Src = V.getOperand(0);
45298 EVT SrcVT = Src.getValueType();
45299 if (SrcVT.isVector() || SrcVT.isFloatingPoint())
45300 return DAG.getBitcast(VT, Src);
45301 break;
45302 }
45303 case ISD::TRUNCATE: {
45304 // If we find a suitable source, a truncated scalar becomes a subvector.
45305 SDValue Src = V.getOperand(0);
45306 EVT NewSrcVT =
45307 EVT::getVectorVT(*DAG.getContext(), MVT::i1, Src.getValueSizeInBits());
45308 if (TLI.isTypeLegal(NewSrcVT))
45309 if (SDValue N0 =
45310 combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
45311 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N0,
45312 DAG.getIntPtrConstant(0, DL));
45313 break;
45314 }
45315 case ISD::ANY_EXTEND:
45316 case ISD::ZERO_EXTEND: {
45317 // If we find a suitable source, an extended scalar becomes a subvector.
45318 SDValue Src = V.getOperand(0);
45319 EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
45320 Src.getScalarValueSizeInBits());
45321 if (TLI.isTypeLegal(NewSrcVT))
45322 if (SDValue N0 =
45323 combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
45324 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
45325 Opc == ISD::ANY_EXTEND ? DAG.getUNDEF(VT)
45326 : DAG.getConstant(0, DL, VT),
45327 N0, DAG.getIntPtrConstant(0, DL));
45328 break;
45329 }
45330 case ISD::OR: {
45331 // If we find suitable sources, we can just move an OR to the vector domain.
45332 SDValue Src0 = V.getOperand(0);
45333 SDValue Src1 = V.getOperand(1);
45334 if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
45335 if (SDValue N1 = combineBitcastToBoolVector(VT, Src1, DL, DAG, Subtarget))
45336 return DAG.getNode(Opc, DL, VT, N0, N1);
45337 break;
45338 }
45339 case ISD::SHL: {
45340 // If we find a suitable source, a SHL becomes a KSHIFTL.
45341 SDValue Src0 = V.getOperand(0);
45342 if ((VT == MVT::v8i1 && !Subtarget.hasDQI()) ||
45343 ((VT == MVT::v32i1 || VT == MVT::v64i1) && !Subtarget.hasBWI()))
45344 break;
45346 if (auto *Amt = dyn_cast<ConstantSDNode>(V.getOperand(1)))
45347 if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
45348 return DAG.getNode(
45349 X86ISD::KSHIFTL, DL, VT, N0,
45350 DAG.getTargetConstant(Amt->getZExtValue(), DL, MVT::i8));
45351 break;
45352 }
45353 }
45354 return SDValue();
45355 }
45357 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
45358 TargetLowering::DAGCombinerInfo &DCI,
45359 const X86Subtarget &Subtarget) {
45360 SDValue N0 = N->getOperand(0);
45361 EVT VT = N->getValueType(0);
45362 EVT SrcVT = N0.getValueType();
45363 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45365 // Try to match patterns such as
45366 // (i16 bitcast (v16i1 x))
45367 // ->
45368 // (i16 movmsk (v16i8 sext (v16i1 x)))
45369 // before the setcc result is scalarized on subtargets that don't have legal
45370 // vxi1 types.
45371 if (DCI.isBeforeLegalize()) {
45372 SDLoc dl(N);
45373 if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
45374 return V;
45376 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
45377 // type, widen both sides to avoid a trip through memory.
45378 if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
45379 Subtarget.hasAVX512()) {
45380 N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
45381 N0 = DAG.getBitcast(MVT::v8i1, N0);
45382 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
45383 DAG.getIntPtrConstant(0, dl));
45384 }
45386 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
45387 // type, widen both sides to avoid a trip through memory.
45388 if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
45389 Subtarget.hasAVX512()) {
45390 // Use zeros for the widening if we already have some zeroes. This can
45391 // allow SimplifyDemandedBits to remove scalar ANDs that may be down
45392 // stream.
45393 // FIXME: It might make sense to detect a concat_vectors with a mix of
45394 // zeroes and undef and turn it into insert_subvector for i1 vectors as
45395 // a separate combine. What we can't do is canonicalize the operands of
45396 // such a concat or we'll get into a loop with SimplifyDemandedBits.
45397 if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
45398 SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
45399 if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
45400 SrcVT = LastOp.getValueType();
45401 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
45402 SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
45403 Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
45404 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
45405 N0 = DAG.getBitcast(MVT::i8, N0);
45406 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
45407 }
45408 }
45410 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
45411 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
45412 Ops[0] = N0;
45413 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
45414 N0 = DAG.getBitcast(MVT::i8, N0);
45415 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
45416 }
45418 // If we're bitcasting from iX to vXi1, see if the integer originally
45419 // began as a vXi1 and whether we can remove the bitcast entirely.
45420 if (VT.isVector() && VT.getScalarType() == MVT::i1 &&
45421 SrcVT.isScalarInteger() && TLI.isTypeLegal(VT)) {
45422 if (SDValue V =
45423 combineBitcastToBoolVector(VT, N0, SDLoc(N), DAG, Subtarget))
45424 return V;
45425 }
45426 }
45428 // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
45429 // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
45430 // due to insert_subvector legalization on KNL. By promoting the copy to i16
45431 // we can help with known bits propagation from the vXi1 domain to the
45432 // scalar domain.
45433 if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
45434 !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
45435 N0.getOperand(0).getValueType() == MVT::v16i1 &&
45436 isNullConstant(N0.getOperand(1)))
45437 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
45438 DAG.getBitcast(MVT::i16, N0.getOperand(0)));
45440 // Canonicalize (bitcast (vbroadcast_load)) so that the output of the bitcast
45441 // and the vbroadcast_load are both integer or both fp. In some cases this
45442 // will remove the bitcast entirely.
45443 if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
45444 VT.isFloatingPoint() != SrcVT.isFloatingPoint() && VT.isVector()) {
45445 auto *BCast = cast<MemIntrinsicSDNode>(N0);
45446 unsigned SrcVTSize = SrcVT.getScalarSizeInBits();
45447 unsigned MemSize = BCast->getMemoryVT().getScalarSizeInBits();
45448 // Don't swap i8/i16 since don't have fp types that size.
45449 if (MemSize >= 32) {
45450 MVT MemVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(MemSize)
45451 : MVT::getIntegerVT(MemSize);
45452 MVT LoadVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(SrcVTSize)
45453 : MVT::getIntegerVT(SrcVTSize);
45454 LoadVT = MVT::getVectorVT(LoadVT, SrcVT.getVectorNumElements());
45456 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
45457 SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
45458 SDValue ResNode =
45459 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
45460 MemVT, BCast->getMemOperand());
45461 DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
45462 return DAG.getBitcast(VT, ResNode);
45463 }
45464 }
45466 // Since MMX types are special and don't usually play with other vector types,
45467 // it's better to handle them early to be sure we emit efficient code by
45468 // avoiding store-load conversions.
45469 if (VT == MVT::x86mmx) {
45470 // Detect MMX constant vectors.
45471 APInt UndefElts;
45472 SmallVector<APInt, 1> EltBits;
45473 if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
45474 SDLoc DL(N0);
45475 // Handle zero-extension of i32 with MOVD.
45476 if (EltBits[0].countl_zero() >= 32)
45477 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
45478 DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
45479 // Else, bitcast to a double.
45480 // TODO - investigate supporting sext 32-bit immediates on x86_64.
45481 APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
45482 return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
45483 }
45485 // Detect bitcasts to x86mmx low word.
45486 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
45487 (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
45488 N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
45489 bool LowUndef = true, AllUndefOrZero = true;
45490 for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
45491 SDValue Op = N0.getOperand(i);
45492 LowUndef &= Op.isUndef() || (i >= e/2);
45493 AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
45494 }
45495 if (AllUndefOrZero) {
45496 SDValue N00 = N0.getOperand(0);
45497 SDLoc dl(N00);
45498 N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
45499 : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
45500 return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
45501 }
45502 }
45504 // Detect bitcasts of 64-bit build vectors and convert to a
45505 // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
45506 // lowest element.
45507 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
45508 (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
45509 SrcVT == MVT::v8i8))
45510 return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
45512 // Detect bitcasts between element or subvector extraction to x86mmx.
45513 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
45514 N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
45515 isNullConstant(N0.getOperand(1))) {
45516 SDValue N00 = N0.getOperand(0);
45517 if (N00.getValueType().is128BitVector())
45518 return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
45519 DAG.getBitcast(MVT::v2i64, N00));
45520 }
45522 // Detect bitcasts from FP_TO_SINT to x86mmx.
45523 if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
45524 SDLoc DL(N0);
45525 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
45526 DAG.getUNDEF(MVT::v2i32));
45527 return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
45528 DAG.getBitcast(MVT::v2i64, Res));
45529 }
45530 }
45532 // Try to remove a bitcast of a constant vXi1 vector. We have to legalize
45533 // most of these to scalars anyway.
45534 if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
45535 SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
45536 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
45537 return combinevXi1ConstantToInteger(N0, DAG);
45538 }
45540 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
45541 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
45542 isa<ConstantSDNode>(N0)) {
45543 auto *C = cast<ConstantSDNode>(N0);
45544 if (C->isAllOnes())
45545 return DAG.getConstant(1, SDLoc(N0), VT);
45546 if (C->isZero())
45547 return DAG.getConstant(0, SDLoc(N0), VT);
45548 }
45550 // Look for MOVMSK that is maybe truncated and then bitcasted to vXi1.
45551 // Turn it into a sign bit compare that produces a k-register. This avoids
45552 // a trip through a GPR.
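// e.g. (v4i1 bitcast (trunc (movmsk X))) becomes (setlt (bitcast X), 0),
// comparing the lane sign bits directly into a mask register.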
45553 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
45554 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
45555 isPowerOf2_32(VT.getVectorNumElements())) {
45556 unsigned NumElts = VT.getVectorNumElements();
45557 SDValue Src = N0;
45559 // Peek through truncate.
45560 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
45561 Src = N0.getOperand(0);
45563 if (Src.getOpcode() == X86ISD::MOVMSK && Src.hasOneUse()) {
45564 SDValue MovmskIn = Src.getOperand(0);
45565 MVT MovmskVT = MovmskIn.getSimpleValueType();
45566 unsigned MovMskElts = MovmskVT.getVectorNumElements();
45568 // We allow extra bits of the movmsk to be used since they are known zero.
45569 // We can't convert a VPMOVMSKB without avx512bw.
45570 if (MovMskElts <= NumElts &&
45571 (Subtarget.hasBWI() || MovmskVT.getVectorElementType() != MVT::i8)) {
45572 EVT IntVT = EVT(MovmskVT).changeVectorElementTypeToInteger();
45573 MovmskIn = DAG.getBitcast(IntVT, MovmskIn);
45575 MVT CmpVT = MVT::getVectorVT(MVT::i1, MovMskElts);
45576 SDValue Cmp = DAG.getSetCC(dl, CmpVT, MovmskIn,
45577 DAG.getConstant(0, dl, IntVT), ISD::SETLT);
45578 if (EVT(CmpVT) == VT)
45579 return Cmp;
45581 // Pad with zeroes up to original VT to replace the zeroes that were
45582 // being used from the MOVMSK.
45583 unsigned NumConcats = NumElts / MovMskElts;
45584 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, CmpVT));
45585 Ops[0] = Cmp;
45586 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Ops);
45587 }
45588 }
45589 }
45591 // Try to remove bitcasts from input and output of mask arithmetic to
45592 // remove GPR<->K-register crossings.
45593 if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
45594 return V;
45596 // Convert a bitcasted integer logic operation that has one bitcasted
45597 // floating-point operand into a floating-point logic operation. This may
45598 // create a load of a constant, but that is cheaper than materializing the
45599 // constant in an integer register and transferring it to an SSE register or
45600 // transferring the SSE operand to integer register and back.
45601 unsigned FPOpcode;
45602 switch (N0.getOpcode()) {
45603 case ISD::AND: FPOpcode = X86ISD::FAND; break;
45604 case ISD::OR: FPOpcode = X86ISD::FOR; break;
45605 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
45606 default: return SDValue();
45607 }
45609 // Check if we have a bitcast from another integer type as well.
45610 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
45611 (Subtarget.hasSSE2() && VT == MVT::f64) ||
45612 (Subtarget.hasFP16() && VT == MVT::f16) ||
45613 (Subtarget.hasSSE2() && VT.isInteger() && VT.isVector() &&
45614 TLI.isTypeLegal(VT))))
45615 return SDValue();
45617 SDValue LogicOp0 = N0.getOperand(0);
45618 SDValue LogicOp1 = N0.getOperand(1);
45620 SDLoc DL0(N);
45621 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
45622 if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
45623 LogicOp0.hasOneUse() && LogicOp0.getOperand(0).hasOneUse() &&
45624 LogicOp0.getOperand(0).getValueType() == VT &&
45625 !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
45626 SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
45627 unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
45628 return DAG.getNode(Opcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
45630 // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
45631 if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
45632 LogicOp1.hasOneUse() && LogicOp1.getOperand(0).hasOneUse() &&
45633 LogicOp1.getOperand(0).getValueType() == VT &&
45634 !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
45635 SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
45636 unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
45637 return DAG.getNode(Opcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
45638 }
45640 return SDValue();
45641 }
45643 // Detect the pattern (mul (zext a), (sext b)).
45644 static bool detectExtMul(SelectionDAG &DAG, const SDValue &Mul, SDValue &Op0,
45645 SDValue &Op1) {
45646 Op0 = Mul.getOperand(0);
45647 Op1 = Mul.getOperand(1);
45649 // Canonicalize the sign-extended operand to be Op1.
45650 if (Op0.getOpcode() == ISD::SIGN_EXTEND)
45651 std::swap(Op0, Op1);
45653 auto IsFreeTruncation = [](SDValue &Op) -> bool {
45654 if ((Op.getOpcode() == ISD::ZERO_EXTEND ||
45655 Op.getOpcode() == ISD::SIGN_EXTEND) &&
45656 Op.getOperand(0).getScalarValueSizeInBits() <= 8)
45657 return true;
45659 auto *BV = dyn_cast<BuildVectorSDNode>(Op);
45660 return (BV && BV->isConstant());
45661 };
45663 // (dpbusd (zext a), (sext b)). Since the first operand should be an unsigned
45664 // value, we need to check that Op0 is a zero-extended value. Op1 should be a
45665 // signed value, so we just check its number of significant bits.
45666 if ((IsFreeTruncation(Op0) &&
45667 DAG.computeKnownBits(Op0).countMaxActiveBits() <= 8) &&
45668 (IsFreeTruncation(Op1) && DAG.ComputeMaxSignificantBits(Op1) <= 8))
45669 return true;
45671 return false;
45672 }
45674 // Given a ABS node, detect the following pattern:
45675 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
45676 // This is useful as it is the input into a SAD pattern.
45677 static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
45678 SDValue AbsOp1 = Abs->getOperand(0);
45679 if (AbsOp1.getOpcode() != ISD::SUB)
45680 return false;
45682 Op0 = AbsOp1.getOperand(0);
45683 Op1 = AbsOp1.getOperand(1);
45685 // Check if the operands of the sub are zero-extended from vectors of i8.
45686 if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
45687 Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
45688 Op1.getOpcode() != ISD::ZERO_EXTEND ||
45689 Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
45690 return false;
45692 return true;
45693 }
45695 static SDValue createVPDPBUSD(SelectionDAG &DAG, SDValue LHS, SDValue RHS,
45696 unsigned &LogBias, const SDLoc &DL,
45697 const X86Subtarget &Subtarget) {
45698 // Extend or truncate to MVT::i8 first.
45699 MVT Vi8VT =
45700 MVT::getVectorVT(MVT::i8, LHS.getValueType().getVectorElementCount());
45701 LHS = DAG.getZExtOrTrunc(LHS, DL, Vi8VT);
45702 RHS = DAG.getSExtOrTrunc(RHS, DL, Vi8VT);
45704 // VPDPBUSD(<16 x i32>C, <16 x i8>A, <16 x i8>B). For each dst element
45705 // C[0] = C[0] + A[0]B[0] + A[1]B[1] + A[2]B[2] + A[3]B[3].
45706 // The src A, B element type is i8, but the dst C element type is i32.
45707 // When we calculate the reduction stages, we use the source vector type
45708 // vXi8, so we need a LogBias of 2 to skip the two extra stages.
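// (Each VPDPBUSD lane already sums 4 byte products, i.e. log2(4) = 2
// reduction stages are folded into the instruction itself.)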
45709 LogBias = 2;
45711 unsigned RegSize = std::max(128u, (unsigned)Vi8VT.getSizeInBits());
45712 if (Subtarget.hasVNNI() && !Subtarget.hasVLX())
45713 RegSize = std::max(512u, RegSize);
45715 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
45716 // fill in the missing vector elements with 0.
45717 unsigned NumConcat = RegSize / Vi8VT.getSizeInBits();
45718 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, Vi8VT));
45719 Ops[0] = LHS;
45720 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
45721 SDValue DpOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
45722 Ops[0] = RHS;
45723 SDValue DpOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
45725 // Actually build the DotProduct, split as 256/512 bits for
45726 // AVXVNNI/AVX512VNNI.
45727 auto DpBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
45728 ArrayRef<SDValue> Ops) {
45729 MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
45730 return DAG.getNode(X86ISD::VPDPBUSD, DL, VT, Ops);
45731 };
45732 MVT DpVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
45733 SDValue Zero = DAG.getConstant(0, DL, DpVT);
45735 return SplitOpsAndApply(DAG, Subtarget, DL, DpVT, {Zero, DpOp0, DpOp1},
45736 DpBuilder, false);
45737 }
45739 // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
45740 // to these zexts.
45741 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
45742 const SDValue &Zext1, const SDLoc &DL,
45743 const X86Subtarget &Subtarget) {
45744 // Find the appropriate width for the PSADBW.
45745 EVT InVT = Zext0.getOperand(0).getValueType();
45746 unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());
45748 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
45749 // fill in the missing vector elements with 0.
45750 unsigned NumConcat = RegSize / InVT.getSizeInBits();
45751 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
45752 Ops[0] = Zext0.getOperand(0);
45753 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
45754 SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
45755 Ops[0] = Zext1.getOperand(0);
45756 SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
45758 // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
45759 auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
45760 ArrayRef<SDValue> Ops) {
45761 MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
45762 return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
45763 };
45764 MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
45765 return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
45766 PSADBWBuilder);
45767 }
45769 // Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
45770 // PHMINPOSUW.
45771 static SDValue combineMinMaxReduction(SDNode *Extract, SelectionDAG &DAG,
45772 const X86Subtarget &Subtarget) {
45773 // Bail without SSE41.
45774 if (!Subtarget.hasSSE41())
45775 return SDValue();
45777 EVT ExtractVT = Extract->getValueType(0);
45778 if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
45779 return SDValue();
45781 // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
45782 ISD::NodeType BinOp;
45783 SDValue Src = DAG.matchBinOpReduction(
45784 Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
45785 if (!Src)
45786 return SDValue();
45788 EVT SrcVT = Src.getValueType();
45789 EVT SrcSVT = SrcVT.getScalarType();
45790 if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
45791 return SDValue();
45793 SDLoc DL(Extract);
45794 SDValue MinPos = Src;
45796 // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
45797 while (SrcVT.getSizeInBits() > 128) {
45798 SDValue Lo, Hi;
45799 std::tie(Lo, Hi) = splitVector(MinPos, DAG, DL);
45800 SrcVT = Lo.getValueType();
45801 MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
45802 }
45803 assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
45804 (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
45805 "Unexpected value type");
45807 // PHMINPOSUW applies to UMIN(v8i16), for SMIN/SMAX/UMAX we must apply a mask
45808 // to flip the value accordingly.
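// (XOR with signed-min maps i16/i8 values to unsigned order for SMIN, XOR
// with signed-max additionally reverses that order for SMAX, and NOT
// reverses the unsigned order for UMAX; each mask is undone by repeating
// the same XOR after the PHMINPOS.)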
45809 SDValue Mask;
45810 unsigned MaskEltsBits = ExtractVT.getSizeInBits();
45811 if (BinOp == ISD::SMAX)
45812 Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
45813 else if (BinOp == ISD::SMIN)
45814 Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
45815 else if (BinOp == ISD::UMAX)
45816 Mask = DAG.getAllOnesConstant(DL, SrcVT);
45818 if (Mask)
45819 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
45821 // For v16i8 cases we need to perform UMIN on pairs of byte elements,
45822 // shuffling each upper element down and insert zeros. This means that the
45823 // v16i8 UMIN will leave the upper element as zero, performing zero-extension
45824 // ready for the PHMINPOS.
45825 if (ExtractVT == MVT::i8) {
45826 SDValue Upper = DAG.getVectorShuffle(
45827 SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
45828 {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
45829 MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
45830 }
45832 // Perform the PHMINPOS on a v8i16 vector.
45833 MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
45834 MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
45835 MinPos = DAG.getBitcast(SrcVT, MinPos);
45837 if (Mask)
45838 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
45840 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
45841 DAG.getIntPtrConstant(0, DL));
45842 }
45844 // Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
45845 static SDValue combinePredicateReduction(SDNode *Extract, SelectionDAG &DAG,
45846 const X86Subtarget &Subtarget) {
45847 // Bail without SSE2.
45848 if (!Subtarget.hasSSE2())
45849 return SDValue();
45851 EVT ExtractVT = Extract->getValueType(0);
45852 unsigned BitWidth = ExtractVT.getSizeInBits();
45853 if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
45854 ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
45855 return SDValue();
45857 // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
45858 ISD::NodeType BinOp;
45859 SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
45860 if (!Match && ExtractVT == MVT::i1)
45861 Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
45862 if (!Match)
45863 return SDValue();
45865 // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
45866 // which we can't support here for now.
45867 if (Match.getScalarValueSizeInBits() != BitWidth)
45868 return SDValue();
45870 SDValue Movmsk;
45871 SDLoc DL(Extract);
45872 EVT MatchVT = Match.getValueType();
45873 unsigned NumElts = MatchVT.getVectorNumElements();
45874 unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
45875 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45876 LLVMContext &Ctx = *DAG.getContext();
45878 if (ExtractVT == MVT::i1) {
45879 // Special case for (pre-legalization) vXi1 reductions.
45880 if (NumElts > 64 || !isPowerOf2_32(NumElts))
45881 return SDValue();
45882 if (Match.getOpcode() == ISD::SETCC) {
45883 ISD::CondCode CC = cast<CondCodeSDNode>(Match.getOperand(2))->get();
45884 if ((BinOp == ISD::AND && CC == ISD::CondCode::SETEQ) ||
45885 (BinOp == ISD::OR && CC == ISD::CondCode::SETNE)) {
45886 // For all_of(setcc(x,y,eq)) - use (iX)x == (iX)y.
45887 // For any_of(setcc(x,y,ne)) - use (iX)x != (iX)y.
45888 X86::CondCode X86CC;
45889 SDValue LHS = DAG.getFreeze(Match.getOperand(0));
45890 SDValue RHS = DAG.getFreeze(Match.getOperand(1));
45891 APInt Mask = APInt::getAllOnes(LHS.getScalarValueSizeInBits());
45892 if (SDValue V = LowerVectorAllEqual(DL, LHS, RHS, CC, Mask, Subtarget,
45893 DAG, X86CC))
45894 return DAG.getNode(ISD::TRUNCATE, DL, ExtractVT,
45895 getSETCC(X86CC, V, DL, DAG));
45896 }
45898 if (TLI.isTypeLegal(MatchVT)) {
45899 // If this is a legal AVX512 predicate type then we can just bitcast.
45900 EVT MovmskVT = EVT::getIntegerVT(Ctx, NumElts);
45901 Movmsk = DAG.getBitcast(MovmskVT, Match);
45902 } else {
45903 // Use combineBitcastvxi1 to create the MOVMSK.
45904 while (NumElts > MaxElts) {
45905 SDValue Lo, Hi;
45906 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
45907 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
45908 NumElts /= 2;
45909 }
45910 EVT MovmskVT = EVT::getIntegerVT(Ctx, NumElts);
45911 Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
45912 }
45913 if (!Movmsk)
45914 return SDValue();
45915 Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
45916 } else {
45917 // FIXME: Better handling of k-registers or 512-bit vectors?
45918 unsigned MatchSizeInBits = Match.getValueSizeInBits();
45919 if (!(MatchSizeInBits == 128 ||
45920 (MatchSizeInBits == 256 && Subtarget.hasAVX())))
45921 return SDValue();
45923 // Make sure this isn't a vector of 1 element. The perf win from using
45924 // MOVMSK diminishes with less elements in the reduction, but it is
45925 // generally better to get the comparison over to the GPRs as soon as
45926 // possible to reduce the number of vector ops.
45927 if (Match.getValueType().getVectorNumElements() < 2)
45928 return SDValue();
45930 // Check that we are extracting a reduction of all sign bits.
45931 if (DAG.ComputeNumSignBits(Match) != BitWidth)
45932 return SDValue();
45934 if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
45935 SDValue Lo, Hi;
45936 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
45937 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
45938 MatchSizeInBits = Match.getValueSizeInBits();
45939 }
45941 // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
45942 MVT MaskSrcVT;
45943 if (64 == BitWidth || 32 == BitWidth)
45944 MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
45945 MatchSizeInBits / BitWidth);
45947 MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
45949 SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
45950 Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
45951 NumElts = MaskSrcVT.getVectorNumElements();
45952 }
45953 assert((NumElts <= 32 || NumElts == 64) &&
45954 "Not expecting more than 64 elements");
45956 MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
45957 if (BinOp == ISD::XOR) {
45958 // parity -> (PARITY(MOVMSK X))
45959 SDValue Result = DAG.getNode(ISD::PARITY, DL, CmpVT, Movmsk);
45960 return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
45961 }
45963 SDValue CmpC;
45964 ISD::CondCode CondCode;
45965 if (BinOp == ISD::OR) {
45966 // any_of -> MOVMSK != 0
45967 CmpC = DAG.getConstant(0, DL, CmpVT);
45968 CondCode = ISD::CondCode::SETNE;
45969 } else {
45970 // all_of -> MOVMSK == ((1 << NumElts) - 1)
45971 CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
45972 DL, CmpVT);
45973 CondCode = ISD::CondCode::SETEQ;
45974 }
45976 // The setcc produces an i8 of 0/1, so extend that to the result width and
45977 // negate to get the final 0/-1 mask value.
45978 EVT SetccVT = TLI.getSetCCResultType(DAG.getDataLayout(), Ctx, CmpVT);
45979 SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
45980 SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
45981 SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
45982 return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
45983 }
45985 static SDValue combineVPDPBUSDPattern(SDNode *Extract, SelectionDAG &DAG,
45986 const X86Subtarget &Subtarget) {
45987 if (!Subtarget.hasVNNI() && !Subtarget.hasAVXVNNI())
45988 return SDValue();
45990 EVT ExtractVT = Extract->getValueType(0);
45991 // Verify the type we're extracting is i32, as the output element type of
45992 // vpdpbusd is i32.
45993 if (ExtractVT != MVT::i32)
45994 return SDValue();
45996 EVT VT = Extract->getOperand(0).getValueType();
45997 if (!isPowerOf2_32(VT.getVectorNumElements()))
45998 return SDValue();
46000 // Match shuffle + add pyramid.
46001 ISD::NodeType BinOp;
46002 SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
46004 // We can't combine to vpdpbusd for zext, because each of the 4 multiplies
46005 // done by vpdpbusd compute a signed 16-bit product that will be sign extended
46006 // before adding into the accumulator.
46008 // We also need to verify that the multiply has at least 2x the number of bits
46009 // of the input. We shouldn't match
46010 // (sign_extend (mul (vXi9 (zext (vXi8 X))), (vXi9 (zext (vXi8 Y)))).
46011 // if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND))
46012 // Root = Root.getOperand(0);
46014 // If there was a match, we want Root to be a mul.
46015 if (!Root || Root.getOpcode() != ISD::MUL)
46016 return SDValue();
46018 // Check whether we have an extend and mul pattern.
46019 SDValue LHS, RHS;
46020 if (!detectExtMul(DAG, Root, LHS, RHS))
46021 return SDValue();
46023 // Create the dot product instruction.
46024 SDLoc DL(Extract);
46025 unsigned StageBias;
46026 SDValue DP = createVPDPBUSD(DAG, LHS, RHS, StageBias, DL, Subtarget);
46028 // If the original vector was wider than 4 elements, sum over the results
46029 // in the DP vector.
46030 unsigned Stages = Log2_32(VT.getVectorNumElements());
46031 EVT DpVT = DP.getValueType();
46033 if (Stages > StageBias) {
46034 unsigned DpElems = DpVT.getVectorNumElements();
46036 for (unsigned i = Stages - StageBias; i > 0; --i) {
46037 SmallVector<int, 16> Mask(DpElems, -1);
46038 for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
46039 Mask[j] = MaskEnd + j;
46042 DAG.getVectorShuffle(DpVT, DL, DP, DAG.getUNDEF(DpVT), Mask);
46043 DP = DAG.getNode(ISD::ADD, DL, DpVT, DP, Shuffle);
46047 // Return the lowest ExtractSizeInBits bits.
46049 EVT::getVectorVT(*DAG.getContext(), ExtractVT,
46050 DpVT.getSizeInBits() / ExtractVT.getSizeInBits());
46051 DP = DAG.getBitcast(ResVT, DP);
46052 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, DP,
46053 Extract->getOperand(1));
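// Attempt to fold an extract of an ADD reduction of absolute differences
// into a PSADBW node, which sums the absolute differences of each 8-byte
// group into a 64-bit lane. Illustrative shape of the matched DAG:
//   extractelt (vecreduce_add (abs (sub (zext vXi8 A), (zext vXi8 B)))), 0
// Reductions wider than 8 elements finish with a shuffle+add pyramid over
// the PSADBW results.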
static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  // PSADBW is only supported on SSE2 and up.
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT ExtractVT = Extract->getValueType(0);
  // Verify the type we're extracting is either i32 or i64.
  // FIXME: Could support other types, but this is what we have coverage for.
  if (ExtractVT != MVT::i32 && ExtractVT != MVT::i64)
    return SDValue();

  EVT VT = Extract->getOperand(0).getValueType();
  if (!isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  // Match shuffle + add pyramid.
  ISD::NodeType BinOp;
  SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});

  // The operand is expected to be zero extended from i8
  // (verified in detectZextAbsDiff).
  // In order to convert to i64 and above, an additional any/zero/sign
  // extend is expected.
  // The zero extend from 32 bit has no mathematical effect on the result.
  // Also the sign extend is basically a zero extend
  // (it extends the sign bit, which is zero).
  // So it is correct to skip the sign/zero extend instruction.
  if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
               Root.getOpcode() == ISD::ZERO_EXTEND ||
               Root.getOpcode() == ISD::ANY_EXTEND))
    Root = Root.getOperand(0);

  // If there was a match, we want Root to be an ABS node that is the root of
  // an abs-diff pattern.
  if (!Root || Root.getOpcode() != ISD::ABS)
    return SDValue();

  // Check whether we have an abs-diff pattern feeding into the ABS.
  SDValue Zext0, Zext1;
  if (!detectZextAbsDiff(Root, Zext0, Zext1))
    return SDValue();

  // Create the SAD instruction.
  SDLoc DL(Extract);
  SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);

  // If the original vector was wider than 8 elements, sum over the results
  // in the SAD vector.
  unsigned Stages = Log2_32(VT.getVectorNumElements());
  EVT SadVT = SAD.getValueType();
  if (Stages > 3) {
    unsigned SadElems = SadVT.getVectorNumElements();

    for (unsigned i = Stages - 3; i > 0; --i) {
      SmallVector<int, 16> Mask(SadElems, -1);
      for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
        Mask[j] = MaskEnd + j;

      SDValue Shuffle =
          DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
      SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
    }
  }

  unsigned ExtractSizeInBits = ExtractVT.getSizeInBits();
  // Return the lowest ExtractSizeInBits bits.
  EVT ResVT = EVT::getVectorVT(*DAG.getContext(), ExtractVT,
                               SadVT.getSizeInBits() / ExtractSizeInBits);
  SAD = DAG.getBitcast(ResVT, SAD);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, SAD,
                     Extract->getOperand(1));
}
// Attempt to peek through a target shuffle and extract the scalar from the
// source.
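// For example (illustrative), extracting element 2 of (pshufd X, <3,2,1,0>)
// can be rewritten as a direct extract of element 1 of X, which may then be
// lowered via PEXTRW/PEXTRB or a plain EXTRACT_VECTOR_ELT depending on the
// element type and SSE level.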
static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDLoc dl(N);
  SDValue Src = N->getOperand(0);
  SDValue Idx = N->getOperand(1);

  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();
  EVT SrcSVT = SrcVT.getVectorElementType();
  unsigned SrcEltBits = SrcSVT.getSizeInBits();
  unsigned NumSrcElts = SrcVT.getVectorNumElements();

  // Don't attempt this for boolean mask vectors or unknown extraction indices.
  if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
    return SDValue();

  const APInt &IdxC = N->getConstantOperandAPInt(1);
  if (IdxC.uge(NumSrcElts))
    return SDValue();

  SDValue SrcBC = peekThroughBitcasts(Src);

  // Handle extract(bitcast(broadcast(scalar_value))).
  if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
    SDValue SrcOp = SrcBC.getOperand(0);
    EVT SrcOpVT = SrcOp.getValueType();
    if (SrcOpVT.isScalarInteger() && VT.isInteger() &&
        (SrcOpVT.getSizeInBits() % SrcEltBits) == 0) {
      unsigned Scale = SrcOpVT.getSizeInBits() / SrcEltBits;
      unsigned Offset = IdxC.urem(Scale) * SrcEltBits;
      // TODO support non-zero offsets.
      if (Offset == 0) {
        SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, SrcVT.getScalarType());
        SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, VT);
        return SrcOp;
      }
    }
  }

  // If we're extracting a single element from a broadcast load and there are
  // no other users, just create a single load.
  if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
    auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
    unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
    if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
        VT.getSizeInBits() == SrcBCWidth && SrcEltBits == SrcBCWidth) {
      SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
                                 MemIntr->getBasePtr(),
                                 MemIntr->getPointerInfo(),
                                 MemIntr->getOriginalAlign(),
                                 MemIntr->getMemOperand()->getFlags());
      DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
      return Load;
    }
  }

  // Handle extract(bitcast(scalar_to_vector(scalar_value))) for integers.
  // TODO: Move to DAGCombine?
  if (SrcBC.getOpcode() == ISD::SCALAR_TO_VECTOR && VT.isInteger() &&
      SrcBC.getValueType().isInteger() &&
      (SrcBC.getScalarValueSizeInBits() % SrcEltBits) == 0 &&
      SrcBC.getScalarValueSizeInBits() ==
          SrcBC.getOperand(0).getValueSizeInBits()) {
    unsigned Scale = SrcBC.getScalarValueSizeInBits() / SrcEltBits;
    if (IdxC.ult(Scale)) {
      unsigned Offset = IdxC.getZExtValue() * SrcVT.getScalarSizeInBits();
      SDValue Scl = SrcBC.getOperand(0);
      EVT SclVT = Scl.getValueType();
      if (Offset) {
        Scl = DAG.getNode(ISD::SRL, dl, SclVT, Scl,
                          DAG.getShiftAmountConstant(Offset, SclVT, dl));
      }
      Scl = DAG.getZExtOrTrunc(Scl, dl, SrcVT.getScalarType());
      Scl = DAG.getZExtOrTrunc(Scl, dl, VT);
      return Scl;
    }
  }

  // Handle extract(truncate(x)) for 0'th index.
  // TODO: Treat this as a faux shuffle?
  // TODO: When can we use this for general indices?
  if (ISD::TRUNCATE == Src.getOpcode() && IdxC == 0 &&
      (SrcVT.getSizeInBits() % 128) == 0) {
    Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
    MVT ExtractVT = MVT::getVectorVT(SrcSVT.getSimpleVT(), 128 / SrcEltBits);
    return DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(ExtractVT, Src),
                       Idx);
  }

  // We can only legally extract other elements from 128-bit vectors and in
  // certain circumstances, depending on SSE-level.
  // TODO: Investigate float/double extraction if it will be just stored.
  auto GetLegalExtract = [&Subtarget, &DAG, &dl](SDValue Vec, EVT VecVT,
                                                 unsigned Idx) {
    EVT VecSVT = VecVT.getScalarType();
    if ((VecVT.is256BitVector() || VecVT.is512BitVector()) &&
        (VecSVT == MVT::i8 || VecSVT == MVT::i16 || VecSVT == MVT::i32 ||
         VecSVT == MVT::i64)) {
      unsigned EltSizeInBits = VecSVT.getSizeInBits();
      unsigned NumEltsPerLane = 128 / EltSizeInBits;
      unsigned LaneOffset = (Idx & ~(NumEltsPerLane - 1)) * EltSizeInBits;
      unsigned LaneIdx = LaneOffset / Vec.getScalarValueSizeInBits();
      VecVT = EVT::getVectorVT(*DAG.getContext(), VecSVT, NumEltsPerLane);
      Vec = extract128BitVector(Vec, LaneIdx, DAG, dl);
      Idx &= (NumEltsPerLane - 1);
    }
    if ((VecVT == MVT::v4i32 || VecVT == MVT::v2i64) &&
        ((Idx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VecVT.getScalarType(),
                         DAG.getBitcast(VecVT, Vec),
                         DAG.getIntPtrConstant(Idx, dl));
    }
    if ((VecVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
        (VecVT == MVT::v16i8 && Subtarget.hasSSE41())) {
      unsigned OpCode = (VecVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
      return DAG.getNode(OpCode, dl, MVT::i32, DAG.getBitcast(VecVT, Vec),
                         DAG.getTargetConstant(Idx, dl, MVT::i8));
    }
    return SDValue();
  };

  // Resolve the target shuffle inputs and mask.
  SmallVector<int, 16> Mask;
  SmallVector<SDValue, 2> Ops;
  if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
    return SDValue();

  // Shuffle inputs must be the same size as the result.
  if (llvm::any_of(Ops, [SrcVT](SDValue Op) {
        return SrcVT.getSizeInBits() != Op.getValueSizeInBits();
      }))
    return SDValue();

  // Attempt to narrow/widen the shuffle mask to the correct size.
  if (Mask.size() != NumSrcElts) {
    if ((NumSrcElts % Mask.size()) == 0) {
      SmallVector<int, 16> ScaledMask;
      int Scale = NumSrcElts / Mask.size();
      narrowShuffleMaskElts(Scale, Mask, ScaledMask);
      Mask = std::move(ScaledMask);
    } else if ((Mask.size() % NumSrcElts) == 0) {
      // Simplify Mask based on demanded element.
      int ExtractIdx = (int)IdxC.getZExtValue();
      int Scale = Mask.size() / NumSrcElts;
      int Lo = Scale * ExtractIdx;
      int Hi = Scale * (ExtractIdx + 1);
      for (int i = 0, e = (int)Mask.size(); i != e; ++i)
        if (i < Lo || Hi <= i)
          Mask[i] = SM_SentinelUndef;

      SmallVector<int, 16> WidenedMask;
      while (Mask.size() > NumSrcElts &&
             canWidenShuffleElements(Mask, WidenedMask))
        Mask = std::move(WidenedMask);
    }
  }

  // If narrowing/widening failed, see if we can extract+zero-extend.
  int ExtractIdx;
  EVT ExtractVT;
  if (Mask.size() == NumSrcElts) {
    ExtractIdx = Mask[IdxC.getZExtValue()];
    ExtractVT = SrcVT;
  } else {
    unsigned Scale = Mask.size() / NumSrcElts;
    if ((Mask.size() % NumSrcElts) != 0 || SrcVT.isFloatingPoint())
      return SDValue();
    unsigned ScaledIdx = Scale * IdxC.getZExtValue();
    if (!isUndefOrZeroInRange(Mask, ScaledIdx + 1, Scale - 1))
      return SDValue();
    ExtractIdx = Mask[ScaledIdx];
    EVT ExtractSVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltBits / Scale);
    ExtractVT = EVT::getVectorVT(*DAG.getContext(), ExtractSVT, Mask.size());
    assert(SrcVT.getSizeInBits() == ExtractVT.getSizeInBits() &&
           "Failed to widen vector type");
  }

  // If the shuffle source element is undef/zero then we can just accept it.
  if (ExtractIdx == SM_SentinelUndef)
    return DAG.getUNDEF(VT);

  if (ExtractIdx == SM_SentinelZero)
    return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
                                : DAG.getConstant(0, dl, VT);

  SDValue SrcOp = Ops[ExtractIdx / Mask.size()];
  ExtractIdx = ExtractIdx % Mask.size();
  if (SDValue V = GetLegalExtract(SrcOp, ExtractVT, ExtractIdx))
    return DAG.getZExtOrTrunc(V, dl, VT);

  return SDValue();
}
/// Extracting a scalar FP value from vector element 0 is free, so extract each
/// operand first, then perform the math as a scalar op.
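/// For example:
///   extractelt (fadd %x, %y), 0
/// becomes
///   fadd (extractelt %x, 0), (extractelt %y, 0)
/// because element 0 of an XMM register already holds the scalar value.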
static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {
  assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
  SDValue Vec = ExtElt->getOperand(0);
  SDValue Index = ExtElt->getOperand(1);
  EVT VT = ExtElt->getValueType(0);
  EVT VecVT = Vec.getValueType();

  // TODO: If this is a unary/expensive/expand op, allow extraction from a
  // non-zero element because the shuffle+scalar op will be cheaper?
  if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
    return SDValue();

  // Vector FP compares don't fit the pattern of FP math ops (propagate, not
  // extract, the condition code), so deal with those as a special-case.
  if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
    EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
    if (OpVT != MVT::f32 && OpVT != MVT::f64)
      return SDValue();

    // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
    SDLoc DL(ExtElt);
    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
                               Vec.getOperand(0), Index);
    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
                               Vec.getOperand(1), Index);
    return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
  }

  if (!(VT == MVT::f16 && Subtarget.hasFP16()) && VT != MVT::f32 &&
      VT != MVT::f64)
    return SDValue();

  // Vector FP selects don't fit the pattern of FP math ops (because the
  // condition has a different type and we have to change the opcode), so deal
  // with those here.
  // FIXME: This is restricted to pre type legalization by ensuring the setcc
  // has i1 elements. If we loosen this we need to convert vector bool to a
  // scalar bool.
  if (Vec.getOpcode() == ISD::VSELECT &&
      Vec.getOperand(0).getOpcode() == ISD::SETCC &&
      Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
      Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
    // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
    SDLoc DL(ExtElt);
    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                               Vec.getOperand(0).getValueType().getScalarType(),
                               Vec.getOperand(0), Index);
    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                               Vec.getOperand(1), Index);
    SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                               Vec.getOperand(2), Index);
    return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
  }

  // TODO: This switch could include FNEG and the x86-specific FP logic ops
  // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
  // missed load folding and fma+fneg combining.
  switch (Vec.getOpcode()) {
  case ISD::FMA: // Begin 3 operands
  case ISD::FMAD:
  case ISD::FADD: // Begin 2 operands
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FCOPYSIGN:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMAXIMUM:
  case ISD::FMINIMUM:
  case X86ISD::FMAX:
  case X86ISD::FMIN:
  case ISD::FABS: // Begin 1 operand
  case ISD::FSQRT:
  case ISD::FRINT:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case X86ISD::FRCP:
  case X86ISD::FRSQRT: {
    // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
    SDLoc DL(ExtElt);
    SmallVector<SDValue, 4> ExtOps;
    for (SDValue Op : Vec->ops())
      ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
    return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
  }
  default:
    return SDValue();
  }
  llvm_unreachable("All opcodes should return within switch");
}
/// Try to convert a vector reduction sequence composed of binops and shuffles
/// into horizontal ops.
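/// For example (illustrative), a v8i16 ADD reduction rooted at
///   extractelt (add (shuffle X), X), 0
/// can be lowered to a chain of three PHADDW ops on SSSE3, subject to the
/// profitability checks below.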
static SDValue combineArithReduction(SDNode *ExtElt, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");

  // We need at least SSE2 to do anything here.
  if (!Subtarget.hasSSE2())
    return SDValue();

  ISD::NodeType Opc;
  SDValue Rdx = DAG.matchBinOpReduction(ExtElt, Opc,
                                        {ISD::ADD, ISD::MUL, ISD::FADD}, true);
  if (!Rdx)
    return SDValue();

  SDValue Index = ExtElt->getOperand(1);
  assert(isNullConstant(Index) &&
         "Reduction doesn't end in an extract from index 0");

  EVT VT = ExtElt->getValueType(0);
  EVT VecVT = Rdx.getValueType();
  if (VecVT.getScalarType() != VT)
    return SDValue();

  SDLoc DL(ExtElt);
  unsigned NumElts = VecVT.getVectorNumElements();
  unsigned EltSizeInBits = VecVT.getScalarSizeInBits();

  // Extend v4i8/v8i8 vector to v16i8, with undef upper 64-bits.
  auto WidenToV16I8 = [&](SDValue V, bool ZeroExtend) {
    if (V.getValueType() == MVT::v4i8) {
      if (ZeroExtend && Subtarget.hasSSE41()) {
        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
                        DAG.getConstant(0, DL, MVT::v4i32),
                        DAG.getBitcast(MVT::i32, V),
                        DAG.getIntPtrConstant(0, DL));
        return DAG.getBitcast(MVT::v16i8, V);
      }
      V = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, V,
                      ZeroExtend ? DAG.getConstant(0, DL, MVT::v4i8)
                                 : DAG.getUNDEF(MVT::v4i8));
    }
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V,
                       DAG.getUNDEF(MVT::v8i8));
  };

  // vXi8 mul reduction - promote to vXi16 mul reduction.
  if (Opc == ISD::MUL) {
    if (VT != MVT::i8 || NumElts < 4 || !isPowerOf2_32(NumElts))
      return SDValue();
    if (VecVT.getSizeInBits() >= 128) {
      EVT WideVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts / 2);
      SDValue Lo = getUnpackl(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
      SDValue Hi = getUnpackh(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
      Lo = DAG.getBitcast(WideVT, Lo);
      Hi = DAG.getBitcast(WideVT, Hi);
      Rdx = DAG.getNode(Opc, DL, WideVT, Lo, Hi);
      while (Rdx.getValueSizeInBits() > 128) {
        std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
        Rdx = DAG.getNode(Opc, DL, Lo.getValueType(), Lo, Hi);
      }
    } else {
      Rdx = WidenToV16I8(Rdx, false);
      Rdx = getUnpackl(DAG, DL, MVT::v16i8, Rdx, DAG.getUNDEF(MVT::v16i8));
      Rdx = DAG.getBitcast(MVT::v8i16, Rdx);
    }

    Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
                      DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
                                           {4, 5, 6, 7, -1, -1, -1, -1}));
    Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
                      DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
                                           {2, 3, -1, -1, -1, -1, -1, -1}));
    Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
                      DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
                                           {1, -1, -1, -1, -1, -1, -1, -1}));
    Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
  }

  // vXi8 add reduction - sub-128-bit vector.
  if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
    Rdx = WidenToV16I8(Rdx, true);
    Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
                      DAG.getConstant(0, DL, MVT::v16i8));
    Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
  }

  // Must be a >=128-bit vector with pow2 elements.
  if ((VecVT.getSizeInBits() % 128) != 0 || !isPowerOf2_32(NumElts))
    return SDValue();

  // vXi8 add reduction - sum lo/hi halves then use PSADBW.
  if (VT == MVT::i8) {
    while (Rdx.getValueSizeInBits() > 128) {
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
      VecVT = Lo.getValueType();
      Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
    }
    assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");

    SDValue Hi = DAG.getVectorShuffle(
        MVT::v16i8, DL, Rdx, Rdx,
        {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
    Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
    Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
                      getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
    Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
  }

  // See if we can use vXi8 PSADBW add reduction for larger zext types.
  // If the source vector values are 0-255, then we can use PSADBW to
  // sum+zext v8i8 subvectors to vXi64, then perform the reduction.
  // TODO: See if it's worth avoiding vXi16/i32 truncations?
  if (Opc == ISD::ADD && NumElts >= 4 && EltSizeInBits >= 16 &&
      DAG.computeKnownBits(Rdx).getMaxValue().ule(255) &&
      (EltSizeInBits == 16 || Rdx.getOpcode() == ISD::ZERO_EXTEND ||
       Subtarget.hasAVX512())) {
    EVT ByteVT = VecVT.changeVectorElementType(MVT::i8);
    Rdx = DAG.getNode(ISD::TRUNCATE, DL, ByteVT, Rdx);
    if (ByteVT.getSizeInBits() < 128)
      Rdx = WidenToV16I8(Rdx, true);

    // Build the PSADBW, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
    auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                            ArrayRef<SDValue> Ops) {
      MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
      SDValue Zero = DAG.getConstant(0, DL, Ops[0].getValueType());
      return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops[0], Zero);
    };
    MVT SadVT = MVT::getVectorVT(MVT::i64, Rdx.getValueSizeInBits() / 64);
    Rdx = SplitOpsAndApply(DAG, Subtarget, DL, SadVT, {Rdx}, PSADBWBuilder);

    // TODO: We could truncate to vXi16/vXi32 before performing the reduction.
    while (Rdx.getValueSizeInBits() > 128) {
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
      VecVT = Lo.getValueType();
      Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
    }
    assert(Rdx.getValueType() == MVT::v2i64 && "v2i64 reduction expected");

    if (NumElts > 8) {
      SDValue RdxHi = DAG.getVectorShuffle(MVT::v2i64, DL, Rdx, Rdx, {1, -1});
      Rdx = DAG.getNode(ISD::ADD, DL, MVT::v2i64, Rdx, RdxHi);
    }

    VecVT = MVT::getVectorVT(VT.getSimpleVT(), 128 / VT.getSizeInBits());
    Rdx = DAG.getBitcast(VecVT, Rdx);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
  }

  // Only use (F)HADD opcodes if they aren't microcoded or we're minimizing
  // codesize.
  if (!shouldUseHorizontalOp(true, DAG, Subtarget))
    return SDValue();

  unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;

  // 256-bit horizontal instructions operate on 128-bit chunks rather than
  // across the whole vector, so we need an extract + hop preliminary stage.
  // This is the only step where the operands of the hop are not the same value.
  // TODO: We could extend this to handle 512-bit or even longer vectors.
  if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
      ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
    unsigned NumElts = VecVT.getVectorNumElements();
    SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
    SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
    Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
    VecVT = Rdx.getValueType();
  }
  if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
      !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
    return SDValue();

  // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
  unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
  for (unsigned i = 0; i != ReductionSteps; ++i)
    Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
}
/// Detect vector gather/scatter index generation and convert it from being a
/// bunch of shuffles and extracts into a somewhat faster sequence.
/// For i686, the best sequence is apparently storing the value and loading
/// scalars back, while for x64 we should use 64-bit extracts and shifts.
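/// This combine is also the driver for the reduction folds above (PSADBW,
/// VPDPBUSD, predicate and min/max reductions), which are tried here because
/// they must be matched pre-legalization.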
static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const X86Subtarget &Subtarget) {
  if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
    return NewOp;

  SDValue InputVector = N->getOperand(0);
  SDValue EltIdx = N->getOperand(1);
  auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);

  EVT SrcVT = InputVector.getValueType();
  EVT VT = N->getValueType(0);
  SDLoc dl(InputVector);
  bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
  unsigned NumSrcElts = SrcVT.getVectorNumElements();
  unsigned NumEltBits = VT.getScalarSizeInBits();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
    return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);

  // Integer Constant Folding.
  if (CIdx && VT.isInteger()) {
    APInt UndefVecElts;
    SmallVector<APInt, 16> EltBits;
    unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
    if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
                                      EltBits, true, false)) {
      uint64_t Idx = CIdx->getZExtValue();
      if (UndefVecElts[Idx])
        return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
      return DAG.getConstant(EltBits[Idx].zext(NumEltBits), dl, VT);
    }

    // Convert extract_element(bitcast(<X x i1>)) -> bitcast(extract_subvector()).
    // Improves lowering of bool masks on Rust, which splits them into a byte
    // array.
    if (InputVector.getOpcode() == ISD::BITCAST && (NumEltBits % 8) == 0) {
      SDValue Src = peekThroughBitcasts(InputVector);
      if (Src.getValueType().getScalarType() == MVT::i1 &&
          TLI.isTypeLegal(Src.getValueType())) {
        MVT SubVT = MVT::getVectorVT(MVT::i1, NumEltBits);
        SDValue Sub = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, Src,
            DAG.getIntPtrConstant(CIdx->getZExtValue() * NumEltBits, dl));
        return DAG.getBitcast(VT, Sub);
      }
    }
  }

  if (IsPextr) {
    if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumEltBits),
                                 DCI))
      return SDValue(N, 0);

    // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
    if ((InputVector.getOpcode() == X86ISD::PINSRB ||
         InputVector.getOpcode() == X86ISD::PINSRW) &&
        InputVector.getOperand(2) == EltIdx) {
      assert(SrcVT == InputVector.getOperand(0).getValueType() &&
             "Vector type mismatch");
      SDValue Scl = InputVector.getOperand(1);
      Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
      return DAG.getZExtOrTrunc(Scl, dl, VT);
    }

    // TODO - Remove this once we can handle the implicit zero-extension of
    // X86ISD::PEXTRW/X86ISD::PEXTRB in combinePredicateReduction and
    // combineBasicSADPattern.
    return SDValue();
  }

  // Detect mmx extraction of all bits as a i64. It works better as a bitcast.
  if (VT == MVT::i64 && SrcVT == MVT::v1i64 &&
      InputVector.getOpcode() == ISD::BITCAST &&
      InputVector.getOperand(0).getValueType() == MVT::x86mmx &&
      isNullConstant(EltIdx) && InputVector.hasOneUse())
    return DAG.getBitcast(VT, InputVector);

  // Detect mmx to i32 conversion through a v2i32 elt extract.
  if (VT == MVT::i32 && SrcVT == MVT::v2i32 &&
      InputVector.getOpcode() == ISD::BITCAST &&
      InputVector.getOperand(0).getValueType() == MVT::x86mmx &&
      isNullConstant(EltIdx) && InputVector.hasOneUse())
    return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32,
                       InputVector.getOperand(0));

  // Check whether this extract is the root of a sum of absolute differences
  // pattern. This has to be done here because we really want it to happen
  // pre-legalization.
  if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
    return SAD;

  if (SDValue VPDPBUSD = combineVPDPBUSDPattern(N, DAG, Subtarget))
    return VPDPBUSD;

  // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
  if (SDValue Cmp = combinePredicateReduction(N, DAG, Subtarget))
    return Cmp;

  // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
  if (SDValue MinMax = combineMinMaxReduction(N, DAG, Subtarget))
    return MinMax;

  // Attempt to optimize ADD/FADD/MUL reductions with HADD, promotion etc.
  if (SDValue V = combineArithReduction(N, DAG, Subtarget))
    return V;

  if (SDValue V = scalarizeExtEltFP(N, DAG, Subtarget))
    return V;

  // Attempt to extract a i1 element by using MOVMSK to extract the signbits
  // and then testing the relevant element.
  //
  // Note that we only combine extracts on the *same* result number, i.e.
  //   t0 = merge_values a0, a1, a2, a3
  //   i1 = extract_vector_elt t0, Constant:i64<2>
  //   i1 = extract_vector_elt t0, Constant:i64<3>
  // but not
  //   i1 = extract_vector_elt t0:1, Constant:i64<2>
  // since the latter would need its own MOVMSK.
  if (SrcVT.getScalarType() == MVT::i1) {
    bool IsVar = !CIdx;
    SmallVector<SDNode *, 16> BoolExtracts;
    unsigned ResNo = InputVector.getResNo();
    auto IsBoolExtract = [&BoolExtracts, &ResNo, &IsVar](SDNode *Use) {
      if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          Use->getOperand(0).getResNo() == ResNo &&
          Use->getValueType(0) == MVT::i1) {
        BoolExtracts.push_back(Use);
        IsVar |= !isa<ConstantSDNode>(Use->getOperand(1));
        return true;
      }
      return false;
    };
    // TODO: Can we drop the oneuse check for constant extracts?
    if (all_of(InputVector->uses(), IsBoolExtract) &&
        (IsVar || BoolExtracts.size() > 1)) {
      EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
      if (SDValue BC =
              combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
        for (SDNode *Use : BoolExtracts) {
          // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
          // Mask = 1 << MaskIdx
          SDValue MaskIdx = DAG.getZExtOrTrunc(Use->getOperand(1), dl, MVT::i8);
          SDValue MaskBit = DAG.getConstant(1, dl, BCVT);
          SDValue Mask = DAG.getNode(ISD::SHL, dl, BCVT, MaskBit, MaskIdx);
          SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
          Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
          DCI.CombineTo(Use, Res);
        }
        return SDValue(N, 0);
      }
    }
  }

  // If this extract is from a loaded vector value and will be used as an
  // integer, that requires a potentially expensive XMM -> GPR transfer.
  // Additionally, if we can convert to a scalar integer load, that will likely
  // be folded into a subsequent integer op.
  // Note: Unlike the related fold for this in DAGCombiner, this is not limited
  //       to a single-use of the loaded vector. For the reasons above, we
  //       expect this to be profitable even if it creates an extra load.
  bool LikelyUsedAsVector = any_of(N->uses(), [](SDNode *Use) {
    return Use->getOpcode() == ISD::STORE ||
           Use->getOpcode() == ISD::INSERT_VECTOR_ELT ||
           Use->getOpcode() == ISD::SCALAR_TO_VECTOR;
  });
  auto *LoadVec = dyn_cast<LoadSDNode>(InputVector);
  if (LoadVec && CIdx && ISD::isNormalLoad(LoadVec) && VT.isInteger() &&
      SrcVT.getVectorElementType() == VT && DCI.isAfterLegalizeDAG() &&
      !LikelyUsedAsVector && LoadVec->isSimple()) {
    SDValue NewPtr =
        TLI.getVectorElementPointer(DAG, LoadVec->getBasePtr(), SrcVT, EltIdx);
    unsigned PtrOff = VT.getSizeInBits() * CIdx->getZExtValue() / 8;
    MachinePointerInfo MPI = LoadVec->getPointerInfo().getWithOffset(PtrOff);
    Align Alignment = commonAlignment(LoadVec->getAlign(), PtrOff);
    SDValue Load =
        DAG.getLoad(VT, dl, LoadVec->getChain(), NewPtr, MPI, Alignment,
                    LoadVec->getMemOperand()->getFlags(), LoadVec->getAAInfo());
    DAG.makeEquivalentMemoryOrdering(LoadVec, Load);
    return Load;
  }

  return SDValue();
}
// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
// This is more or less the reverse of combineBitcastvxi1.
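// For example (illustrative): (v8i16 zext (v8i1 bitcast (i8 X))) becomes:
// broadcast X to all 8 lanes, AND each lane with its bit (1 << lane),
// SETEQ against the same bit-mask to produce an all-ones lane per set bit,
// then (for ZERO_EXTEND) SRL by 15 to leave a 0/1 value in each lane.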
static SDValue combineToExtendBoolVectorInReg(
    unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N0, SelectionDAG &DAG,
    TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {
  if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
      Opcode != ISD::ANY_EXTEND)
    return SDValue();
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();
  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
    return SDValue();

  EVT SVT = VT.getScalarType();
  EVT InSVT = N0.getValueType().getScalarType();
  unsigned EltSizeInBits = SVT.getSizeInBits();

  // Input type must be extending a bool vector (bit-casted from a scalar
  // integer) to legal integer types.
  if (!VT.isVector())
    return SDValue();
  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
    return SDValue();
  if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  EVT SclVT = N00.getValueType();
  if (!SclVT.isScalarInteger())
    return SDValue();

  SDValue Vec;
  SmallVector<int> ShuffleMask;
  unsigned NumElts = VT.getVectorNumElements();
  assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");

  // Broadcast the scalar integer to the vector elements.
  if (NumElts > EltSizeInBits) {
    // If the scalar integer is greater than the vector element size, then we
    // must split it down into sub-sections for broadcasting. For example:
    //   i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
    //   i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
    assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
    unsigned Scale = NumElts / EltSizeInBits;
    EVT BroadcastVT = EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
    Vec = DAG.getBitcast(VT, Vec);

    for (unsigned i = 0; i != Scale; ++i)
      ShuffleMask.append(EltSizeInBits, i);
    Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
  } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
             (SclVT == MVT::i8 || SclVT == MVT::i16 || SclVT == MVT::i32)) {
    // If we have register broadcast instructions, use the scalar size as the
    // element type for the shuffle. Then cast to the wider element type. The
    // widened bits won't be used, and this might allow the use of a broadcast
    // load.
    assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
    unsigned Scale = EltSizeInBits / NumElts;
    EVT BroadcastVT =
        EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
    ShuffleMask.append(NumElts * Scale, 0);
    Vec = DAG.getVectorShuffle(BroadcastVT, DL, Vec, Vec, ShuffleMask);
    Vec = DAG.getBitcast(VT, Vec);
  } else {
    // For smaller scalar integers, we can simply any-extend it to the vector
    // element size (we don't care about the upper bits) and broadcast it to
    // all elements.
    SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
    ShuffleMask.append(NumElts, 0);
    Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
  }

  // Now, mask the relevant bit in each element.
  SmallVector<SDValue, 32> Bits;
  for (unsigned i = 0; i != NumElts; ++i) {
    int BitIdx = (i % EltSizeInBits);
    APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
    Bits.push_back(DAG.getConstant(Bit, DL, SVT));
  }
  SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
  Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);

  // Compare against the bitmask and extend the result.
  EVT CCVT = VT.changeVectorElementType(MVT::i1);
  Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
  Vec = DAG.getSExtOrTrunc(Vec, DL, VT);

  // For SEXT, this is now done, otherwise shift the result down for
  // zero-extension.
  if (Opcode == ISD::SIGN_EXTEND)
    return Vec;
  return DAG.getNode(ISD::SRL, DL, VT, Vec,
                     DAG.getConstant(EltSizeInBits - 1, DL, VT));
}
/// If a vector select has an operand that is -1 or 0, try to simplify the
/// select to a bitwise logic operation.
/// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
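/// When the condition is known to be a sign-splat mask, this folds e.g.:
///   vselect Cond, -1, X --> or Cond, X
///   vselect Cond, X, 0  --> and Cond, X
///   vselect Cond, 0, X  --> andn Cond, X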
static SDValue
combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget &Subtarget) {
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  EVT VT = LHS.getValueType();
  EVT CondVT = Cond.getValueType();
  SDLoc DL(N);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (N->getOpcode() != ISD::VSELECT)
    return SDValue();

  assert(CondVT.isVector() && "Vector select expects a vector selector!");

  // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
  // TODO: Can we assert that both operands are not zeros (because that should
  //       get simplified at node creation time)?
  bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
  bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());

  // If both inputs are 0/undef, create a complete zero vector.
  // FIXME: As noted above this should be handled by DAGCombiner/getNode.
  if (TValIsAllZeros && FValIsAllZeros) {
    if (VT.isFloatingPoint())
      return DAG.getConstantFP(0.0, DL, VT);
    return DAG.getConstant(0, DL, VT);
  }

  // To use the condition operand as a bitwise mask, it must have elements that
  // are the same size as the select elements. I.e., the condition operand must
  // have already been promoted from the IR select condition type <N x i1>.
  // Don't check if the types themselves are equal because that excludes
  // vector floating-point selects.
  if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  // Try to invert the condition if true value is not all 1s and false value is
  // not all 0s. Only do this if the condition has one use.
  bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
  if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
      // Check if the selector will be produced by CMPP*/PCMP*.
      Cond.getOpcode() == ISD::SETCC &&
      // Check if SETCC has already been promoted.
      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
          CondVT) {
    bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());

    if (TValIsAllZeros || FValIsAllOnes) {
      SDValue CC = Cond.getOperand(2);
      ISD::CondCode NewCC = ISD::getSetCCInverse(
          cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
      Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
                          NewCC);
      std::swap(LHS, RHS);
      TValIsAllOnes = FValIsAllOnes;
      FValIsAllZeros = TValIsAllZeros;
    }
  }

  // Cond value must be 'sign splat' to be converted to a logical op.
  if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
    return SDValue();

  // vselect Cond, 111..., 000... -> Cond
  if (TValIsAllOnes && FValIsAllZeros)
    return DAG.getBitcast(VT, Cond);

  if (!TLI.isTypeLegal(CondVT))
    return SDValue();

  // vselect Cond, 111..., X -> or Cond, X
  if (TValIsAllOnes) {
    SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
    SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
    return DAG.getBitcast(VT, Or);
  }

  // vselect Cond, X, 000... -> and Cond, X
  if (FValIsAllZeros) {
    SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
    SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
    return DAG.getBitcast(VT, And);
  }

  // vselect Cond, 000..., X -> andn Cond, X
  if (TValIsAllZeros) {
    SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
    SDValue AndN;
    // The canonical form differs for i1 vectors - x86andnp is not used.
    if (CondVT.getScalarType() == MVT::i1)
      AndN = DAG.getNode(ISD::AND, DL, CondVT, DAG.getNOT(DL, Cond, CondVT),
                         CastRHS);
    else
      AndN = DAG.getNode(X86ISD::ANDNP, DL, CondVT, Cond, CastRHS);
    return DAG.getBitcast(VT, AndN);
  }

  return SDValue();
}
/// If both arms of a vector select are concatenated vectors, split the select,
/// and concatenate the result to eliminate a wide (256-bit) vector instruction:
///   vselect Cond, (concat T0, T1), (concat F0, F1) -->
///   concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
    return SDValue();

  // TODO: Split 512-bit vectors too?
  EVT VT = N->getValueType(0);
  if (!VT.is256BitVector())
    return SDValue();

  // TODO: Split as long as any 2 of the 3 operands are concatenated?
  SDValue Cond = N->getOperand(0);
  SDValue TVal = N->getOperand(1);
  SDValue FVal = N->getOperand(2);
  if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
      !isFreeToSplitVector(TVal.getNode(), DAG) ||
      !isFreeToSplitVector(FVal.getNode(), DAG))
    return SDValue();

  auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
                            ArrayRef<SDValue> Ops) {
    return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
  };
  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
                          makeBlend, /*CheckBWI*/ false);
}
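/// Fold a select between two integer constants into shift/LEA-friendly math:
///   select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
/// For example, select Cond, 7, 3 becomes (zext(Cond) << 2) + 3.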
static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  SDLoc DL(N);

  auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
  auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
  if (!TrueC || !FalseC)
    return SDValue();

  // Don't do this for crazy integer types.
  EVT VT = N->getValueType(0);
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  // We're going to use the condition bit in math or logic ops. We could allow
  // this with a wider condition value (post-legalization it becomes an i8),
  // but if nothing is creating selects that late, it doesn't matter.
  if (Cond.getValueType() != MVT::i1)
    return SDValue();

  // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply
  // by 3, 5, or 9 with i32/i64, so those get transformed too.
  // TODO: For constants that overflow or do not differ by power-of-2 or small
  // multiplier, convert to 'and' + 'add'.
  const APInt &TrueVal = TrueC->getAPIntValue();
  const APInt &FalseVal = FalseC->getAPIntValue();

  // We have a more efficient lowering for "(X == 0) ? Y : -1" using SBB.
  if ((TrueVal.isAllOnes() || FalseVal.isAllOnes()) &&
      Cond.getOpcode() == ISD::SETCC && isNullConstant(Cond.getOperand(1))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    if (CC == ISD::SETEQ || CC == ISD::SETNE)
      return SDValue();
  }

  bool OV;
  APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
  if (OV)
    return SDValue();

  APInt AbsDiff = Diff.abs();
  if (AbsDiff.isPowerOf2() ||
      ((VT == MVT::i32 || VT == MVT::i64) &&
       (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {

    // We need a positive multiplier constant for shift/LEA codegen. The 'not'
    // of the condition can usually be folded into a compare predicate, but
    // even without that, the sequence should be cheaper than a CMOV
    // alternative.
    if (TrueVal.slt(FalseVal)) {
      Cond = DAG.getNOT(DL, Cond, MVT::i1);
      std::swap(TrueC, FalseC);
    }

    // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
    SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);

    // Multiply condition by the difference if non-one.
    if (!AbsDiff.isOne())
      R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));

    // Add the base if non-zero.
    if (!FalseC->isZero())
      R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));

    return R;
  }

  return SDValue();
}
/// If this is a *dynamic* select (non-constant condition) and we can match
/// this node with one of the variable blend instructions, restructure the
/// condition so that blends can use the high (sign) bit of each element.
/// This function will also call SimplifyDemandedBits on already created
/// BLENDV to perform additional simplifications.
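/// (PBLENDVB/BLENDVPS/BLENDVPD only test the sign bit of each condition
/// element, which is why demanding just the sign bit of the condition via
/// SimplifyDemandedBits is sufficient here.)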
static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget &Subtarget) {
  SDValue Cond = N->getOperand(0);
  if ((N->getOpcode() != ISD::VSELECT &&
       N->getOpcode() != X86ISD::BLENDV) ||
      ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned BitWidth = Cond.getScalarValueSizeInBits();
  EVT VT = N->getValueType(0);

  // We can only handle the cases where VSELECT is directly legal on the
  // subtarget. We custom lower VSELECT nodes with constant conditions and
  // this makes it hard to see whether a dynamic VSELECT will correctly
  // lower, so we both check the operation's status and explicitly handle the
  // cases where a *dynamic* blend will fail even though a constant-condition
  // blend could be custom lowered.
  // FIXME: We should find a better way to handle this class of problems.
  // Potentially, we should combine constant-condition vselect nodes
  // pre-legalization into shuffles and not mark as many types as custom
  // lowered.
  if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
    return SDValue();
  // FIXME: We don't support i16-element blends currently. We could and
  // should support them by making *all* the bits in the condition be set
  // rather than just the high bit and using an i8-element blend.
  if (VT.getVectorElementType() == MVT::i16)
    return SDValue();
  // Dynamic blending was only available from SSE4.1 onward.
  if (VT.is128BitVector() && !Subtarget.hasSSE41())
    return SDValue();
  // Byte blends are only available in AVX2.
  if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
    return SDValue();
  // There are no 512-bit blend instructions that use sign bits.
  if (VT.is512BitVector())
    return SDValue();

  // Don't optimize before the condition has been transformed to a legal type
  // and don't ever optimize vector selects that map to AVX512 mask-registers.
  if (BitWidth < 8 || BitWidth > 64)
    return SDValue();

  auto OnlyUsedAsSelectCond = [](SDValue Cond) {
    for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
         UI != UE; ++UI)
      if ((UI->getOpcode() != ISD::VSELECT &&
           UI->getOpcode() != X86ISD::BLENDV) ||
          UI.getOperandNo() != 0)
        return false;

    return true;
  };

  APInt DemandedBits(APInt::getSignMask(BitWidth));

  if (OnlyUsedAsSelectCond(Cond)) {
    KnownBits Known;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    if (!TLI.SimplifyDemandedBits(Cond, DemandedBits, Known, TLO, 0, true))
      return SDValue();

    // If we changed the computation somewhere in the DAG, this change will
    // affect all users of Cond. Update all the nodes so that we do not use
    // the generic VSELECT anymore. Otherwise, we may perform wrong
    // optimizations as we messed with the actual expectation for the vector
    // boolean values.
    for (SDNode *U : Cond->uses()) {
      if (U->getOpcode() == X86ISD::BLENDV)
        continue;

      SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
                               Cond, U->getOperand(1), U->getOperand(2));
      DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
      DCI.AddToWorklist(U);
    }
    DCI.CommitTargetLoweringOpt(TLO);
    return SDValue(N, 0);
  }

  // Otherwise we can still at least try to simplify multiple use bits.
  if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedBits, DAG))
    return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0), V,
                       N->getOperand(1), N->getOperand(2));

  return SDValue();
}
// Try to match:
//   (or (and (M, (sub 0, X)), (pandn M, X)))
// which is a special case of:
//   (select M, (sub 0, X), X)
// Per:
// http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
// We know that, if fNegate is 0 or 1:
//   (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
//
// Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
//   ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
//   (  M     ? -X : X) == ((X ^ M)        + (M & 1))
// This lets us transform our vselect to:
//   (add (xor X, M), (and M, 1))
// And further to:
//   (sub (xor X, M), M)
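// For example, with X = 5: if M = -1 then (5 ^ -1) - (-1) = -6 + 1 = -5,
// and if M = 0 then (5 ^ 0) - 0 = 5.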
static SDValue combineLogicBlendIntoConditionalNegate(
    EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  EVT MaskVT = Mask.getValueType();
  assert(MaskVT.isInteger() &&
         DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
         "Mask must be zero/all-bits");

  if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
    return SDValue();
  if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
    return SDValue();

  auto IsNegV = [](SDNode *N, SDValue V) {
    return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
           ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
  };

  SDValue V;
  if (IsNegV(Y.getNode(), X))
    V = X;
  else if (IsNegV(X.getNode(), Y))
    V = Y;
  else
    return SDValue();

  SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
  SDValue SubOp2 = Mask;

  // If the negate was on the false side of the select, then
  // the operands of the SUB need to be swapped. PR 27251.
  // This is because the pattern being matched above is
  //   (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
  // but if the pattern matched was
  //   (vselect M, X, (sub 0, X)),
  // which is really the negation of the pattern above, i.e.
  //   -(vselect M, (sub 0, X), X),
  // then the replacement pattern also needs to be negated. Since
  // -(sub X, Y) is just sub(Y, X), swapping the operands of the sub
  // accomplishes the negation of the replacement pattern.
  if (V == Y)
    std::swap(SubOp1, SubOp2);

  SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
  return DAG.getBitcast(VT, Res);
}
static SDValue commuteSelect(SDNode *N, SelectionDAG &DAG,
                             const X86Subtarget &Subtarget) {
  if (!Subtarget.hasAVX512())
    return SDValue();
  if (N->getOpcode() != ISD::VSELECT)
    return SDValue();

  SDLoc DL(N);
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  if (canCombineAsMaskOperation(LHS, Subtarget))
    return SDValue();

  if (!canCombineAsMaskOperation(RHS, Subtarget))
    return SDValue();

  if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
    return SDValue();

  // Commute LHS and RHS to create opportunity to select mask instruction.
  // (vselect M, L, R) -> (vselect ~M, R, L)
  ISD::CondCode NewCC =
      ISD::getSetCCInverse(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
                           Cond.getOperand(0).getValueType());
  Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(), Cond.getOperand(0),
                      Cond.getOperand(1), NewCC);
  return DAG.getSelect(DL, LHS.getValueType(), Cond, RHS, LHS);
}
/// Do target-specific dag combines on SELECT and VSELECT nodes.
static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI,
                             const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  // Try simplification again because we use this function to optimize
  // BLENDV nodes that are not handled by the generic combiner.
  if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
    return V;

  // When AVX512 is available, the LHS operand of a select can be folded with a
  // mask instruction, while the RHS operand can't. Commute the LHS and RHS of
  // the select to create the opportunity of selecting a mask instruction.
  if (SDValue V = commuteSelect(N, DAG, Subtarget))
    return V;

  EVT VT = LHS.getValueType();
  EVT CondVT = Cond.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());

  // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
  // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
  // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
  if (CondVT.isVector() && CondVT.isInteger() &&
      CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
      (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
      DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
    if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
                                                           DL, DAG, Subtarget))
      return V;

  // Convert vselects with constant condition into shuffles.
  if (CondConstantVector && DCI.isBeforeLegalizeOps() &&
      (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::BLENDV)) {
    SmallVector<int, 64> Mask;
    if (createShuffleMaskFromVSELECT(Mask, Cond,
                                     N->getOpcode() == X86ISD::BLENDV))
      return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
  }

  // fold vselect(cond, pshufb(x), pshufb(y)) -> or (pshufb(x), pshufb(y))
  // by forcing the unselected elements to zero.
  // TODO: Can we handle more shuffles with this?
  if (N->getOpcode() == ISD::VSELECT && CondVT.isVector() &&
      LHS.getOpcode() == X86ISD::PSHUFB && RHS.getOpcode() == X86ISD::PSHUFB &&
      LHS.hasOneUse() && RHS.hasOneUse()) {
    MVT SimpleVT = VT.getSimpleVT();
    SmallVector<SDValue, 1> LHSOps, RHSOps;
    SmallVector<int, 64> LHSMask, RHSMask, CondMask;
    if (createShuffleMaskFromVSELECT(CondMask, Cond) &&
        getTargetShuffleMask(LHS.getNode(), SimpleVT, true, LHSOps, LHSMask) &&
        getTargetShuffleMask(RHS.getNode(), SimpleVT, true, RHSOps, RHSMask)) {
      int NumElts = VT.getVectorNumElements();
      for (int i = 0; i != NumElts; ++i) {
        // getConstVector sets negative shuffle mask values as undef, so ensure
        // we hardcode SM_SentinelZero values to zero (0x80).
        if (CondMask[i] < NumElts) {
          LHSMask[i] = isUndefOrZero(LHSMask[i]) ? 0x80 : LHSMask[i];
          RHSMask[i] = 0x80;
        } else {
          LHSMask[i] = 0x80;
          RHSMask[i] = isUndefOrZero(RHSMask[i]) ? 0x80 : RHSMask[i];
        }
      }
      LHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, LHS.getOperand(0),
                        getConstVector(LHSMask, SimpleVT, DAG, DL, true));
      RHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, RHS.getOperand(0),
                        getConstVector(RHSMask, SimpleVT, DAG, DL, true));
      return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
    }
  }

  // If we have SSE[12] support, try to form min/max nodes. SSE min/max
  // instructions match the semantics of the common C idiom x<y?x:y but not
  // x<=y?x:y, because of how they handle negative zero (which can be
  // ignored in unsafe-math mode).
  // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
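  // Note: X86ISD::FMIN/FMAX follow the SSE MINPS/MAXPS semantics and return
  // the second operand if either input is NaN or if both inputs are +/-0.0,
  // which is why each case below must reason about NaNs and signed zeros.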
47385 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
47386 VT != MVT::f80 && VT != MVT::f128 && !isSoftFP16(VT, Subtarget) &&
47387 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
47388 (Subtarget.hasSSE2() ||
47389 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
47390 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
47392 unsigned Opcode = 0;
47393 // Check for x CC y ? x : y.
47394 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
47395 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
47396 switch (CC) {
47397 default: break;
47398 case ISD::SETULT:
47399 // Converting this to a min would handle NaNs incorrectly, and swapping
47400 // the operands would cause it to handle comparisons between positive
47401 // and negative zero incorrectly.
47402 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
47403 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
47404 !(DAG.isKnownNeverZeroFloat(LHS) ||
47405 DAG.isKnownNeverZeroFloat(RHS)))
47406 break;
47407 std::swap(LHS, RHS);
47408 }
47409 Opcode = X86ISD::FMIN;
47410 break;
47411 case ISD::SETOLE:
47412 // Converting this to a min would handle comparisons between positive
47413 // and negative zero incorrectly.
47414 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
47415 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
47416 break;
47417 Opcode = X86ISD::FMIN;
47418 break;
47419 case ISD::SETULE:
47420 // Converting this to a min would handle both negative zeros and NaNs
47421 // incorrectly, but we can swap the operands to fix both.
47422 std::swap(LHS, RHS);
47423 [[fallthrough]];
47424 case ISD::SETOLT:
47425 case ISD::SETLT:
47426 case ISD::SETLE:
47427 Opcode = X86ISD::FMIN;
47428 break;
47430 case ISD::SETOGE:
47431 // Converting this to a max would handle comparisons between positive
47432 // and negative zero incorrectly.
47433 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
47434 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
47435 break;
47436 Opcode = X86ISD::FMAX;
47437 break;
47438 case ISD::SETUGT:
47439 // Converting this to a max would handle NaNs incorrectly, and swapping
47440 // the operands would cause it to handle comparisons between positive
47441 // and negative zero incorrectly.
47442 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
47443 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
47444 !(DAG.isKnownNeverZeroFloat(LHS) ||
47445 DAG.isKnownNeverZeroFloat(RHS)))
47446 break;
47447 std::swap(LHS, RHS);
47448 }
47449 Opcode = X86ISD::FMAX;
47450 break;
47451 case ISD::SETUGE:
47452 // Converting this to a max would handle both negative zeros and NaNs
47453 // incorrectly, but we can swap the operands to fix both.
47454 std::swap(LHS, RHS);
47455 [[fallthrough]];
47456 case ISD::SETOGT:
47457 case ISD::SETGT:
47458 case ISD::SETGE:
47459 Opcode = X86ISD::FMAX;
47460 break;
47461 }
47462 // Check for x CC y ? y : x -- a min/max with reversed arms.
47463 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
47464 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
47465 switch (CC) {
47466 default: break;
47467 case ISD::SETOGE:
47468 // Converting this to a min would handle comparisons between positive
47469 // and negative zero incorrectly, and swapping the operands would
47470 // cause it to handle NaNs incorrectly.
47471 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
47472 !(DAG.isKnownNeverZeroFloat(LHS) ||
47473 DAG.isKnownNeverZeroFloat(RHS))) {
47474 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
47475 break;
47476 std::swap(LHS, RHS);
47477 }
47478 Opcode = X86ISD::FMIN;
47479 break;
47480 case ISD::SETUGT:
47481 // Converting this to a min would handle NaNs incorrectly.
47482 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
47483 break;
47484 Opcode = X86ISD::FMIN;
47485 break;
47486 case ISD::SETUGE:
47487 // Converting this to a min would handle both negative zeros and NaNs
47488 // incorrectly, but we can swap the operands to fix both.
47489 std::swap(LHS, RHS);
47490 [[fallthrough]];
47491 case ISD::SETOGT:
47492 case ISD::SETGT:
47493 case ISD::SETGE:
47494 Opcode = X86ISD::FMIN;
47495 break;
47497 case ISD::SETULT:
47498 // Converting this to a max would handle NaNs incorrectly.
47499 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
47500 break;
47501 Opcode = X86ISD::FMAX;
47502 break;
47503 case ISD::SETOLE:
47504 // Converting this to a max would handle comparisons between positive
47505 // and negative zero incorrectly, and swapping the operands would
47506 // cause it to handle NaNs incorrectly.
47507 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
47508 !DAG.isKnownNeverZeroFloat(LHS) &&
47509 !DAG.isKnownNeverZeroFloat(RHS)) {
47510 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
47511 break;
47512 std::swap(LHS, RHS);
47513 }
47514 Opcode = X86ISD::FMAX;
47515 break;
47516 case ISD::SETULE:
47517 // Converting this to a max would handle both negative zeros and NaNs
47518 // incorrectly, but we can swap the operands to fix both.
47519 std::swap(LHS, RHS);
47520 [[fallthrough]];
47521 case ISD::SETOLT:
47522 case ISD::SETLT:
47523 case ISD::SETLE:
47524 Opcode = X86ISD::FMAX;
47525 break;
47526 }
47527 }
47529 if (Opcode)
47530 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
47531 }
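// Worked example (editorial sketch, not from the upstream source): SSE
// MINSS/MINPS return the *second* operand when either input is NaN or when
// comparing +0.0 with -0.0, so operand order matters. For
// "select (setult x, y), x, y" with possible NaNs the operands are swapped
// first: FMIN(y, x) yields x on any NaN input, matching the
// unordered-less-than select.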
47533 // Some mask scalar intrinsics rely on checking if only one bit is set
47534 // and implement it in C code like this:
47535 // A[0] = (U & 1) ? A[0] : W[0];
47536 // This creates some redundant instructions that break pattern matching.
47537 // fold (select (setcc (and (X, 1), 0, seteq), Y, Z)) -> select(and(X, 1),Z,Y)
47538 if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
47539 Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
47540 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
47541 SDValue AndNode = Cond.getOperand(0);
47542 if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
47543 isNullConstant(Cond.getOperand(1)) &&
47544 isOneConstant(AndNode.getOperand(1))) {
47545 // LHS and RHS swapped due to
47546 // setcc outputting 1 when AND resulted in 0 and vice versa.
47547 AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
47548 return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
47549 }
47550 }
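// Worked example (editorial sketch): the C idiom above reaches here as
//   (select (seteq (and U, 1), 0), W0, A0)
// i.e. the setcc is true when the bit is *clear*, so the fold emits
//   (select (and U, 1), A0, W0)
// with the arms exchanged to preserve the original semantics.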
47552 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
47553 // lowering on KNL. In this case we convert it to
47554 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
47555 // The same applies to all vectors of i8 and i16 without BWI.
47556 // Make sure we extend these even before type legalization gets a chance to
47557 // split wide vectors.
47558 // Since SKX these selects have a proper lowering.
47559 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
47560 CondVT.getVectorElementType() == MVT::i1 &&
47561 (VT.getVectorElementType() == MVT::i8 ||
47562 VT.getVectorElementType() == MVT::i16)) {
47563 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
47564 return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
47565 }
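// e.g. (editorial sketch, AVX512F without BWI):
//   (v16i8 vselect (v16i1 M), X, Y)
//   --> (v16i8 vselect (sign_extend M to v16i8), X, Y)
// which can lower to a byte blend on the sign bits instead of an illegal
// v16i1-masked byte operation.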
47567 // AVX512 - Extend select with zero to merge with target shuffle.
47568 // select(mask, extract_subvector(shuffle(x)), zero) -->
47569 // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
47570 // TODO - support non target shuffles as well.
47571 if (Subtarget.hasAVX512() && CondVT.isVector() &&
47572 CondVT.getVectorElementType() == MVT::i1) {
47573 auto SelectableOp = [&TLI](SDValue Op) {
47574 return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
47575 isTargetShuffle(Op.getOperand(0).getOpcode()) &&
47576 isNullConstant(Op.getOperand(1)) &&
47577 TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
47578 Op.hasOneUse() && Op.getOperand(0).hasOneUse();
47579 };
47581 bool SelectableLHS = SelectableOp(LHS);
47582 bool SelectableRHS = SelectableOp(RHS);
47583 bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
47584 bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
47586 if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
47587 EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
47588 : RHS.getOperand(0).getValueType();
47589 EVT SrcCondVT = SrcVT.changeVectorElementType(MVT::i1);
47590 LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
47591 VT.getSizeInBits());
47592 RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
47593 VT.getSizeInBits());
47594 Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
47595 DAG.getUNDEF(SrcCondVT), Cond,
47596 DAG.getIntPtrConstant(0, DL));
47597 SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
47598 return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
47599 }
47600 }
47602 if (SDValue V = combineSelectOfTwoConstants(N, DAG))
47603 return V;
47605 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
47606 Cond.hasOneUse()) {
47607 EVT CondVT = Cond.getValueType();
47608 SDValue Cond0 = Cond.getOperand(0);
47609 SDValue Cond1 = Cond.getOperand(1);
47610 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
47612 // Canonicalize min/max:
47613 // (x > 0) ? x : 0 -> (x >= 0) ? x : 0
47614 // (x < -1) ? x : -1 -> (x <= -1) ? x : -1
47615 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
47616 // the need for an extra compare against zero. e.g.
47617 // (a - b) > 0 : (a - b) ? 0 -> (a - b) >= 0 : (a - b) ? 0
47618 //
47619 // testl %edi, %edi
47620 // movl $0, %eax
47621 // cmovgl %edi, %eax
47622 // =>
47623 // xorl %eax, %eax
47624 // testl %edi, %edi
47625 // cmovsl %eax, %edi
47626 //
47627 // We can also canonicalize
47628 // (x s> 1) ? x : 1 -> (x s>= 1) ? x : 1 -> (x s> 0) ? x : 1
47629 // (x u> 1) ? x : 1 -> (x u>= 1) ? x : 1 -> (x != 0) ? x : 1
47630 // This allows the use of a test instruction for the compare.
47631 if (LHS == Cond0 && RHS == Cond1) {
47632 if ((CC == ISD::SETGT && (isNullConstant(RHS) || isOneConstant(RHS))) ||
47633 (CC == ISD::SETLT && isAllOnesConstant(RHS))) {
47634 ISD::CondCode NewCC = CC == ISD::SETGT ? ISD::SETGE : ISD::SETLE;
47635 Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
47636 return DAG.getSelect(DL, VT, Cond, LHS, RHS);
47637 }
47638 if (CC == ISD::SETUGT && isOneConstant(RHS)) {
47639 ISD::CondCode NewCC = ISD::SETUGE;
47640 Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
47641 return DAG.getSelect(DL, VT, Cond, LHS, RHS);
47642 }
47643 }
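// Worked example (editorial sketch): "(x u> 1) ? x : 1" is rewritten to
// "(x u>= 1) ? x : 1" here, and u>= 1 is simply x != 0, so the compare can
// become "testl %edi, %edi" rather than "cmpl $1, %edi".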
47645 // Similar to DAGCombine's select(or(CC0,CC1),X,Y) fold but for legal types.
47646 // fold eq + gt/lt nested selects into ge/le selects
47647 // select (cmpeq Cond0, Cond1), LHS, (select (cmpugt Cond0, Cond1), LHS, Y)
47648 // --> (select (cmpuge Cond0, Cond1), LHS, Y)
47649 // select (cmpslt Cond0, Cond1), LHS, (select (cmpeq Cond0, Cond1), LHS, Y)
47650 // --> (select (cmpsle Cond0, Cond1), LHS, Y)
47652 if (RHS.getOpcode() == ISD::SELECT && RHS.getOperand(1) == LHS &&
47653 RHS.getOperand(0).getOpcode() == ISD::SETCC) {
47654 SDValue InnerSetCC = RHS.getOperand(0);
47655 ISD::CondCode InnerCC =
47656 cast<CondCodeSDNode>(InnerSetCC.getOperand(2))->get();
47657 if ((CC == ISD::SETEQ || InnerCC == ISD::SETEQ) &&
47658 Cond0 == InnerSetCC.getOperand(0) &&
47659 Cond1 == InnerSetCC.getOperand(1)) {
47660 ISD::CondCode NewCC;
47661 switch (CC == ISD::SETEQ ? InnerCC : CC) {
47662 case ISD::SETGT: NewCC = ISD::SETGE; break;
47663 case ISD::SETLT: NewCC = ISD::SETLE; break;
47664 case ISD::SETUGT: NewCC = ISD::SETUGE; break;
47665 case ISD::SETULT: NewCC = ISD::SETULE; break;
47666 default: NewCC = ISD::SETCC_INVALID; break;
47667 }
47668 if (NewCC != ISD::SETCC_INVALID) {
47669 Cond = DAG.getSetCC(DL, CondVT, Cond0, Cond1, NewCC);
47670 return DAG.getSelect(DL, VT, Cond, LHS, RHS.getOperand(2));
47671 }
47672 }
47673 }
47674 }
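// Worked example (editorial sketch):
//   select (cmpeq a, b), x, (select (cmpugt a, b), x, y)
// picks x whenever a == b or a u> b, i.e. whenever a u>= b, so it merges to
//   select (cmpuge a, b), x, y
// removing one setcc and one select.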
47676 // Check if the first operand is all zeros and Cond type is vXi1.
47677 // If this an avx512 target we can improve the use of zero masking by
47678 // swapping the operands and inverting the condition.
47679 if (N->getOpcode() == ISD::VSELECT && Cond.hasOneUse() &&
47680 Subtarget.hasAVX512() && CondVT.getVectorElementType() == MVT::i1 &&
47681 ISD::isBuildVectorAllZeros(LHS.getNode()) &&
47682 !ISD::isBuildVectorAllZeros(RHS.getNode())) {
47683 // Invert the cond to not(cond) : xor(op,allones)=not(op)
47684 SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
47685 // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
47686 return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
47687 }
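// e.g. (editorial sketch): with an AVX512 k-mask,
//   (vselect M, zero, X) --> (vselect (not M), X, zero)
// lets the zero arm come from zero-masking ({z}) on the instruction that
// produces X, instead of needing an explicit zero vector operand.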
47689 // Attempt to convert a (vXi1 bitcast(iX Cond)) selection mask before it might
47690 // get split by legalization.
47691 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::BITCAST &&
47692 CondVT.getVectorElementType() == MVT::i1 &&
47693 TLI.isTypeLegal(VT.getScalarType())) {
47694 EVT ExtCondVT = VT.changeVectorElementTypeToInteger();
47695 if (SDValue ExtCond = combineToExtendBoolVectorInReg(
47696 ISD::SIGN_EXTEND, DL, ExtCondVT, Cond, DAG, DCI, Subtarget)) {
47697 ExtCond = DAG.getNode(ISD::TRUNCATE, DL, CondVT, ExtCond);
47698 return DAG.getSelect(DL, VT, ExtCond, LHS, RHS);
47699 }
47700 }
47702 // Early exit check
47703 if (!TLI.isTypeLegal(VT) || isSoftFP16(VT, Subtarget))
47704 return SDValue();
47706 if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
47707 return V;
47709 if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
47710 return V;
47712 if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
47713 return V;
47715 // select(~Cond, X, Y) -> select(Cond, Y, X)
47716 if (CondVT.getScalarType() != MVT::i1) {
47717 if (SDValue CondNot = IsNOT(Cond, DAG))
47718 return DAG.getNode(N->getOpcode(), DL, VT,
47719 DAG.getBitcast(CondVT, CondNot), RHS, LHS);
47721 // pcmpgt(X, -1) -> pcmpgt(0, X) to help select/blendv just use the
47722 // signbit.
47723 if (Cond.getOpcode() == X86ISD::PCMPGT &&
47724 ISD::isBuildVectorAllOnes(Cond.getOperand(1).getNode()) &&
47725 Cond.hasOneUse()) {
47726 Cond = DAG.getNode(X86ISD::PCMPGT, DL, CondVT,
47727 DAG.getConstant(0, DL, CondVT), Cond.getOperand(0));
47728 return DAG.getNode(N->getOpcode(), DL, VT, Cond, RHS, LHS);
47729 }
47730 }
47732 // Try to optimize vXi1 selects if both operands are either all constants or
47733 // bitcasts from scalar integer type. In that case we can convert the operands
47734 // to integer and use an integer select which will be converted to a CMOV.
47735 // We need to take a little bit of care to avoid creating an i64 type after
47736 // type legalization.
47737 if (N->getOpcode() == ISD::SELECT && VT.isVector() &&
47738 VT.getVectorElementType() == MVT::i1 &&
47739 (DCI.isBeforeLegalize() || (VT != MVT::v64i1 || Subtarget.is64Bit()))) {
47740 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
47741 if (DCI.isBeforeLegalize() || TLI.isTypeLegal(IntVT)) {
47742 bool LHSIsConst = ISD::isBuildVectorOfConstantSDNodes(LHS.getNode());
47743 bool RHSIsConst = ISD::isBuildVectorOfConstantSDNodes(RHS.getNode());
47745 if ((LHSIsConst || (LHS.getOpcode() == ISD::BITCAST &&
47746 LHS.getOperand(0).getValueType() == IntVT)) &&
47747 (RHSIsConst || (RHS.getOpcode() == ISD::BITCAST &&
47748 RHS.getOperand(0).getValueType() == IntVT))) {
47749 if (LHSIsConst)
47750 LHS = combinevXi1ConstantToInteger(LHS, DAG);
47751 else
47752 LHS = LHS.getOperand(0);
47754 if (RHSIsConst)
47755 RHS = combinevXi1ConstantToInteger(RHS, DAG);
47756 else
47757 RHS = RHS.getOperand(0);
47759 SDValue Select = DAG.getSelect(DL, IntVT, Cond, LHS, RHS);
47760 return DAG.getBitcast(VT, Select);
47761 }
47762 }
47763 }
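// e.g. (editorial sketch): for a v8i1 select whose arms are bitcasts of i8
// values X and Y,
//   (v8i1 select C, (bitcast X), (bitcast Y)) --> (bitcast (i8 select C, X, Y))
// so the whole thing becomes a single integer CMOV instead of mask arithmetic.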
47765 // If this is "((X & C) == 0) ? Y : Z" and C is a constant mask vector of
47766 // single bits, then invert the predicate and swap the select operands.
47767 // This can lower using a vector shift bit-hack rather than mask and compare.
47768 if (DCI.isBeforeLegalize() && !Subtarget.hasAVX512() &&
47769 N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
47770 Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1 &&
47771 Cond.getOperand(0).getOpcode() == ISD::AND &&
47772 isNullOrNullSplat(Cond.getOperand(1)) &&
47773 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
47774 Cond.getOperand(0).getValueType() == VT) {
47775 // The 'and' mask must be composed of power-of-2 constants.
47776 SDValue And = Cond.getOperand(0);
47777 auto *C = isConstOrConstSplat(And.getOperand(1));
47778 if (C && C->getAPIntValue().isPowerOf2()) {
47779 // vselect (X & C == 0), LHS, RHS --> vselect (X & C != 0), RHS, LHS
47780 SDValue NotCond =
47781 DAG.getSetCC(DL, CondVT, And, Cond.getOperand(1), ISD::SETNE);
47782 return DAG.getSelect(DL, VT, NotCond, RHS, LHS);
47783 }
47785 // If we have a non-splat but still powers-of-2 mask, AVX1 can use pmulld
47786 // and AVX2 can use vpsllv{dq}. 8-bit lacks a proper shift or multiply.
47787 // 16-bit lacks a proper blendv.
47788 unsigned EltBitWidth = VT.getScalarSizeInBits();
47789 bool CanShiftBlend =
47790 TLI.isTypeLegal(VT) && ((Subtarget.hasAVX() && EltBitWidth == 32) ||
47791 (Subtarget.hasAVX2() && EltBitWidth == 64) ||
47792 (Subtarget.hasXOP()));
47793 if (CanShiftBlend &&
47794 ISD::matchUnaryPredicate(And.getOperand(1), [](ConstantSDNode *C) {
47795 return C->getAPIntValue().isPowerOf2();
47796 })) {
47797 // Create a left-shift constant to get the mask bits over to the sign-bit.
47798 SDValue Mask = And.getOperand(1);
47799 SmallVector<int, 32> ShlVals;
47800 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
47801 auto *MaskVal = cast<ConstantSDNode>(Mask.getOperand(i));
47802 ShlVals.push_back(EltBitWidth - 1 -
47803 MaskVal->getAPIntValue().exactLogBase2());
47804 }
47805 // vsel ((X & C) == 0), LHS, RHS --> vsel ((shl X, C') < 0), RHS, LHS
47806 SDValue ShlAmt = getConstVector(ShlVals, VT.getSimpleVT(), DAG, DL);
47807 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And.getOperand(0), ShlAmt);
47808 SDValue NewCond =
47809 DAG.getSetCC(DL, CondVT, Shl, Cond.getOperand(1), ISD::SETLT);
47810 return DAG.getSelect(DL, VT, NewCond, RHS, LHS);
47811 }
47812 }
47814 return SDValue();
47815 }
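// Worked example (editorial sketch): for v4i32 with mask constants
// {1, 2, 4, 8}, ShlVals = {31, 30, 29, 28}; each shift moves the tested mask
// bit into the lane's sign bit, so "(X & C) == 0 ? LHS : RHS" becomes a
// sign-bit blend of the swapped arms with no PAND/PCMPEQ pair.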
47817 /// Combine:
47818 /// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
47819 /// to:
47820 /// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
47821 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
47822 /// Note that this is only legal for some op/cc combinations.
47823 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
47824 SelectionDAG &DAG,
47825 const X86Subtarget &Subtarget) {
47826 // This combine only operates on CMP-like nodes.
47827 if (!(Cmp.getOpcode() == X86ISD::CMP ||
47828 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
47829 return SDValue();
47831 // Can't replace the cmp if it has more uses than the one we're looking at.
47832 // FIXME: We would like to be able to handle this, but would need to make sure
47833 // all uses were updated.
47834 if (!Cmp.hasOneUse())
47835 return SDValue();
47837 // This only applies to variations of the common case:
47838 // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
47839 // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
47840 // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
47841 // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
47842 // Using the proper condcodes (see below), overflow is checked for.
47844 // FIXME: We can generalize both constraints:
47845 // - XOR/OR/AND (if they were made to survive AtomicExpand)
47846 // - LHS != 1
47847 // if the result is compared.
47849 SDValue CmpLHS = Cmp.getOperand(0);
47850 SDValue CmpRHS = Cmp.getOperand(1);
47851 EVT CmpVT = CmpLHS.getValueType();
47853 if (!CmpLHS.hasOneUse())
47854 return SDValue();
47856 unsigned Opc = CmpLHS.getOpcode();
47857 if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
47858 return SDValue();
47860 SDValue OpRHS = CmpLHS.getOperand(2);
47861 auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
47862 if (!OpRHSC)
47863 return SDValue();
47865 APInt Addend = OpRHSC->getAPIntValue();
47866 if (Opc == ISD::ATOMIC_LOAD_SUB)
47867 Addend = -Addend;
47869 auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
47870 if (!CmpRHSC)
47871 return SDValue();
47873 APInt Comparison = CmpRHSC->getAPIntValue();
47874 APInt NegAddend = -Addend;
47876 // See if we can adjust the CC to make the comparison match the negated
47877 // addend.
47878 if (Comparison != NegAddend) {
47879 APInt IncComparison = Comparison + 1;
47880 if (IncComparison == NegAddend) {
47881 if (CC == X86::COND_A && !Comparison.isMaxValue()) {
47882 Comparison = IncComparison;
47883 CC = X86::COND_AE;
47884 } else if (CC == X86::COND_LE && !Comparison.isMaxSignedValue()) {
47885 Comparison = IncComparison;
47886 CC = X86::COND_L;
47887 }
47888 }
47889 APInt DecComparison = Comparison - 1;
47890 if (DecComparison == NegAddend) {
47891 if (CC == X86::COND_AE && !Comparison.isMinValue()) {
47892 Comparison = DecComparison;
47893 CC = X86::COND_A;
47894 } else if (CC == X86::COND_L && !Comparison.isMinSignedValue()) {
47895 Comparison = DecComparison;
47896 CC = X86::COND_LE;
47897 }
47898 }
47899 }
47901 // If the addend is the negation of the comparison value, then we can do
47902 // a full comparison by emitting the atomic arithmetic as a locked sub.
47903 if (Comparison == NegAddend) {
47904 // The CC is fine, but we need to rewrite the LHS of the comparison as an
47905 // atomic sub.
47906 auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
47907 auto AtomicSub = DAG.getAtomic(
47908 ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpVT,
47909 /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
47910 /*RHS*/ DAG.getConstant(NegAddend, SDLoc(CmpRHS), CmpVT),
47911 AN->getMemOperand());
47912 auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
47913 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
47914 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
47915 return LockOp;
47916 }
47918 // We can handle comparisons with zero in a number of cases by manipulating
47919 // the CC used.
47920 if (!Comparison.isZero())
47921 return SDValue();
47923 if (CC == X86::COND_S && Addend == 1)
47924 CC = X86::COND_LE;
47925 else if (CC == X86::COND_NS && Addend == 1)
47926 CC = X86::COND_G;
47927 else if (CC == X86::COND_G && Addend == -1)
47928 CC = X86::COND_GE;
47929 else if (CC == X86::COND_LE && Addend == -1)
47930 CC = X86::COND_L;
47931 else
47932 return SDValue();
47934 SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
47935 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
47936 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
47937 return LockOp;
47938 }
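// Worked example (editorial sketch): for
//   if (x.fetch_add(1) < 0) ...        // tests COND_S on the old value
// old < 0 holds iff old + 1 <= 0, so the flags of "lock incl (mem)" can be
// consumed directly with COND_LE; the signed condition codes also give the
// right answer for the overflowing old == INT_MAX case, so no separate
// load-and-compare is needed.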
47940 // Check whether a boolean test is testing a boolean value generated by
47941 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
47942 // flag depending on equal or unequal.
47943 //
47944 // Simplify the following patterns:
47945 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
47946 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
47947 // to (Op EFLAGS Cond)
47949 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
47950 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
47951 // to (Op EFLAGS !Cond)
47953 // where Op could be BRCOND or CMOV.
47955 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
47956 // This combine only operates on CMP-like nodes.
47957 if (!(Cmp.getOpcode() == X86ISD::CMP ||
47958 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
47961 // Quit if not used as a boolean value.
47962 if (CC != X86::COND_E && CC != X86::COND_NE)
47965 // Check CMP operands. One of them should be 0 or 1 and the other should be
47966 // an SetCC or extended from it.
47967 SDValue Op1 = Cmp.getOperand(0);
47968 SDValue Op2 = Cmp.getOperand(1);
47970 SDValue SetCC;
47971 const ConstantSDNode* C = nullptr;
47972 bool needOppositeCond = (CC == X86::COND_E);
47973 bool checkAgainstTrue = false; // Is it a comparison against 1?
47975 if ((C = dyn_cast<ConstantSDNode>(Op1)))
47976 SetCC = Op2;
47977 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
47978 SetCC = Op1;
47979 else // Quit if all operands are not constants.
47980 return SDValue();
47982 if (C->getZExtValue() == 1) {
47983 needOppositeCond = !needOppositeCond;
47984 checkAgainstTrue = true;
47985 } else if (C->getZExtValue() != 0)
47986 // Quit if the constant is neither 0 nor 1.
47987 return SDValue();
47989 bool truncatedToBoolWithAnd = false;
47990 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
47991 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
47992 SetCC.getOpcode() == ISD::TRUNCATE ||
47993 SetCC.getOpcode() == ISD::AND) {
47994 if (SetCC.getOpcode() == ISD::AND) {
47995 int OpIdx = -1;
47996 if (isOneConstant(SetCC.getOperand(0)))
47997 OpIdx = 1;
47998 if (isOneConstant(SetCC.getOperand(1)))
47999 OpIdx = 0;
48000 if (OpIdx < 0)
48001 break;
48002 SetCC = SetCC.getOperand(OpIdx);
48003 truncatedToBoolWithAnd = true;
48004 } else
48005 SetCC = SetCC.getOperand(0);
48006 }
48008 switch (SetCC.getOpcode()) {
48009 case X86ISD::SETCC_CARRY:
48010 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
48011 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
48012 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
48013 // truncated to i1 using 'and'.
48014 if (checkAgainstTrue && !truncatedToBoolWithAnd)
48015 break;
48016 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
48017 "Invalid use of SETCC_CARRY!");
48018 [[fallthrough]];
48019 case X86ISD::SETCC:
48020 // Set the condition code or opposite one if necessary.
48021 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
48022 if (needOppositeCond)
48023 CC = X86::GetOppositeBranchCondition(CC);
48024 return SetCC.getOperand(1);
48025 case X86ISD::CMOV: {
48026 // Check whether false/true value has canonical one, i.e. 0 or 1.
48027 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
48028 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
48029 // Quit if true value is not a constant.
48030 if (!TVal)
48031 return SDValue();
48032 // Quit if false value is not a constant.
48033 if (!FVal) {
48034 SDValue Op = SetCC.getOperand(0);
48035 // Skip 'zext' or 'trunc' node.
48036 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
48037 Op.getOpcode() == ISD::TRUNCATE)
48038 Op = Op.getOperand(0);
48039 // A special case for rdrand/rdseed, where 0 is set if false cond is
48040 // found.
48041 if ((Op.getOpcode() != X86ISD::RDRAND &&
48042 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
48043 return SDValue();
48044 }
48045 // Quit if false value is not the constant 0 or 1.
48046 bool FValIsFalse = true;
48047 if (FVal && FVal->getZExtValue() != 0) {
48048 if (FVal->getZExtValue() != 1)
48049 return SDValue();
48050 // If FVal is 1, opposite cond is needed.
48051 needOppositeCond = !needOppositeCond;
48052 FValIsFalse = false;
48053 }
48054 // Quit if TVal is not the constant opposite of FVal.
48055 if (FValIsFalse && TVal->getZExtValue() != 1)
48056 return SDValue();
48057 if (!FValIsFalse && TVal->getZExtValue() != 0)
48058 return SDValue();
48059 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
48060 if (needOppositeCond)
48061 CC = X86::GetOppositeBranchCondition(CC);
48062 return SetCC.getOperand(3);
48063 }
48064 }
48066 return SDValue();
48067 }
48069 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
48070 /// Match:
48071 /// (X86or (X86setcc) (X86setcc))
48072 /// (X86cmp (and (X86setcc) (X86setcc)), 0)
48073 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
48074 X86::CondCode &CC1, SDValue &Flags,
48075 bool &isAnd) {
48076 if (Cond->getOpcode() == X86ISD::CMP) {
48077 if (!isNullConstant(Cond->getOperand(1)))
48078 return false;
48080 Cond = Cond->getOperand(0);
48081 }
48083 isAnd = false;
48085 SDValue SetCC0, SetCC1;
48086 switch (Cond->getOpcode()) {
48087 default: return false;
48088 case ISD::AND:
48089 case X86ISD::AND:
48090 isAnd = true;
48091 [[fallthrough]];
48092 case ISD::OR:
48093 case X86ISD::OR:
48094 SetCC0 = Cond->getOperand(0);
48095 SetCC1 = Cond->getOperand(1);
48096 break;
48097 }
48099 // Make sure we have SETCC nodes, using the same flags value.
48100 if (SetCC0.getOpcode() != X86ISD::SETCC ||
48101 SetCC1.getOpcode() != X86ISD::SETCC ||
48102 SetCC0->getOperand(1) != SetCC1->getOperand(1))
48103 return false;
48105 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
48106 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
48107 Flags = SetCC0->getOperand(1);
48108 return true;
48109 }
48111 // When legalizing carry, we create carries via add X, -1
48112 // If that comes from an actual carry, via setcc, we use the
48113 // carry directly.
48114 static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
48115 if (EFLAGS.getOpcode() == X86ISD::ADD) {
48116 if (isAllOnesConstant(EFLAGS.getOperand(1))) {
48117 bool FoundAndLSB = false;
48118 SDValue Carry = EFLAGS.getOperand(0);
48119 while (Carry.getOpcode() == ISD::TRUNCATE ||
48120 Carry.getOpcode() == ISD::ZERO_EXTEND ||
48121 (Carry.getOpcode() == ISD::AND &&
48122 isOneConstant(Carry.getOperand(1)))) {
48123 FoundAndLSB |= Carry.getOpcode() == ISD::AND;
48124 Carry = Carry.getOperand(0);
48125 }
48126 if (Carry.getOpcode() == X86ISD::SETCC ||
48127 Carry.getOpcode() == X86ISD::SETCC_CARRY) {
48128 // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
48129 uint64_t CarryCC = Carry.getConstantOperandVal(0);
48130 SDValue CarryOp1 = Carry.getOperand(1);
48131 if (CarryCC == X86::COND_B)
48132 return CarryOp1;
48133 if (CarryCC == X86::COND_A) {
48134 // Try to convert COND_A into COND_B in an attempt to facilitate
48135 // materializing "setb reg".
48137 // Do not flip "e > c", where "c" is a constant, because Cmp
48138 // instruction cannot take an immediate as its first operand.
48140 if (CarryOp1.getOpcode() == X86ISD::SUB &&
48141 CarryOp1.getNode()->hasOneUse() &&
48142 CarryOp1.getValueType().isInteger() &&
48143 !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
48144 SDValue SubCommute =
48145 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
48146 CarryOp1.getOperand(1), CarryOp1.getOperand(0));
48147 return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
48148 }
48149 }
48150 // If this is a check of the z flag of an add with 1, switch to the
48151 // sign bit compare.
48152 if (CarryCC == X86::COND_E &&
48153 CarryOp1.getOpcode() == X86ISD::ADD &&
48154 isOneConstant(CarryOp1.getOperand(1)))
48155 return CarryOp1;
48156 } else if (FoundAndLSB) {
48157 SDLoc DL(Carry);
48158 SDValue BitNo = DAG.getConstant(0, DL, Carry.getValueType());
48159 if (Carry.getOpcode() == ISD::SRL) {
48160 BitNo = Carry.getOperand(1);
48161 Carry = Carry.getOperand(0);
48162 }
48163 return getBT(Carry, BitNo, DL, DAG);
48164 }
48165 }
48166 }
48168 return SDValue();
48169 }
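// e.g. (editorial sketch): "(add (zext (setcc COND_B, flags)), -1)" borrows
// exactly when the setcc bit is 0, so instead of materializing the bit with
// SETB and re-deriving a carry, the original flags can feed the ADC/SBB user
// directly.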
48171 /// If we are inverting an PTEST/TESTP operand, attempt to adjust the CC
48172 /// to avoid the inversion.
48173 static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
48174 SelectionDAG &DAG,
48175 const X86Subtarget &Subtarget) {
48176 // TODO: Handle X86ISD::KTEST/X86ISD::KORTEST.
48177 if (EFLAGS.getOpcode() != X86ISD::PTEST &&
48178 EFLAGS.getOpcode() != X86ISD::TESTP)
48179 return SDValue();
48181 // PTEST/TESTP sets EFLAGS as:
48182 // TESTZ: ZF = (Op0 & Op1) == 0
48183 // TESTC: CF = (~Op0 & Op1) == 0
48184 // TESTNZC: ZF == 0 && CF == 0
48185 MVT VT = EFLAGS.getSimpleValueType();
48186 SDValue Op0 = EFLAGS.getOperand(0);
48187 SDValue Op1 = EFLAGS.getOperand(1);
48188 MVT OpVT = Op0.getSimpleValueType();
48190 // TEST*(~X,Y) == TEST*(X,Y)
48191 if (SDValue NotOp0 = IsNOT(Op0, DAG)) {
48192 X86::CondCode InvCC;
48193 switch (CC) {
48194 case X86::COND_B:
48195 // testc -> testz.
48196 InvCC = X86::COND_E;
48197 break;
48198 case X86::COND_AE:
48199 // !testc -> !testz.
48200 InvCC = X86::COND_NE;
48201 break;
48202 case X86::COND_E:
48203 // testz -> testc.
48204 InvCC = X86::COND_B;
48205 break;
48206 case X86::COND_NE:
48207 // !testz -> !testc.
48208 InvCC = X86::COND_AE;
48209 break;
48210 case X86::COND_A:
48211 case X86::COND_BE:
48212 // testnzc -> testnzc (no change).
48213 InvCC = CC;
48214 break;
48215 default:
48216 InvCC = X86::COND_INVALID;
48217 break;
48218 }
48220 if (InvCC != X86::COND_INVALID) {
48221 CC = InvCC;
48222 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
48223 DAG.getBitcast(OpVT, NotOp0), Op1);
48224 }
48225 }
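// Derivation (editorial note): PTEST/TESTP set ZF = ((Op0 & Op1) == 0) and
// CF = ((~Op0 & Op1) == 0). Substituting ~X for Op0 gives
//   ZF(~X, Y) = ((~X & Y) == 0) = CF(X, Y)
// so stripping the NOT only requires swapping the testz/testc condition
// codes, as done above.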
48227 if (CC == X86::COND_B || CC == X86::COND_AE) {
48228 // TESTC(X,~X) == TESTC(X,-1)
48229 if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
48230 if (peekThroughBitcasts(NotOp1) == peekThroughBitcasts(Op0)) {
48231 SDLoc DL(EFLAGS);
48232 return DAG.getNode(
48233 EFLAGS.getOpcode(), DL, VT, DAG.getBitcast(OpVT, NotOp1),
48234 DAG.getBitcast(OpVT,
48235 DAG.getAllOnesConstant(DL, NotOp1.getValueType())));
48236 }
48237 }
48238 }
48240 if (CC == X86::COND_E || CC == X86::COND_NE) {
48241 // TESTZ(X,~Y) == TESTC(Y,X)
48242 if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
48243 CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
48244 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
48245 DAG.getBitcast(OpVT, NotOp1), Op0);
48246 }
48249 SDValue BC = peekThroughBitcasts(Op0);
48250 EVT BCVT = BC.getValueType();
48252 // TESTZ(AND(X,Y),AND(X,Y)) == TESTZ(X,Y)
48253 if (BC.getOpcode() == ISD::AND || BC.getOpcode() == X86ISD::FAND) {
48254 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
48255 DAG.getBitcast(OpVT, BC.getOperand(0)),
48256 DAG.getBitcast(OpVT, BC.getOperand(1)));
48257 }
48259 // TESTZ(AND(~X,Y),AND(~X,Y)) == TESTC(X,Y)
48260 if (BC.getOpcode() == X86ISD::ANDNP || BC.getOpcode() == X86ISD::FANDN) {
48261 CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
48262 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
48263 DAG.getBitcast(OpVT, BC.getOperand(0)),
48264 DAG.getBitcast(OpVT, BC.getOperand(1)));
48265 }
48267 // If every element is an all-sign value, see if we can use TESTP/MOVMSK
48268 // to more efficiently extract the sign bits and compare that.
48269 // TODO: Handle TESTC with comparison inversion.
48270 // TODO: Can we remove SimplifyMultipleUseDemandedBits and rely on
48271 // TESTP/MOVMSK combines to make sure its never worse than PTEST?
48272 if (BCVT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(BCVT)) {
48273 unsigned EltBits = BCVT.getScalarSizeInBits();
48274 if (DAG.ComputeNumSignBits(BC) == EltBits) {
48275 assert(VT == MVT::i32 && "Expected i32 EFLAGS comparison result");
48276 APInt SignMask = APInt::getSignMask(EltBits);
48277 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48278 if (SDValue Res =
48279 TLI.SimplifyMultipleUseDemandedBits(BC, SignMask, DAG)) {
48280 // For vXi16 cases we need to use pmovmskb and extract every other
48281 // signbit.
48282 SDLoc DL(EFLAGS);
48283 if ((EltBits == 32 || EltBits == 64) && Subtarget.hasAVX()) {
48284 MVT FloatSVT = MVT::getFloatingPointVT(EltBits);
48285 MVT FloatVT =
48286 MVT::getVectorVT(FloatSVT, OpVT.getSizeInBits() / EltBits);
48287 Res = DAG.getBitcast(FloatVT, Res);
48288 return DAG.getNode(X86ISD::TESTP, SDLoc(EFLAGS), VT, Res, Res);
48289 } else if (EltBits == 16) {
48290 MVT MovmskVT = BCVT.is128BitVector() ? MVT::v16i8 : MVT::v32i8;
48291 Res = DAG.getBitcast(MovmskVT, Res);
48292 Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
48293 Res = DAG.getNode(ISD::AND, DL, MVT::i32, Res,
48294 DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
48295 } else {
48296 Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
48297 }
48298 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Res,
48299 DAG.getConstant(0, DL, MVT::i32));
48300 }
48301 }
48302 }
48305 // TESTZ(-1,X) == TESTZ(X,X)
48306 if (ISD::isBuildVectorAllOnes(Op0.getNode()))
48307 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op1, Op1);
48309 // TESTZ(X,-1) == TESTZ(X,X)
48310 if (ISD::isBuildVectorAllOnes(Op1.getNode()))
48311 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);
48313 // TESTZ(OR(LO(X),HI(X)),OR(LO(Y),HI(Y))) -> TESTZ(X,Y)
48314 // TODO: Add COND_NE handling?
48315 if (CC == X86::COND_E && OpVT.is128BitVector() && Subtarget.hasAVX()) {
48316 SDValue Src0 = peekThroughBitcasts(Op0);
48317 SDValue Src1 = peekThroughBitcasts(Op1);
48318 if (Src0.getOpcode() == ISD::OR && Src1.getOpcode() == ISD::OR) {
48319 Src0 = getSplitVectorSrc(peekThroughBitcasts(Src0.getOperand(0)),
48320 peekThroughBitcasts(Src0.getOperand(1)), true);
48321 Src1 = getSplitVectorSrc(peekThroughBitcasts(Src1.getOperand(0)),
48322 peekThroughBitcasts(Src1.getOperand(1)), true);
48323 if (Src0 && Src1) {
48324 MVT OpVT2 = OpVT.getDoubleNumVectorElementsVT();
48325 return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
48326 DAG.getBitcast(OpVT2, Src0),
48327 DAG.getBitcast(OpVT2, Src1));
48328 }
48329 }
48330 }
48331 }
48333 return SDValue();
48334 }
48336 // Attempt to simplify the MOVMSK input based on the comparison type.
48337 static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
48338 SelectionDAG &DAG,
48339 const X86Subtarget &Subtarget) {
48340 // Handle eq/ne against zero (any_of).
48341 // Handle eq/ne against -1 (all_of).
48342 if (!(CC == X86::COND_E || CC == X86::COND_NE))
48343 return SDValue();
48344 if (EFLAGS.getValueType() != MVT::i32)
48345 return SDValue();
48346 unsigned CmpOpcode = EFLAGS.getOpcode();
48347 if (CmpOpcode != X86ISD::CMP && CmpOpcode != X86ISD::SUB)
48348 return SDValue();
48349 auto *CmpConstant = dyn_cast<ConstantSDNode>(EFLAGS.getOperand(1));
48350 if (!CmpConstant)
48351 return SDValue();
48352 const APInt &CmpVal = CmpConstant->getAPIntValue();
48354 SDValue CmpOp = EFLAGS.getOperand(0);
48355 unsigned CmpBits = CmpOp.getValueSizeInBits();
48356 assert(CmpBits == CmpVal.getBitWidth() && "Value size mismatch");
48358 // Peek through any truncate.
48359 if (CmpOp.getOpcode() == ISD::TRUNCATE)
48360 CmpOp = CmpOp.getOperand(0);
48362 // Bail if we don't find a MOVMSK.
48363 if (CmpOp.getOpcode() != X86ISD::MOVMSK)
48364 return SDValue();
48366 SDValue Vec = CmpOp.getOperand(0);
48367 MVT VecVT = Vec.getSimpleValueType();
48368 assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
48369 "Unexpected MOVMSK operand");
48370 unsigned NumElts = VecVT.getVectorNumElements();
48371 unsigned NumEltBits = VecVT.getScalarSizeInBits();
48373 bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isZero();
48374 bool IsAllOf = (CmpOpcode == X86ISD::SUB || CmpOpcode == X86ISD::CMP) &&
48375 NumElts <= CmpBits && CmpVal.isMask(NumElts);
48376 if (!IsAnyOf && !IsAllOf)
48377 return SDValue();
48379 // TODO: Check for more combining cases.
48380 // Here we check the number of uses of the cmp to decide whether to combine.
48381 // Currently only the two tests combining "MOVMSK(CONCAT(..))"
48382 // and "MOVMSK(PCMPEQ(..))" fit this constraint.
48383 bool IsOneUse = CmpOp.getNode()->hasOneUse();
48385 // See if we can peek through to a vector with a wider element type, if the
48386 // signbits extend down to all the sub-elements as well.
48387 // Calling MOVMSK with the wider type, avoiding the bitcast, helps expose
48388 // potential SimplifyDemandedBits/Elts cases.
48389 // If we looked through a truncate that discarded bits, we can't do this
48390 // transform.
48391 // FIXME: We could do this transform for truncates that discarded bits by
48392 // inserting an AND mask between the new MOVMSK and the CMP.
48393 if (Vec.getOpcode() == ISD::BITCAST && NumElts <= CmpBits) {
48394 SDValue BC = peekThroughBitcasts(Vec);
48395 MVT BCVT = BC.getSimpleValueType();
48396 unsigned BCNumElts = BCVT.getVectorNumElements();
48397 unsigned BCNumEltBits = BCVT.getScalarSizeInBits();
48398 if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
48399 BCNumEltBits > NumEltBits &&
48400 DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
48401 SDLoc DL(EFLAGS);
48402 APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : BCNumElts);
48403 return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
48404 DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
48405 DAG.getConstant(CmpMask, DL, MVT::i32));
48406 }
48407 }
48409 // MOVMSK(CONCAT(X,Y)) == 0 -> MOVMSK(OR(X,Y)).
48410 // MOVMSK(CONCAT(X,Y)) != 0 -> MOVMSK(OR(X,Y)).
48411 // MOVMSK(CONCAT(X,Y)) == -1 -> MOVMSK(AND(X,Y)).
48412 // MOVMSK(CONCAT(X,Y)) != -1 -> MOVMSK(AND(X,Y)).
48413 if (VecVT.is256BitVector() && NumElts <= CmpBits && IsOneUse) {
48414 SmallVector<SDValue> Ops;
48415 if (collectConcatOps(peekThroughBitcasts(Vec).getNode(), Ops, DAG) &&
48416 Ops.size() == 2) {
48417 SDLoc DL(EFLAGS);
48418 EVT SubVT = Ops[0].getValueType().changeTypeToInteger();
48419 APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : NumElts / 2);
48420 SDValue V = DAG.getNode(IsAnyOf ? ISD::OR : ISD::AND, DL, SubVT,
48421 DAG.getBitcast(SubVT, Ops[0]),
48422 DAG.getBitcast(SubVT, Ops[1]));
48423 V = DAG.getBitcast(VecVT.getHalfNumVectorElementsVT(), V);
48424 return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
48425 DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V),
48426 DAG.getConstant(CmpMask, DL, MVT::i32));
48427 }
48428 }
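// Derivation (editorial note): for any_of, MOVMSK(CONCAT(X,Y)) == 0 iff no
// sign bit is set in either half, which is MOVMSK(OR(X,Y)) == 0; for all_of,
// every sign bit must be set in both halves, which is MOVMSK(AND(X,Y)) == -1.
// Either way a 128-bit MOVMSK replaces the 256-bit one.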
48430 // MOVMSK(PCMPEQ(X,0)) == -1 -> PTESTZ(X,X).
48431 // MOVMSK(PCMPEQ(X,0)) != -1 -> !PTESTZ(X,X).
48432 // MOVMSK(PCMPEQ(X,Y)) == -1 -> PTESTZ(XOR(X,Y),XOR(X,Y)).
48433 // MOVMSK(PCMPEQ(X,Y)) != -1 -> !PTESTZ(XOR(X,Y),XOR(X,Y)).
48434 if (IsAllOf && Subtarget.hasSSE41() && IsOneUse) {
48435 MVT TestVT = VecVT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
48436 SDValue BC = peekThroughBitcasts(Vec);
48437 // Ensure MOVMSK was testing every signbit of BC.
48438 if (BC.getValueType().getVectorNumElements() <= NumElts) {
48439 if (BC.getOpcode() == X86ISD::PCMPEQ) {
48440 SDValue V = DAG.getNode(ISD::XOR, SDLoc(BC), BC.getValueType(),
48441 BC.getOperand(0), BC.getOperand(1));
48442 V = DAG.getBitcast(TestVT, V);
48443 return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
48444 }
48445 // Check for 256-bit split vector cases.
48446 if (BC.getOpcode() == ISD::AND &&
48447 BC.getOperand(0).getOpcode() == X86ISD::PCMPEQ &&
48448 BC.getOperand(1).getOpcode() == X86ISD::PCMPEQ) {
48449 SDValue LHS = BC.getOperand(0);
48450 SDValue RHS = BC.getOperand(1);
48451 LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), LHS.getValueType(),
48452 LHS.getOperand(0), LHS.getOperand(1));
48453 RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), RHS.getValueType(),
48454 RHS.getOperand(0), RHS.getOperand(1));
48455 LHS = DAG.getBitcast(TestVT, LHS);
48456 RHS = DAG.getBitcast(TestVT, RHS);
48457 SDValue V = DAG.getNode(ISD::OR, SDLoc(EFLAGS), TestVT, LHS, RHS);
48458 return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
48459 }
48460 }
48461 }
48463 // See if we can avoid a PACKSS by calling MOVMSK on the sources.
48464 // For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
48465 // sign bits prior to the comparison with zero unless we know that
48466 // the vXi16 splats the sign bit down to the lower i8 half.
48467 // TODO: Handle all_of patterns.
48468 if (Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
48469 SDValue VecOp0 = Vec.getOperand(0);
48470 SDValue VecOp1 = Vec.getOperand(1);
48471 bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
48472 bool SignExt1 = DAG.ComputeNumSignBits(VecOp1) > 8;
48473 // PMOVMSKB(PACKSSBW(X, undef)) -> PMOVMSKB(BITCAST_v16i8(X)) & 0xAAAA.
48474 if (IsAnyOf && CmpBits == 8 && VecOp1.isUndef()) {
48475 SDLoc DL(EFLAGS);
48476 SDValue Result = DAG.getBitcast(MVT::v16i8, VecOp0);
48477 Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
48478 Result = DAG.getZExtOrTrunc(Result, DL, MVT::i16);
48479 if (!SignExt0) {
48480 Result = DAG.getNode(ISD::AND, DL, MVT::i16, Result,
48481 DAG.getConstant(0xAAAA, DL, MVT::i16));
48482 }
48483 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
48484 DAG.getConstant(0, DL, MVT::i16));
48485 }
48486 // PMOVMSKB(PACKSSBW(LO(X), HI(X)))
48487 // -> PMOVMSKB(BITCAST_v32i8(X)) & 0xAAAAAAAA.
48488 if (CmpBits >= 16 && Subtarget.hasInt256() &&
48489 (IsAnyOf || (SignExt0 && SignExt1))) {
48490 if (SDValue Src = getSplitVectorSrc(VecOp0, VecOp1, true)) {
48491 SDLoc DL(EFLAGS);
48492 SDValue Result = peekThroughBitcasts(Src);
48493 if (IsAllOf && Result.getOpcode() == X86ISD::PCMPEQ &&
48494 Result.getValueType().getVectorNumElements() <= NumElts) {
48495 SDValue V = DAG.getNode(ISD::XOR, DL, Result.getValueType(),
48496 Result.getOperand(0), Result.getOperand(1));
48497 V = DAG.getBitcast(MVT::v4i64, V);
48498 return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
48499 }
48500 Result = DAG.getBitcast(MVT::v32i8, Result);
48501 Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
48502 unsigned CmpMask = IsAnyOf ? 0 : 0xFFFFFFFF;
48503 if (!SignExt0 || !SignExt1) {
48504 assert(IsAnyOf &&
48505 "Only perform v16i16 signmasks for any_of patterns");
48506 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
48507 DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
48508 }
48509 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
48510 DAG.getConstant(CmpMask, DL, MVT::i32));
48511 }
48512 }
48513 }
48515 // MOVMSK(SHUFFLE(X,u)) -> MOVMSK(X) iff every element is referenced.
48516 SmallVector<int, 32> ShuffleMask;
48517 SmallVector<SDValue, 2> ShuffleInputs;
48518 if (NumElts <= CmpBits &&
48519 getTargetShuffleInputs(peekThroughBitcasts(Vec), ShuffleInputs,
48520 ShuffleMask, DAG) &&
48521 ShuffleInputs.size() == 1 && !isAnyZeroOrUndef(ShuffleMask) &&
48522 ShuffleInputs[0].getValueSizeInBits() == VecVT.getSizeInBits()) {
48523 unsigned NumShuffleElts = ShuffleMask.size();
48524 APInt DemandedElts = APInt::getZero(NumShuffleElts);
48525 for (int M : ShuffleMask) {
48526 assert(0 <= M && M < (int)NumShuffleElts && "Bad unary shuffle index");
48527 DemandedElts.setBit(M);
48528 }
48529 if (DemandedElts.isAllOnes()) {
48530 SDLoc DL(EFLAGS);
48531 SDValue Result = DAG.getBitcast(VecVT, ShuffleInputs[0]);
48532 Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
48533 Result =
48534 DAG.getZExtOrTrunc(Result, DL, EFLAGS.getOperand(0).getValueType());
48535 return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
48536 EFLAGS.getOperand(1));
48537 }
48538 }
48540 // MOVMSKPS(V) !=/== 0 -> TESTPS(V,V)
48541 // MOVMSKPD(V) !=/== 0 -> TESTPD(V,V)
48542 // MOVMSKPS(V) !=/== -1 -> TESTPS(V,V)
48543 // MOVMSKPD(V) !=/== -1 -> TESTPD(V,V)
48544 // iff every element is referenced.
48545 if (NumElts <= CmpBits && Subtarget.hasAVX() &&
48546 !Subtarget.preferMovmskOverVTest() && IsOneUse &&
48547 (NumEltBits == 32 || NumEltBits == 64)) {
48548 SDLoc DL(EFLAGS);
48549 MVT FloatSVT = MVT::getFloatingPointVT(NumEltBits);
48550 MVT FloatVT = MVT::getVectorVT(FloatSVT, NumElts);
48551 MVT IntVT = FloatVT.changeVectorElementTypeToInteger();
48552 SDValue LHS = Vec;
48553 SDValue RHS = IsAnyOf ? Vec : DAG.getAllOnesConstant(DL, IntVT);
48554 CC = IsAnyOf ? CC : (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
48555 return DAG.getNode(X86ISD::TESTP, DL, MVT::i32,
48556 DAG.getBitcast(FloatVT, LHS),
48557 DAG.getBitcast(FloatVT, RHS));
48558 }
48560 return SDValue();
48561 }
48563 /// Optimize an EFLAGS definition used according to the condition code \p CC
48564 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
48565 /// uses of chain values.
48566 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
48567 SelectionDAG &DAG,
48568 const X86Subtarget &Subtarget) {
48569 if (CC == X86::COND_B)
48570 if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
48571 return Flags;
48573 if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
48574 return R;
48576 if (SDValue R = combinePTESTCC(EFLAGS, CC, DAG, Subtarget))
48577 return R;
48579 if (SDValue R = combineSetCCMOVMSK(EFLAGS, CC, DAG, Subtarget))
48580 return R;
48582 return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
48583 }
48585 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
48586 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
48587 TargetLowering::DAGCombinerInfo &DCI,
48588 const X86Subtarget &Subtarget) {
48589 SDLoc DL(N);
48591 SDValue FalseOp = N->getOperand(0);
48592 SDValue TrueOp = N->getOperand(1);
48593 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
48594 SDValue Cond = N->getOperand(3);
48596 // cmov X, X, ?, ? --> X
48597 if (TrueOp == FalseOp)
48598 return TrueOp;
48600 // Try to simplify the EFLAGS and condition code operands.
48601 // We can't always do this as FCMOV only supports a subset of X86 cond.
48602 if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
48603 if (!(FalseOp.getValueType() == MVT::f80 ||
48604 (FalseOp.getValueType() == MVT::f64 && !Subtarget.hasSSE2()) ||
48605 (FalseOp.getValueType() == MVT::f32 && !Subtarget.hasSSE1())) ||
48606 !Subtarget.canUseCMOV() || hasFPCMov(CC)) {
48607 SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
48608 Flags};
48609 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
48610 }
48611 }
48613 // If this is a select between two integer constants, try to do some
48614 // optimizations. Note that the operands are ordered the opposite of SELECT
48615 // operands.
48616 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
48617 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
48618 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
48619 // larger than FalseC (the false value).
48620 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
48621 CC = X86::GetOppositeBranchCondition(CC);
48622 std::swap(TrueC, FalseC);
48623 std::swap(TrueOp, FalseOp);
48624 }
48626 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
48627 // This is efficient for any integer data type (including i8/i16) and
48628 // shift amount.
48629 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
48630 Cond = getSETCC(CC, Cond, DL, DAG);
48632 // Zero extend the condition if needed.
48633 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
48635 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
48636 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
48637 DAG.getConstant(ShAmt, DL, MVT::i8));
48638 return Cond;
48639 }
48641 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
48642 // for any integer data type, including i8/i16.
48643 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
48644 Cond = getSETCC(CC, Cond, DL, DAG);
48646 // Zero extend the condition if needed.
48647 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
48648 FalseC->getValueType(0), Cond);
48649 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
48650 SDValue(FalseC, 0));
48651 return Cond;
48652 }
48654 // Optimize cases that will turn into an LEA instruction. This requires
48655 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
48656 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
48657 APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
48658 assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
48659 "Implicit constant truncation");
48661 bool isFastMultiplier = false;
48662 if (Diff.ult(10)) {
48663 switch (Diff.getZExtValue()) {
48664 default: break;
48665 case 1: // result = add base, cond
48666 case 2: // result = lea base( , cond*2)
48667 case 3: // result = lea base(cond, cond*2)
48668 case 4: // result = lea base( , cond*4)
48669 case 5: // result = lea base(cond, cond*4)
48670 case 8: // result = lea base( , cond*8)
48671 case 9: // result = lea base(cond, cond*8)
48672 isFastMultiplier = true;
48673 break;
48674 }
48675 }
48677 if (isFastMultiplier) {
48678 Cond = getSETCC(CC, Cond, DL, DAG);
48679 // Zero extend the condition if needed.
48680 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
48681 Cond);
48682 // Scale the condition by the difference.
48683 if (Diff != 1)
48684 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
48685 DAG.getConstant(Diff, DL, Cond.getValueType()));
48687 // Add the base if non-zero.
48688 if (FalseC->getAPIntValue() != 0)
48689 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
48690 SDValue(FalseC, 0));
48691 return Cond;
48692 }
48693 }
48694 }
48695 }
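// Worked example (editorial sketch): "C ? 9 : 4" gives Diff = 5, so the
// sequence becomes setcc + zext + "leal 4(%rax,%rax,4)", folding both the
// scale-by-5 and the +4 base into one LEA.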
48697 // Handle these cases:
48698 // (select (x != c), e, c) -> select (x != c), e, x),
48699 // (select (x == c), c, e) -> select (x == c), x, e)
48700 // where the c is an integer constant, and the "select" is the combination
48701 // of CMOV and CMP.
48703 // The rationale for this change is that the conditional-move from a constant
48704 // needs two instructions, however, conditional-move from a register needs
48705 // only one instruction.
48707 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
48708 // some instruction-combining opportunities. This opt needs to be
48709 // postponed as late as possible.
48711 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
48712 // the DCI.xxxx conditions are provided to postpone the optimization as
48713 // late as possible.
48715 ConstantSDNode *CmpAgainst = nullptr;
48716 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
48717 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
48718 !isa<ConstantSDNode>(Cond.getOperand(0))) {
48720 if (CC == X86::COND_NE &&
48721 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
48722 CC = X86::GetOppositeBranchCondition(CC);
48723 std::swap(TrueOp, FalseOp);
48724 }
48726 if (CC == X86::COND_E &&
48727 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
48728 SDValue Ops[] = {FalseOp, Cond.getOperand(0),
48729 DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
48730 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
48731 }
48732 }
48733 }
48735 // Transform:
48736 //
48737 // (cmov 1 T (uge T 2))
48738 //
48739 // to:
48740 //
48741 // (adc T 0 (sub T 1))
48742 if (CC == X86::COND_AE && isOneConstant(FalseOp) &&
48743 Cond.getOpcode() == X86ISD::SUB && Cond->hasOneUse()) {
48744 SDValue Cond0 = Cond.getOperand(0);
48745 if (Cond0.getOpcode() == ISD::TRUNCATE)
48746 Cond0 = Cond0.getOperand(0);
48747 auto *Sub1C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
48748 if (Cond0 == TrueOp && Sub1C && Sub1C->getZExtValue() == 2) {
48749 EVT CondVT = Cond->getValueType(0);
48750 EVT OuterVT = N->getValueType(0);
48751 // Subtract 1 and generate a carry.
48752 SDValue NewSub =
48753 DAG.getNode(X86ISD::SUB, DL, Cond->getVTList(), Cond.getOperand(0),
48754 DAG.getConstant(1, DL, CondVT));
48755 SDValue EFLAGS(NewSub.getNode(), 1);
48756 return DAG.getNode(X86ISD::ADC, DL, DAG.getVTList(OuterVT, MVT::i32),
48757 TrueOp, DAG.getConstant(0, DL, OuterVT), EFLAGS);
48758 }
48759 }
48761 // Fold and/or of setcc's to double CMOV:
48762 // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
48763 // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
48765 // This combine lets us generate:
48766 // cmovcc1 (jcc1 if we don't have CMOV)
48767 // cmovcc2 (same)
48768 // instead of:
48769 // setcc1
48770 // setcc2
48771 // and/or
48772 // cmovne (jne if we don't have CMOV)
48773 // When we can't use the CMOV instruction, it might increase branch
48774 // mispredicts.
48775 // When we can use CMOV, or when there is no mispredict, this improves
48776 // throughput and reduces register pressure.
48778 if (CC == X86::COND_NE) {
48779 SDValue Flags;
48780 X86::CondCode CC0, CC1;
48781 bool isAndSetCC;
48782 if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
48783 if (isAndSetCC) {
48784 std::swap(FalseOp, TrueOp);
48785 CC0 = X86::GetOppositeBranchCondition(CC0);
48786 CC1 = X86::GetOppositeBranchCondition(CC1);
48787 }
48789 SDValue LOps[] = {FalseOp, TrueOp,
48790 DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
48791 SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
48792 SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
48793 Flags};
48794 SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
48795 return CMOV;
48796 }
48797 }
48799 // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
48800 // (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
48801 // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
48802 // (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
48803 if ((CC == X86::COND_NE || CC == X86::COND_E) &&
48804 Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
48805 SDValue Add = TrueOp;
48806 SDValue Const = FalseOp;
48807 // Canonicalize the condition code for easier matching and output.
48808 if (CC == X86::COND_E)
48809 std::swap(Add, Const);
48811 // We might have replaced the constant in the cmov with the LHS of the
48812 // compare. If so change it to the RHS of the compare.
48813 if (Const == Cond.getOperand(0))
48814 Const = Cond.getOperand(1);
48816 // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
48817 if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
48818 Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
48819 (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
48820 Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
48821 Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
48822 EVT VT = N->getValueType(0);
48823 // This should constant fold.
48824 SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
48825 SDValue CMov =
48826 DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
48827 DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
48828 return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
48829 }
48830 }
48832 return SDValue();
48833 }
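// Worked example (editorial sketch): "x ? cttz(x) + 4 : 7" matches with
// C1 = 7 and C2 = 4, so Diff = 3 and the result is
//   (add (cmov 3, (cttz x), (x != 0)), 4)
// letting the unconditional add fold away while the cmov picks between the
// small constant and the tzcnt/bsf result.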
48835 /// Different mul shrinking modes.
48836 enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
48838 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
48839 EVT VT = N->getOperand(0).getValueType();
48840 if (VT.getScalarSizeInBits() != 32)
48841 return false;
48843 assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
48844 unsigned SignBits[2] = {1, 1};
48845 bool IsPositive[2] = {false, false};
48846 for (unsigned i = 0; i < 2; i++) {
48847 SDValue Opd = N->getOperand(i);
48849 SignBits[i] = DAG.ComputeNumSignBits(Opd);
48850 IsPositive[i] = DAG.SignBitIsZero(Opd);
48851 }
48853 bool AllPositive = IsPositive[0] && IsPositive[1];
48854 unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
48855 // When ranges are from -128 ~ 127, use MULS8 mode.
48856 if (MinSignBits >= 25)
48857 Mode = ShrinkMode::MULS8;
48858 // When ranges are from 0 ~ 255, use MULU8 mode.
48859 else if (AllPositive && MinSignBits >= 24)
48860 Mode = ShrinkMode::MULU8;
48861 // When ranges are from -32768 ~ 32767, use MULS16 mode.
48862 else if (MinSignBits >= 17)
48863 Mode = ShrinkMode::MULS16;
48864 // When ranges are from 0 ~ 65535, use MULU16 mode.
48865 else if (AllPositive && MinSignBits >= 16)
48866 Mode = ShrinkMode::MULU16;
48867 else
48868 return false;
48870 return true;
48871 }
48872 /// When the operands of vector mul are extended from smaller size values,
48873 /// like i8 and i16, the type of mul may be shrunk to generate more
48874 /// efficient code. Two typical patterns are handled:
48875 /// Pattern1:
48876 /// %2 = sext/zext <N x i8> %1 to <N x i32>
48877 /// %4 = sext/zext <N x i8> %3 to <N x i32>
48878 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
48879 /// %5 = mul <N x i32> %2, %4
48880 ///
48881 /// Pattern2:
48882 /// %2 = zext/sext <N x i16> %1 to <N x i32>
48883 /// %4 = zext/sext <N x i16> %3 to <N x i32>
48884 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
48885 /// %5 = mul <N x i32> %2, %4
48886 ///
48887 /// There are four mul shrinking modes:
48888 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
48889 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
48890 /// generate pmullw+sext32 for it (MULS8 mode).
48891 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
48892 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
48893 /// generate pmullw+zext32 for it (MULU8 mode).
48894 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
48895 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
48896 /// generate pmullw+pmulhw for it (MULS16 mode).
48897 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
48898 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
48899 /// generate pmullw+pmulhuw for it (MULU16 mode).
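/// Example (editorial sketch, MULU16 mode):
///   %2 = zext <8 x i16> %a to <8 x i32>
///   %4 = zext <8 x i16> %b to <8 x i32>
///   %5 = mul <8 x i32> %2, %4
/// lowers to pmullw for the low halves plus pmulhuw for the high halves,
/// re-interleaved into <8 x i32> as implemented below.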
48900 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
48901 const X86Subtarget &Subtarget) {
48902 // Check for legality
48903 // pmullw/pmulhw are not supported by SSE.
48904 if (!Subtarget.hasSSE2())
48905 return SDValue();
48907 // Check for profitability
48908 // pmulld is supported since SSE41. It is better to use pmulld
48909 // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
48910 // pmullw+pmulhw.
48911 bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
48912 if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
48913 return SDValue();
48915 ShrinkMode Mode;
48916 if (!canReduceVMulWidth(N, DAG, Mode))
48917 return SDValue();
48919 SDLoc DL(N);
48920 SDValue N0 = N->getOperand(0);
48921 SDValue N1 = N->getOperand(1);
48922 EVT VT = N->getOperand(0).getValueType();
48923 unsigned NumElts = VT.getVectorNumElements();
48924 if ((NumElts % 2) != 0)
48925 return SDValue();
48927 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
48929 // Shrink the operands of mul.
48930 SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
48931 SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
48933 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
48934 // lower part is needed.
48935 SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
48936 if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
48937 return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
48938 : ISD::SIGN_EXTEND,
48939 DL, VT, MulLo);
48941 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts / 2);
48942 // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
48943 // the higher part is also needed.
48944 SDValue MulHi =
48945 DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
48946 ReducedVT, NewN0, NewN1);
48948 // Repack the lower part and higher part result of mul into a wider
48949 // result.
48950 // Generate shuffle functioning as punpcklwd.
48951 SmallVector<int, 16> ShuffleMask(NumElts);
48952 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
48953 ShuffleMask[2 * i] = i;
48954 ShuffleMask[2 * i + 1] = i + NumElts;
48955 }
48956 SDValue ResLo =
48957 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
48958 ResLo = DAG.getBitcast(ResVT, ResLo);
48959 // Generate shuffle functioning as punpckhwd.
48960 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
48961 ShuffleMask[2 * i] = i + NumElts / 2;
48962 ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
48963 }
48964 SDValue ResHi =
48965 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
48966 ResHi = DAG.getBitcast(ResVT, ResHi);
48967 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
48968 }
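// e.g. (editorial sketch, NumElts = 8): MulLo = {l0..l7}, MulHi = {h0..h7};
// the punpcklwd-style mask {0,8,1,9,...} interleaves them so each i32 lane
// reads (h << 16) | l, i.e. the full 32-bit product of the i16 inputs.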
48970 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
48971 EVT VT, const SDLoc &DL) {
48973 auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
48974 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
48975 DAG.getConstant(Mult, DL, VT));
48976 Result = DAG.getNode(ISD::SHL, DL, VT, Result,
48977 DAG.getConstant(Shift, DL, MVT::i8));
48978 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
48979 N->getOperand(0));
48980 return Result;
48981 };
48983 auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
48984 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
48985 DAG.getConstant(Mul1, DL, VT));
48986 Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
48987 DAG.getConstant(Mul2, DL, VT));
48988 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
48989 N->getOperand(0));
48990 return Result;
48991 };
48993 switch (MulAmt) {
48994 default:
48995 break;
48996 case 11:
48997 // mul x, 11 => add ((shl (mul x, 5), 1), x)
48998 return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
48999 case 21:
49000 // mul x, 21 => add ((shl (mul x, 5), 2), x)
49001 return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
49002 case 41:
49003 // mul x, 41 => add ((shl (mul x, 5), 3), x)
49004 return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
49005 case 22:
49006 // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
49007 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
49008 combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
49009 case 19:
49010 // mul x, 19 => add ((shl (mul x, 9), 1), x)
49011 return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
49012 case 37:
49013 // mul x, 37 => add ((shl (mul x, 9), 2), x)
49014 return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
49015 case 73:
49016 // mul x, 73 => add ((shl (mul x, 9), 3), x)
49017 return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
49018 case 13:
49019 // mul x, 13 => add ((shl (mul x, 3), 2), x)
49020 return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
49021 case 23:
49022 // mul x, 23 => sub ((shl (mul x, 3), 3), x)
49023 return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
49024 case 26:
49025 // mul x, 26 => add ((mul (mul x, 5), 5), x)
49026 return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
49027 case 28:
49028 // mul x, 28 => add ((mul (mul x, 9), 3), x)
49029 return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
49030 case 29:
49031 // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
49032 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
49033 combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
49034 }
49036 // Another trick. If this is a power 2 + 2/4/8, we can use a shift followed
49037 // by a single LEA.
49038 // First check if this a sum of two power of 2s because that's easy. Then
49039 // count how many zeros are up to the first bit.
49040 // TODO: We can do this even without LEA at a cost of two shifts and an add.
49041 if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
49042 unsigned ScaleShift = llvm::countr_zero(MulAmt);
49043 if (ScaleShift >= 1 && ScaleShift < 4) {
49044 unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
49045 SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
49046 DAG.getConstant(ShiftAmt, DL, MVT::i8));
49047 SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
49048 DAG.getConstant(ScaleShift, DL, MVT::i8));
49049 return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
49050 }
49051 }
49053 return SDValue();
49054 }
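// Worked example (editorial sketch): MulAmt = 40 = 32 + 8 has two set bits;
// ScaleShift = 3 (an LEA scale of 8) and ShiftAmt = 5, so the multiply
// becomes (x << 5) + (x << 3), one shift plus one scaled LEA.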
49056 // If the upper 17 bits of either element are zero and the other element are
49057 // zero/sign bits then we can use PMADDWD, which is always at least as quick as
49058 // PMULLD, except on KNL.
static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (Subtarget.isPMADDWDSlow())
    return SDValue();

  EVT VT = N->getValueType(0);

  // Only support vXi32 vectors.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
    return SDValue();

  // Make sure the type is legal or can split/widen to a legal type.
  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 1 || !isPowerOf2_32(NumElts))
    return SDValue();

  // With AVX512 but without BWI, we would need to split v32i16.
  if (32 <= (2 * NumElts) && Subtarget.hasAVX512() && !Subtarget.hasBWI())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // If we are zero/sign extending two steps without SSE4.1, it's better to
  // reduce the vmul width instead.
  if (!Subtarget.hasSSE41() &&
      (((N0.getOpcode() == ISD::ZERO_EXTEND &&
         N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
        (N1.getOpcode() == ISD::ZERO_EXTEND &&
         N1.getOperand(0).getScalarValueSizeInBits() <= 8)) ||
       ((N0.getOpcode() == ISD::SIGN_EXTEND &&
         N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
        (N1.getOpcode() == ISD::SIGN_EXTEND &&
         N1.getOperand(0).getScalarValueSizeInBits() <= 8))))
    return SDValue();

  // If we are sign extending a wide vector without SSE4.1, it's better to
  // reduce the vmul width instead.
  if (!Subtarget.hasSSE41() &&
      (N0.getOpcode() == ISD::SIGN_EXTEND &&
       N0.getOperand(0).getValueSizeInBits() > 128) &&
      (N1.getOpcode() == ISD::SIGN_EXTEND &&
       N1.getOperand(0).getValueSizeInBits() > 128))
    return SDValue();

  // Sign bits must extend down to the lowest i16.
  if (DAG.ComputeMaxSignificantBits(N1) > 16 ||
      DAG.ComputeMaxSignificantBits(N0) > 16)
    return SDValue();

  // At least one of the elements must be zero in the upper 17 bits, or can be
  // safely made zero without altering the final result.
  auto GetZeroableOp = [&](SDValue Op) {
    APInt Mask17 = APInt::getHighBitsSet(32, 17);
    if (DAG.MaskedValueIsZero(Op, Mask17))
      return Op;
    // Mask off upper 16-bits of sign-extended constants.
    if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()))
      return DAG.getNode(ISD::AND, SDLoc(N), VT, Op,
                         DAG.getConstant(0xFFFF, SDLoc(N), VT));
    if (Op.getOpcode() == ISD::SIGN_EXTEND && N->isOnlyUserOf(Op.getNode())) {
      SDValue Src = Op.getOperand(0);
      // Convert sext(vXi16) to zext(vXi16).
      if (Src.getScalarValueSizeInBits() == 16 && VT.getSizeInBits() <= 128)
        return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
      // Convert sext(vXi8) to zext(vXi16 sext(vXi8)) on pre-SSE41 targets
      // which will expand the extension.
      if (Src.getScalarValueSizeInBits() < 16 && !Subtarget.hasSSE41()) {
        EVT ExtVT = VT.changeVectorElementType(MVT::i16);
        Src = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), ExtVT, Src);
        return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
      }
    }
    // Convert SIGN_EXTEND_VECTOR_INREG to ZERO_EXTEND_VECTOR_INREG.
    if (Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
        N->isOnlyUserOf(Op.getNode())) {
      SDValue Src = Op.getOperand(0);
      if (Src.getScalarValueSizeInBits() == 16)
        return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(N), VT, Src);
    }
    // Convert VSRAI(Op, 16) to VSRLI(Op, 16).
    if (Op.getOpcode() == X86ISD::VSRAI && Op.getConstantOperandVal(1) == 16 &&
        N->isOnlyUserOf(Op.getNode())) {
      return DAG.getNode(X86ISD::VSRLI, SDLoc(N), VT, Op.getOperand(0),
                         Op.getOperand(1));
    }
    return SDValue();
  };
  SDValue ZeroN0 = GetZeroableOp(N0);
  SDValue ZeroN1 = GetZeroableOp(N1);
  if (!ZeroN0 && !ZeroN1)
    return SDValue();
  N0 = ZeroN0 ? ZeroN0 : N0;
  N1 = ZeroN1 ? ZeroN1 : N1;

  // Use SplitOpsAndApply to handle AVX splitting.
  auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
    MVT ResVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
    MVT OpVT = MVT::getVectorVT(MVT::i16, Ops[0].getValueSizeInBits() / 16);
    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
                       DAG.getBitcast(OpVT, Ops[0]),
                       DAG.getBitcast(OpVT, Ops[1]));
  };
  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {N0, N1},
                          PMADDWDBuilder);
}
static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT VT = N->getValueType(0);

  // Only support vXi64 vectors.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
      VT.getVectorNumElements() < 2 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  // PMULDQ returns the 64-bit result of the signed multiplication of the
  // lower 32 bits. We can lower with this if the sign bits stretch that far.
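  // e.g. (illustrative): for v2i64 inputs that are sign extensions of i32
  // values, each 64-bit product depends only on the low 32 bits of the
  // operands, which is exactly what a single PMULDQ computes.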
  if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
      DAG.ComputeNumSignBits(N1) > 32) {
    auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                            ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {N0, N1},
                            PMULDQBuilder, /*CheckBWI*/ false);
  }

  // If the upper bits are zero we can use a single pmuludq.
  APInt Mask = APInt::getHighBitsSet(64, 32);
  if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
    auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                             ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {N0, N1},
                            PMULUDQBuilder, /*CheckBWI*/ false);
  }

  return SDValue();
}
static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);

  if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
    return V;

  if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
    return V;

  if (DCI.isBeforeLegalize() && VT.isVector())
    return reduceVMULWidth(N, DAG, Subtarget);

  // Optimize a single multiply with constant into two operations in order to
  // implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
  if (!MulConstantOptimization)
    return SDValue();

  // An imul is usually smaller than the alternative sequence.
  if (DAG.getMachineFunction().getFunction().hasMinSize())
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  if (VT != MVT::i64 && VT != MVT::i32 &&
      (!VT.isVector() || !VT.isSimple() || !VT.isInteger()))
    return SDValue();

  ConstantSDNode *CNode = isConstOrConstSplat(
      N->getOperand(1), /*AllowUndefs*/ true, /*AllowTrunc*/ false);
  const APInt *C = nullptr;
  if (!CNode) {
    if (VT.isVector())
      if (auto *RawC = getTargetConstantFromNode(N->getOperand(1)))
        if (auto *SplatC = RawC->getSplatValue())
          C = &(SplatC->getUniqueInteger());

    if (!C || C->getBitWidth() != VT.getScalarSizeInBits())
      return SDValue();
  } else {
    C = &(CNode->getAPIntValue());
  }

  if (isPowerOf2_64(C->getZExtValue()))
    return SDValue();

  int64_t SignMulAmt = C->getSExtValue();
  assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
  uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
  SDLoc DL(N);
  SDValue NewMul = SDValue();
  if (VT == MVT::i64 || VT == MVT::i32) {
    if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                           DAG.getConstant(AbsMulAmt, DL, VT));
      if (SignMulAmt < 0)
        NewMul =
            DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);

      return NewMul;
    }

    uint64_t MulAmt1 = 0;
    uint64_t MulAmt2 = 0;
    if ((AbsMulAmt % 9) == 0) {
      MulAmt1 = 9;
      MulAmt2 = AbsMulAmt / 9;
    } else if ((AbsMulAmt % 5) == 0) {
      MulAmt1 = 5;
      MulAmt2 = AbsMulAmt / 5;
    } else if ((AbsMulAmt % 3) == 0) {
      MulAmt1 = 3;
      MulAmt2 = AbsMulAmt / 3;
    }

    // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
    if (MulAmt2 &&
        (isPowerOf2_64(MulAmt2) ||
         (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {

      if (isPowerOf2_64(MulAmt2) && !(SignMulAmt >= 0 && N->hasOneUse() &&
                                      N->use_begin()->getOpcode() == ISD::ADD))
        // If the second multiplier is a power of 2, issue it first. We want
        // the multiply by 3, 5, or 9 to be folded into the addressing mode
        // unless the lone use is an add. Only do this for positive multiply
        // amounts since the negate would prevent it from being used as an
        // address mode anyway.
        std::swap(MulAmt1, MulAmt2);
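      // Worked example (illustrative): AbsMulAmt = 45 = 9 * 5 gives two LEAs:
      // t = x + 8*x, then NewMul = t + 4*t. AbsMulAmt = 48 = 3 * 16 issues the
      // pow2 factor first: NewMul = (x << 4) followed by an LEA scale by 3.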
      if (isPowerOf2_64(MulAmt1))
        NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                             DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
      else
        NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                             DAG.getConstant(MulAmt1, DL, VT));

      if (isPowerOf2_64(MulAmt2))
        NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
                             DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
      else
        NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                             DAG.getConstant(MulAmt2, DL, VT));

      // Negate the result.
      if (SignMulAmt < 0)
        NewMul =
            DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
    } else if (!Subtarget.slowLEA())
      NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
  }
  if (!NewMul) {
    EVT ShiftVT = VT.isVector() ? VT : MVT::i8;
    assert(C->getZExtValue() != 0 &&
           C->getZExtValue() != maxUIntN(VT.getScalarSizeInBits()) &&
           "Both cases that could cause potential overflows should have "
           "already been handled.");
    if (isPowerOf2_64(AbsMulAmt - 1)) {
      // (mul x, 2^N + 1) => (add (shl x, N), x)
      NewMul = DAG.getNode(
          ISD::ADD, DL, VT, N->getOperand(0),
          DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                      DAG.getConstant(Log2_64(AbsMulAmt - 1), DL, ShiftVT)));
      // To negate, subtract the number from zero.
      if (SignMulAmt < 0)
        NewMul =
            DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
    } else if (isPowerOf2_64(AbsMulAmt + 1)) {
      // (mul x, 2^N - 1) => (sub (shl x, N), x)
      NewMul =
          DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                      DAG.getConstant(Log2_64(AbsMulAmt + 1), DL, ShiftVT));
      // To negate, reverse the operands of the subtract.
      if (SignMulAmt < 0)
        NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
      else
        NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2) &&
               (!VT.isVector() || Subtarget.fastImmVectorShift())) {
      // (mul x, 2^N + 2) => (add (shl x, N), (add x, x))
      NewMul =
          DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                      DAG.getConstant(Log2_64(AbsMulAmt - 2), DL, ShiftVT));
      NewMul = DAG.getNode(
          ISD::ADD, DL, VT, NewMul,
          DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2) &&
               (!VT.isVector() || Subtarget.fastImmVectorShift())) {
      // (mul x, 2^N - 2) => (sub (shl x, N), (add x, x))
      NewMul =
          DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                      DAG.getConstant(Log2_64(AbsMulAmt + 2), DL, ShiftVT));
      NewMul = DAG.getNode(
          ISD::SUB, DL, VT, NewMul,
          DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
    } else if (SignMulAmt >= 0 && VT.isVector() &&
               Subtarget.fastImmVectorShift()) {
      uint64_t AbsMulAmtLowBit = AbsMulAmt & (-AbsMulAmt);
      uint64_t ShiftAmt1;
      std::optional<unsigned> Opc;
      if (isPowerOf2_64(AbsMulAmt - AbsMulAmtLowBit)) {
        ShiftAmt1 = AbsMulAmt - AbsMulAmtLowBit;
        Opc = ISD::ADD;
      } else if (isPowerOf2_64(AbsMulAmt + AbsMulAmtLowBit)) {
        ShiftAmt1 = AbsMulAmt + AbsMulAmtLowBit;
        Opc = ISD::SUB;
      }

      if (Opc) {
        SDValue Shift1 =
            DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                        DAG.getConstant(Log2_64(ShiftAmt1), DL, ShiftVT));
        SDValue Shift2 =
            DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                        DAG.getConstant(Log2_64(AbsMulAmtLowBit), DL, ShiftVT));
        NewMul = DAG.getNode(*Opc, DL, VT, Shift1, Shift2);
      }
    }
  }

  return NewMul;
}
// Try to form a MULHU or MULHS node by looking for
// (srl (mul ext, ext), 16)
// TODO: This is X86 specific because we want to be able to handle wide types
// before type legalization. But we can only do it if the vector will be
// legalized via widening/splitting. Type legalization can't handle promotion
// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
// combiner.
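// e.g. (illustrative):
//   (srl (mul (zext v8i16:x to v8i32), (zext v8i16:y to v8i32)), 16)
//     --> (zext (mulhu v8i16:x, v8i16:y) to v8i32)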
static SDValue combineShiftToPMULH(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
         "SRL or SRA node is required here!");
  SDLoc DL(N);

  if (!Subtarget.hasSSE2())
    return SDValue();

  // The operation feeding into the shift must be a multiply.
  SDValue ShiftOperand = N->getOperand(0);
  if (ShiftOperand.getOpcode() != ISD::MUL || !ShiftOperand.hasOneUse())
    return SDValue();

  // Input type should be at least vXi32.
  EVT VT = N->getValueType(0);
  if (!VT.isVector() || VT.getVectorElementType().getSizeInBits() < 32)
    return SDValue();

  // Need a shift by 16.
  APInt ShiftAmt;
  if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), ShiftAmt) ||
      ShiftAmt != 16)
    return SDValue();

  SDValue LHS = ShiftOperand.getOperand(0);
  SDValue RHS = ShiftOperand.getOperand(1);

  unsigned ExtOpc = LHS.getOpcode();
  if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
      RHS.getOpcode() != ExtOpc)
    return SDValue();

  // Peek through the extends.
  LHS = LHS.getOperand(0);
  RHS = RHS.getOperand(0);

  // Ensure the input types match.
  EVT MulVT = LHS.getValueType();
  if (MulVT.getVectorElementType() != MVT::i16 || RHS.getValueType() != MulVT)
    return SDValue();

  unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
  SDValue Mulh = DAG.getNode(Opc, DL, MulVT, LHS, RHS);

  ExtOpc = N->getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
  return DAG.getNode(ExtOpc, DL, VT, Mulh);
}
static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zeros or all ones.
  if (VT.isInteger() && !VT.isVector() &&
      N1C && N0.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue N00 = N0.getOperand(0);
    APInt Mask = N0.getConstantOperandAPInt(1);
    Mask <<= N1C->getAPIntValue();
    bool MaskOK = false;
    // We can handle cases concerning bit-widening nodes containing setcc_c if
    // we carefully interrogate the mask to make sure we are semantics
    // preserving.
    // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
    // of the underlying setcc_c operation if the setcc_c was zero extended.
    // Consider the following example:
    //   zext(setcc_c)                 -> i32 0x0000FFFF
    //   c1                            -> i32 0x0000FFFF
    //   c2                            -> i32 0x00000001
    //   (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
    //   (and setcc_c, (c1 << c2))     -> i32 0x0000FFFE
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = true;
    } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = true;
    } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
                N00.getOpcode() == ISD::ANY_EXTEND) &&
               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
    }
    if (MaskOK && Mask != 0) {
      SDLoc DL(N);
      return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
    }
  }

  return SDValue();
}
static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned Size = VT.getSizeInBits();

  if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
    return V;

  // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
  // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
  // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
  // depending on sign of (SarConst - [56,48,32,24,16])

  // sexts in X86 are MOVs. The MOVs have the same code size
  // as above SHIFTs (only SHIFT by 1 has lower code size).
  // However the MOVs have two advantages over a SHIFT:
  // 1. MOVs can write to a register that differs from source.
  // 2. MOVs accept memory operands.
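  // Worked example (illustrative): for i32, (ashr (shl a, 24), 25) becomes
  // (sra (sext_inreg a, i8), 1), and (ashr (shl a, 24), 22) becomes
  // (shl (sext_inreg a, i8), 2).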
  if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
      N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
      N0.getOperand(1).getOpcode() != ISD::Constant)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
  APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
  EVT CVT = N1.getValueType();

  if (SarConst.isNegative())
    return SDValue();

  for (MVT SVT : {MVT::i8, MVT::i16, MVT::i32}) {
    unsigned ShiftSize = SVT.getSizeInBits();
    // Skip types without a corresponding sext/zext and ShlConst values that
    // are not one of [56,48,32,24,16].
    if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
      continue;
    SDLoc DL(N);
    SDValue NN =
        DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
    SarConst = SarConst - (Size - ShiftSize);
    if (SarConst == 0)
      return NN;
    if (SarConst.isNegative())
      return DAG.getNode(ISD::SHL, DL, VT, NN,
                         DAG.getConstant(-SarConst, DL, CVT));
    return DAG.getNode(ISD::SRA, DL, VT, NN,
                       DAG.getConstant(SarConst, DL, CVT));
  }
  return SDValue();
}
static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();

  if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
    return V;

  // Only do this on the last DAG combine as it can interfere with other
  // combines.
  if (!DCI.isAfterLegalizeDAG())
    return SDValue();

  // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
  // TODO: This is a generic DAG combine that became an x86-only combine to
  // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
  // and-not ('andn').
  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
    return SDValue();

  auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
  auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!ShiftC || !AndC)
    return SDValue();

  // If we can shrink the constant mask below 8-bits or 32-bits, then this
  // transform should reduce code size. It may also enable secondary transforms
  // from improved known-bits analysis or instruction selection.
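  // For example (illustrative): "srl (and X, 0xFF00), 8" needs a 16-bit mask
  // immediate, while "and (srl X, 8), 0xFF" needs only an 8-bit one.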
  APInt MaskVal = AndC->getAPIntValue();

  // If this can be matched by a zero extend, don't optimize.
  if (MaskVal.isMask()) {
    unsigned TO = MaskVal.countr_one();
    if (TO >= 8 && isPowerOf2_32(TO))
      return SDValue();
  }

  APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
  unsigned OldMaskSize = MaskVal.getSignificantBits();
  unsigned NewMaskSize = NewMaskVal.getSignificantBits();
  if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
      (OldMaskSize > 32 && NewMaskSize <= 32)) {
    // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
    SDLoc DL(N);
    SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
    SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
    return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
  }

  return SDValue();
}
static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  assert(isHorizOp(Opcode) && "Unexpected hadd/hsub/pack opcode");

  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT SrcVT = N0.getValueType();

  SDValue BC0 =
      N->isOnlyUserOf(N0.getNode()) ? peekThroughOneUseBitcasts(N0) : N0;
  SDValue BC1 =
      N->isOnlyUserOf(N1.getNode()) ? peekThroughOneUseBitcasts(N1) : N1;

  // Attempt to fold HOP(LOSUBVECTOR(SHUFFLE(X)),HISUBVECTOR(SHUFFLE(X)))
  // to SHUFFLE(HOP(LOSUBVECTOR(X),HISUBVECTOR(X))), this is mainly for
  // truncation trees that help us avoid lane crossing shuffles.
  // TODO: There's a lot more we can do for PACK/HADD style shuffle combines.
  // TODO: We don't handle vXf64 shuffles yet.
  if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
    if (SDValue BCSrc = getSplitVectorSrc(BC0, BC1, false)) {
      SmallVector<SDValue> ShuffleOps;
      SmallVector<int> ShuffleMask, ScaledMask;
      SDValue Vec = peekThroughBitcasts(BCSrc);
      if (getTargetShuffleInputs(Vec, ShuffleOps, ShuffleMask, DAG)) {
        resolveTargetShuffleInputsAndMask(ShuffleOps, ShuffleMask);
        // To keep the HOP LHS/RHS coherency, we must be able to scale the
        // unary shuffle to a v4X64 width - we can probably relax this in the
        // future.
        if (!isAnyZero(ShuffleMask) && ShuffleOps.size() == 1 &&
            ShuffleOps[0].getValueType().is256BitVector() &&
            scaleShuffleElements(ShuffleMask, 4, ScaledMask)) {
          SDValue Lo, Hi;
          MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
          std::tie(Lo, Hi) = DAG.SplitVector(ShuffleOps[0], DL);
          Lo = DAG.getBitcast(SrcVT, Lo);
          Hi = DAG.getBitcast(SrcVT, Hi);
          SDValue Res = DAG.getNode(Opcode, DL, VT, Lo, Hi);
          Res = DAG.getBitcast(ShufVT, Res);
          Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ScaledMask);
          return DAG.getBitcast(VT, Res);
        }
      }
    }
  }
  // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(Z,W)) -> SHUFFLE(HOP()).
  if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
    // If either/both ops are a shuffle that can scale to v2x64,
    // then see if we can perform this as a v4x32 post shuffle.
    SmallVector<SDValue> Ops0, Ops1;
    SmallVector<int> Mask0, Mask1, ScaledMask0, ScaledMask1;
    bool IsShuf0 =
        getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
        scaleShuffleElements(Mask0, 2, ScaledMask0) &&
        all_of(Ops0, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
    bool IsShuf1 =
        getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
        scaleShuffleElements(Mask1, 2, ScaledMask1) &&
        all_of(Ops1, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
    if (IsShuf0 || IsShuf1) {
      if (!IsShuf0) {
        Ops0.assign({BC0});
        ScaledMask0.assign({0, 1});
      }
      if (!IsShuf1) {
        Ops1.assign({BC1});
        ScaledMask1.assign({0, 1});
      }

      SDValue LHS, RHS;
      int PostShuffle[4] = {-1, -1, -1, -1};
      auto FindShuffleOpAndIdx = [&](int M, int &Idx, ArrayRef<SDValue> Ops) {
        if (M < 0)
          return true;
        Idx = M;
        SDValue Src = Ops[M / 2];
        if (!LHS || LHS == Src) {
          LHS = Src;
          return true;
        }
        if (!RHS || RHS == Src) {
          Idx += 2;
          RHS = Src;
          return true;
        }
        return false;
      };
      if (FindShuffleOpAndIdx(ScaledMask0[0], PostShuffle[0], Ops0) &&
          FindShuffleOpAndIdx(ScaledMask0[1], PostShuffle[1], Ops0) &&
          FindShuffleOpAndIdx(ScaledMask1[0], PostShuffle[2], Ops1) &&
          FindShuffleOpAndIdx(ScaledMask1[1], PostShuffle[3], Ops1)) {
        LHS = DAG.getBitcast(SrcVT, LHS);
        RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
        MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
        SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
        Res = DAG.getBitcast(ShufVT, Res);
        Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, PostShuffle);
        return DAG.getBitcast(VT, Res);
      }
    }
  }
  // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(HOP(X,Y)).
  if (VT.is256BitVector() && Subtarget.hasInt256()) {
    SmallVector<int> Mask0, Mask1;
    SmallVector<SDValue> Ops0, Ops1;
    SmallVector<int, 2> ScaledMask0, ScaledMask1;
    if (getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
        getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
        !Ops0.empty() && !Ops1.empty() &&
        llvm::all_of(Ops0,
            [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
        llvm::all_of(Ops1,
            [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
        scaleShuffleElements(Mask0, 2, ScaledMask0) &&
        scaleShuffleElements(Mask1, 2, ScaledMask1)) {
      SDValue Op00 = peekThroughBitcasts(Ops0.front());
      SDValue Op10 = peekThroughBitcasts(Ops1.front());
      SDValue Op01 = peekThroughBitcasts(Ops0.back());
      SDValue Op11 = peekThroughBitcasts(Ops1.back());
      if ((Op00 == Op11) && (Op01 == Op10)) {
        std::swap(Op10, Op11);
        ShuffleVectorSDNode::commuteMask(ScaledMask1);
      }
      if ((Op00 == Op10) && (Op01 == Op11)) {
        const int Map[4] = {0, 2, 1, 3};
        SmallVector<int, 4> ShuffleMask(
            {Map[ScaledMask0[0]], Map[ScaledMask1[0]], Map[ScaledMask0[1]],
             Map[ScaledMask1[1]]});
        MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
        SDValue Res = DAG.getNode(Opcode, DL, VT, DAG.getBitcast(SrcVT, Op00),
                                  DAG.getBitcast(SrcVT, Op01));
        Res = DAG.getBitcast(ShufVT, Res);
        Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ShuffleMask);
        return DAG.getBitcast(VT, Res);
      }
    }
  }

  return SDValue();
}
static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
         "Unexpected pack opcode");

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned NumDstElts = VT.getVectorNumElements();
  unsigned DstBitsPerElt = VT.getScalarSizeInBits();
  unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
  assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
         N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
         "Unexpected PACKSS/PACKUS input type");

  bool IsSigned = (X86ISD::PACKSS == Opcode);

  // Constant Folding.
  APInt UndefElts0, UndefElts1;
  SmallVector<APInt, 32> EltBits0, EltBits1;
  if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
      (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
      getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
      getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
    unsigned NumLanes = VT.getSizeInBits() / 128;
    unsigned NumSrcElts = NumDstElts / 2;
    unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
    unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;

    APInt Undefs(NumDstElts, 0);
    SmallVector<APInt, 32> Bits(NumDstElts, APInt::getZero(DstBitsPerElt));
    for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
      for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
        unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
        auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
        auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);

        if (UndefElts[SrcIdx]) {
          Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
          continue;
        }

        APInt &Val = EltBits[SrcIdx];
        if (IsSigned) {
          // PACKSS: Truncate signed value with signed saturation.
          // Source values less than dst minint are saturated to minint.
          // Source values greater than dst maxint are saturated to maxint.
          if (Val.isSignedIntN(DstBitsPerElt))
            Val = Val.trunc(DstBitsPerElt);
          else if (Val.isNegative())
            Val = APInt::getSignedMinValue(DstBitsPerElt);
          else
            Val = APInt::getSignedMaxValue(DstBitsPerElt);
        } else {
          // PACKUS: Truncate signed value with unsigned saturation.
          // Source values less than zero are saturated to zero.
          // Source values greater than dst maxuint are saturated to maxuint.
          if (Val.isIntN(DstBitsPerElt))
            Val = Val.trunc(DstBitsPerElt);
          else if (Val.isNegative())
            Val = APInt::getZero(DstBitsPerElt);
          else
            Val = APInt::getAllOnes(DstBitsPerElt);
        }
        Bits[Lane * NumDstEltsPerLane + Elt] = Val;
      }
    }

    return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
  }
  // Try to fold PACK(SHUFFLE(),SHUFFLE()) -> SHUFFLE(PACK()).
  if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
    return V;

  // Try to fold PACKSS(NOT(X),NOT(Y)) -> NOT(PACKSS(X,Y)).
  // Currently limit this to allsignbits cases only.
  if (IsSigned &&
      (N0.isUndef() || DAG.ComputeNumSignBits(N0) == SrcBitsPerElt) &&
      (N1.isUndef() || DAG.ComputeNumSignBits(N1) == SrcBitsPerElt)) {
    SDValue Not0 = N0.isUndef() ? N0 : IsNOT(N0, DAG);
    SDValue Not1 = N1.isUndef() ? N1 : IsNOT(N1, DAG);
    if (Not0 && Not1) {
      SDLoc DL(N);
      MVT SrcVT = N0.getSimpleValueType();
      SDValue Pack =
          DAG.getNode(X86ISD::PACKSS, DL, VT, DAG.getBitcast(SrcVT, Not0),
                      DAG.getBitcast(SrcVT, Not1));
      return DAG.getNOT(DL, Pack, VT);
    }
  }

  // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
  // truncate to create a larger truncate.
  if (Subtarget.hasAVX512() &&
      N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
      N0.getOperand(0).getValueType() == MVT::v8i32) {
    if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
        (!IsSigned &&
         DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
      if (Subtarget.hasVLX())
        return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));

      // Widen input to v16i32 so we can truncate that.
      SDLoc dl(N);
      SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
                                   N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
      return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
    }
  }

  // Try to fold PACK(EXTEND(X),EXTEND(Y)) -> CONCAT(X,Y) subvectors.
  if (VT.is128BitVector()) {
    unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue Src0, Src1;
    if (N0.getOpcode() == ExtOpc &&
        N0.getOperand(0).getValueType().is64BitVector() &&
        N0.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
      Src0 = N0.getOperand(0);
    }
    if (N1.getOpcode() == ExtOpc &&
        N1.getOperand(0).getValueType().is64BitVector() &&
        N1.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
      Src1 = N1.getOperand(0);
    }
    if ((Src0 || N0.isUndef()) && (Src1 || N1.isUndef())) {
      assert((Src0 || Src1) && "Found PACK(UNDEF,UNDEF)");
      Src0 = Src0 ? Src0 : DAG.getUNDEF(Src1.getValueType());
      Src1 = Src1 ? Src1 : DAG.getUNDEF(Src0.getValueType());
      return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Src0, Src1);
    }

    // Try again with pack(*_extend_vector_inreg, undef).
    unsigned VecInRegOpc = IsSigned ? ISD::SIGN_EXTEND_VECTOR_INREG
                                    : ISD::ZERO_EXTEND_VECTOR_INREG;
    if (N0.getOpcode() == VecInRegOpc && N1.isUndef() &&
        N0.getOperand(0).getScalarValueSizeInBits() < DstBitsPerElt)
      return getEXTEND_VECTOR_INREG(ExtOpc, SDLoc(N), VT, N0.getOperand(0),
                                    DAG);
  }

  // Attempt to combine as shuffle.
  SDValue Op(N, 0);
  if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
    return Res;

  return SDValue();
}
static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget &Subtarget) {
  assert((X86ISD::HADD == N->getOpcode() || X86ISD::FHADD == N->getOpcode() ||
          X86ISD::HSUB == N->getOpcode() || X86ISD::FHSUB == N->getOpcode()) &&
         "Unexpected horizontal add/sub opcode");

  if (!shouldUseHorizontalOp(true, DAG, Subtarget)) {
    MVT VT = N->getSimpleValueType(0);
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);

    // HOP(HOP'(X,X),HOP'(Y,Y)) -> HOP(PERMUTE(HOP'(X,Y)),PERMUTE(HOP'(X,Y))).
    if (LHS != RHS && LHS.getOpcode() == N->getOpcode() &&
        LHS.getOpcode() == RHS.getOpcode() &&
        LHS.getValueType() == RHS.getValueType() &&
        N->isOnlyUserOf(LHS.getNode()) && N->isOnlyUserOf(RHS.getNode())) {
      SDValue LHS0 = LHS.getOperand(0);
      SDValue LHS1 = LHS.getOperand(1);
      SDValue RHS0 = RHS.getOperand(0);
      SDValue RHS1 = RHS.getOperand(1);
      if ((LHS0 == LHS1 || LHS0.isUndef() || LHS1.isUndef()) &&
          (RHS0 == RHS1 || RHS0.isUndef() || RHS1.isUndef())) {
        SDLoc DL(N);
        SDValue Res = DAG.getNode(LHS.getOpcode(), DL, LHS.getValueType(),
                                  LHS0.isUndef() ? LHS1 : LHS0,
                                  RHS0.isUndef() ? RHS1 : RHS0);
        MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
        Res = DAG.getBitcast(ShufVT, Res);
        SDValue NewLHS =
            DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
                        getV4X86ShuffleImm8ForMask({0, 1, 0, 1}, DL, DAG));
        SDValue NewRHS =
            DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
                        getV4X86ShuffleImm8ForMask({2, 3, 2, 3}, DL, DAG));
        return DAG.getNode(N->getOpcode(), DL, VT, DAG.getBitcast(VT, NewLHS),
                           DAG.getBitcast(VT, NewRHS));
      }
    }
  }

  // Try to fold HOP(SHUFFLE(),SHUFFLE()) -> SHUFFLE(HOP()).
  if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
    return V;

  return SDValue();
}
static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {
  assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
          X86ISD::VSRL == N->getOpcode()) &&
         "Unexpected shift opcode");
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Shift zero -> zero.
  if (ISD::isBuildVectorAllZeros(N0.getNode()))
    return DAG.getConstant(0, SDLoc(N), VT);

  // Detect constant shift amounts.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
  if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
    unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
    return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
                                      EltBits[0].getZExtValue(), DAG);
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
    return SDValue(N, 0);

  return SDValue();
}
static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
          X86ISD::VSRLI == Opcode) &&
         "Unexpected shift opcode");
  bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
  assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
         "Unexpected value type");
  assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");

  // (shift undef, X) -> 0
  if (N0.isUndef())
    return DAG.getConstant(0, SDLoc(N), VT);

  // Out of range logical bit shifts are guaranteed to be zero.
  // Out of range arithmetic bit shifts splat the sign bit.
  unsigned ShiftVal = N->getConstantOperandVal(1);
  if (ShiftVal >= NumBitsPerElt) {
    if (LogicalShift)
      return DAG.getConstant(0, SDLoc(N), VT);
    ShiftVal = NumBitsPerElt - 1;
  }

  // (shift X, 0) -> X
  if (!ShiftVal)
    return N0;

  // (shift 0, C) -> 0
  if (ISD::isBuildVectorAllZeros(N0.getNode()))
    // N0 is all zeros or undef. We guarantee that the bits shifted into the
    // result are all zeros, not undef.
    return DAG.getConstant(0, SDLoc(N), VT);

  // (VSRAI -1, C) -> -1
  if (!LogicalShift && ISD::isBuildVectorAllOnes(N0.getNode()))
    // N0 is all ones or undef. We guarantee that the bits shifted into the
    // result are all ones, not undef.
    return DAG.getConstant(-1, SDLoc(N), VT);

  auto MergeShifts = [&](SDValue X, uint64_t Amt0, uint64_t Amt1) {
    unsigned NewShiftVal = Amt0 + Amt1;
    if (NewShiftVal >= NumBitsPerElt) {
      // Out of range logical bit shifts are guaranteed to be zero.
      // Out of range arithmetic bit shifts splat the sign bit.
      if (LogicalShift)
        return DAG.getConstant(0, SDLoc(N), VT);
      NewShiftVal = NumBitsPerElt - 1;
    }
    return DAG.getNode(Opcode, SDLoc(N), VT, N0.getOperand(0),
                       DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
  };

  // (shift (shift X, C2), C1) -> (shift X, (C1 + C2))
  if (Opcode == N0.getOpcode())
    return MergeShifts(N0.getOperand(0), ShiftVal, N0.getConstantOperandVal(1));

  // (shl (add X, X), C) -> (shl X, (C + 1))
  if (Opcode == X86ISD::VSHLI && N0.getOpcode() == ISD::ADD &&
      N0.getOperand(0) == N0.getOperand(1))
    return MergeShifts(N0.getOperand(0), ShiftVal, 1);

  // We can decode 'whole byte' logical bit shifts as shuffles.
  if (LogicalShift && (ShiftVal % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  // Attempt to detect an expanded vXi64 SIGN_EXTEND_INREG vXi1 pattern, and
  // convert to a splatted v2Xi32 SIGN_EXTEND_INREG pattern:
  // psrad(pshufd(psllq(X,63),1,1,3,3),31) ->
  // pshufd(psrad(pslld(X,31),31),0,0,2,2).
  if (Opcode == X86ISD::VSRAI && NumBitsPerElt == 32 && ShiftVal == 31 &&
      N0.getOpcode() == X86ISD::PSHUFD &&
      N0.getConstantOperandVal(1) == getV4X86ShuffleImm({1, 1, 3, 3}) &&
      N0->hasOneUse()) {
    SDValue BC = peekThroughOneUseBitcasts(N0.getOperand(0));
    if (BC.getOpcode() == X86ISD::VSHLI &&
        BC.getScalarValueSizeInBits() == 64 &&
        BC.getConstantOperandVal(1) == 63) {
      SDLoc DL(N);
      SDValue Src = BC.getOperand(0);
      Src = DAG.getBitcast(VT, Src);
      Src = DAG.getNode(X86ISD::PSHUFD, DL, VT, Src,
                        getV4X86ShuffleImm8ForMask({0, 0, 2, 2}, DL, DAG));
      Src = DAG.getNode(X86ISD::VSHLI, DL, VT, Src, N1);
      Src = DAG.getNode(X86ISD::VSRAI, DL, VT, Src, N1);
      return Src;
    }
  }
  auto TryConstantFold = [&](SDValue V) {
    APInt UndefElts;
    SmallVector<APInt, 32> EltBits;
    if (!getTargetConstantBitsFromNode(V, NumBitsPerElt, UndefElts, EltBits))
      return SDValue();
    assert(EltBits.size() == VT.getVectorNumElements() &&
           "Unexpected shift value type");
    // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
    // created an undef input due to no input bits being demanded, but user
    // still expects 0 in other bits.
    for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
      APInt &Elt = EltBits[i];
      if (UndefElts[i])
        Elt = 0;
      else if (X86ISD::VSHLI == Opcode)
        Elt <<= ShiftVal;
      else if (X86ISD::VSRAI == Opcode)
        Elt.ashrInPlace(ShiftVal);
      else
        Elt.lshrInPlace(ShiftVal);
    }
    // Reset undef elements since they were zeroed above.
    UndefElts = 0;
    return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
  };

  // Constant Folding.
  if (N->isOnlyUserOf(N0.getNode())) {
    if (SDValue C = TryConstantFold(N0))
      return C;

    // Fold (shift (logic X, C2), C1) -> (logic (shift X, C1), (shift C2, C1))
    // Don't break NOT patterns.
    SDValue BC = peekThroughOneUseBitcasts(N0);
    if (ISD::isBitwiseLogicOp(BC.getOpcode()) &&
        BC->isOnlyUserOf(BC.getOperand(1).getNode()) &&
        !ISD::isBuildVectorAllOnes(BC.getOperand(1).getNode())) {
      if (SDValue RHS = TryConstantFold(BC.getOperand(1))) {
        SDLoc DL(N);
        SDValue LHS = DAG.getNode(Opcode, DL, VT,
                                  DAG.getBitcast(VT, BC.getOperand(0)), N1);
        return DAG.getNode(BC.getOpcode(), DL, VT, LHS, RHS);
      }
    }
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBitsPerElt),
                               DCI))
    return SDValue(N, 0);

  return SDValue();
}
static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  unsigned Opcode = N->getOpcode();
  assert(((Opcode == X86ISD::PINSRB && VT == MVT::v16i8) ||
          (Opcode == X86ISD::PINSRW && VT == MVT::v8i16) ||
          Opcode == ISD::INSERT_VECTOR_ELT) &&
         "Unexpected vector insertion");

  SDValue Vec = N->getOperand(0);
  SDValue Scl = N->getOperand(1);
  SDValue Idx = N->getOperand(2);

  // Fold insert_vector_elt(undef, elt, 0) --> scalar_to_vector(elt).
  if (Opcode == ISD::INSERT_VECTOR_ELT && Vec.isUndef() && isNullConstant(Idx))
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Scl);

  if (Opcode == X86ISD::PINSRB || Opcode == X86ISD::PINSRW) {
    unsigned NumBitsPerElt = VT.getScalarSizeInBits();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLI.SimplifyDemandedBits(SDValue(N, 0),
                                 APInt::getAllOnes(NumBitsPerElt), DCI))
      return SDValue(N, 0);
  }

  // Attempt to combine insertion patterns to a shuffle.
  if (VT.isSimple() && DCI.isAfterLegalizeDAG()) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  return SDValue();
}
/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
/// OR -> CMPNEQSS.
static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget &Subtarget) {
  unsigned opcode;

  // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
  // we're requiring SSE2 for both.
  if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue CMP0 = N0.getOperand(1);
    SDValue CMP1 = N1.getOperand(1);
    SDLoc DL(N);

    // The SETCCs should both refer to the same CMP.
    if (CMP0.getOpcode() != X86ISD::FCMP || CMP0 != CMP1)
      return SDValue();

    SDValue CMP00 = CMP0->getOperand(0);
    SDValue CMP01 = CMP0->getOperand(1);
    EVT VT = CMP00.getValueType();

    if (VT == MVT::f32 || VT == MVT::f64 ||
        (VT == MVT::f16 && Subtarget.hasFP16())) {
      bool ExpectingFlags = false;
      // Check for any users that want flags:
      for (const SDNode *U : N->uses()) {
        if (ExpectingFlags)
          break;

        switch (U->getOpcode()) {
        default:
        case ISD::BR_CC:
        case ISD::BRCOND:
        case ISD::SELECT:
          ExpectingFlags = true;
          break;
        case ISD::CopyToReg:
        case ISD::SIGN_EXTEND:
        case ISD::ZERO_EXTEND:
        case ISD::ANY_EXTEND:
          break;
        }
      }

      if (!ExpectingFlags) {
        enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
        enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);

        if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
          X86::CondCode tmp = cc0;
          cc0 = cc1;
          cc1 = tmp;
        }

        if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
            (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
          // FIXME: need symbolic constants for these magic numbers.
          // See X86ATTInstPrinter.cpp:printSSECC().
          unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
          if (Subtarget.hasAVX512()) {
            SDValue FSetCC =
                DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
                            DAG.getTargetConstant(x86cc, DL, MVT::i8));
            // Need to fill with zeros to ensure the bitcast will produce zeroes
            // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
            SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
                                      DAG.getConstant(0, DL, MVT::v16i1),
                                      FSetCC, DAG.getIntPtrConstant(0, DL));
            return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
                                      N->getSimpleValueType(0));
          }
          SDValue OnesOrZeroesF =
              DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
                          CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));

          bool is64BitFP = (CMP00.getValueType() == MVT::f64);
          MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;

          if (is64BitFP && !Subtarget.is64Bit()) {
            // On a 32-bit target, we cannot bitcast the 64-bit float to a
            // 64-bit integer, since that's not a legal type. Since
            // OnesOrZeroesF is all ones or all zeroes, we don't need all the
            // bits, but can do this little dance to extract the lowest 32 bits
            // and work with those going forward.
            SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                                           OnesOrZeroesF);
            SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
            OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
                                        Vector32, DAG.getIntPtrConstant(0, DL));
            IntVT = MVT::i32;
          }

          SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
          SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
                                      DAG.getConstant(1, DL, IntVT));
          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                                              ANDed);
          return OneBitOfTruth;
        }
      }
    }
  }

  return SDValue();
}
/// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
static SDValue combineAndNotIntoANDNP(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");

  MVT VT = N->getSimpleValueType(0);
  if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
    return SDValue();

  SDValue X, Y;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (SDValue Not = IsNOT(N0, DAG)) {
    X = Not;
    Y = N1;
  } else if (SDValue Not = IsNOT(N1, DAG)) {
    X = Not;
    Y = N0;
  } else
    return SDValue();

  X = DAG.getBitcast(VT, X);
  Y = DAG.getBitcast(VT, Y);
  return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
}
/// Try to fold:
///   and (vector_shuffle<Z,...,Z>
///            (insert_vector_elt undef, (xor X, -1), Z), undef), Y
///   ->
///   andnp (vector_shuffle<Z,...,Z>
///              (insert_vector_elt undef, X, Z), undef), Y
static SDValue combineAndShuffleNot(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");

  EVT VT = N->getValueType(0);
  // Do not split 256 and 512 bit vectors with SSE2 as they overwrite original
  // value and require extra moves.
  if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
        ((VT.is256BitVector() || VT.is512BitVector()) && Subtarget.hasAVX())))
    return SDValue();

  auto GetNot = [&DAG](SDValue V) {
    auto *SVN = dyn_cast<ShuffleVectorSDNode>(peekThroughOneUseBitcasts(V));
    // TODO: SVN->hasOneUse() is a strong condition. It can be relaxed if all
    // end-users are ISD::AND including cases
    // (and(extract_vector_element(SVN), Y)).
    if (!SVN || !SVN->hasOneUse() || !SVN->isSplat() ||
        !SVN->getOperand(1).isUndef()) {
      return SDValue();
    }
    SDValue IVEN = SVN->getOperand(0);
    if (IVEN.getOpcode() != ISD::INSERT_VECTOR_ELT ||
        !IVEN.getOperand(0).isUndef() || !IVEN.hasOneUse())
      return SDValue();
    if (!isa<ConstantSDNode>(IVEN.getOperand(2)) ||
        IVEN.getConstantOperandAPInt(2) != SVN->getSplatIndex())
      return SDValue();
    SDValue Src = IVEN.getOperand(1);
    if (SDValue Not = IsNOT(Src, DAG)) {
      SDValue NotSrc = DAG.getBitcast(Src.getValueType(), Not);
      SDValue NotIVEN =
          DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(IVEN), IVEN.getValueType(),
                      IVEN.getOperand(0), NotSrc, IVEN.getOperand(2));
      return DAG.getVectorShuffle(SVN->getValueType(0), SDLoc(SVN), NotIVEN,
                                  SVN->getOperand(1), SVN->getMask());
    }
    return SDValue();
  };

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue X, Y;

  if (SDValue Not = GetNot(N0)) {
    X = Not;
    Y = N1;
  } else if (SDValue Not = GetNot(N1)) {
    X = Not;
    Y = N0;
  } else
    return SDValue();

  X = DAG.getBitcast(VT, X);
  Y = DAG.getBitcast(VT, Y);
  SDLoc DL(N);

  // We do not split for SSE at all, but we need to split vectors for AVX1 and
  // AVX2.
  if (!Subtarget.useAVX512Regs() && VT.is512BitVector()) {
    SDValue LoX, HiX;
    std::tie(LoX, HiX) = splitVector(X, DAG, DL);
    SDValue LoY, HiY;
    std::tie(LoY, HiY) = splitVector(Y, DAG, DL);
    EVT SplitVT = LoX.getValueType();
    SDValue LoV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {LoX, LoY});
    SDValue HiV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {HiX, HiY});
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, {LoV, HiV});
  }

  return DAG.getNode(X86ISD::ANDNP, DL, VT, {X, Y});
}
// Try to widen AND, OR and XOR nodes to VT in order to remove casts around
// logical operations, like in the example below.
//   or (and (truncate x), (truncate y)),
//      (xor (truncate z), build_vector (constants))
// Given a target type \p VT, we generate
//   or (and x, y), (xor z, zext(build_vector (constants)))
// given that x, y and z are of type \p VT. We can do so if the operands are
// either truncates from VT types, or the second operand is a vector of
// constants, or the node can be recursively promoted.
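// A small end-to-end sketch (illustrative): with VT = v8i32,
//   xor (trunc v8i32:z to v8i16), build_vector(i16 1, ...)
// becomes
//   xor v8i32:z, zext(build_vector(i16 1, ...) to v8i32)
// so the extend of the narrow result in the wrapper below can be folded away.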
static SDValue PromoteMaskArithmetic(SDNode *N, EVT VT, SelectionDAG &DAG,
                                     unsigned Depth) {
  // Limit recursion to avoid excessive compile times.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  if (N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND &&
      N->getOpcode() != ISD::OR)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  SDLoc DL(N);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isOperationLegalOrPromote(N->getOpcode(), VT))
    return SDValue();

  if (SDValue NN0 = PromoteMaskArithmetic(N0.getNode(), VT, DAG, Depth + 1))
    N0 = NN0;
  else {
    // The left side has to be a trunc.
    if (N0.getOpcode() != ISD::TRUNCATE)
      return SDValue();

    // The type of the truncated inputs.
    if (N0.getOperand(0).getValueType() != VT)
      return SDValue();

    N0 = N0.getOperand(0);
  }

  if (SDValue NN1 = PromoteMaskArithmetic(N1.getNode(), VT, DAG, Depth + 1))
    N1 = NN1;
  else {
    // The right side has to be a 'trunc' or a constant vector.
    bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
                    N1.getOperand(0).getValueType() == VT;
    if (!RHSTrunc && !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
      return SDValue();

    if (RHSTrunc)
      N1 = N1.getOperand(0);
    else
      N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
  }

  return DAG.getNode(N->getOpcode(), DL, VT, N0, N1);
}
// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
// register. In most cases we actually compare or select YMM-sized registers
// and mixing the two types creates horrible code. This method optimizes
// some of the transition sequences.
// Even with AVX-512 this is still useful for removing casts around logical
// operations on vXi1 mask types.
static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  assert(VT.isVector() && "Expected vector type");
  SDLoc DL(N);

  assert((N->getOpcode() == ISD::ANY_EXTEND ||
          N->getOpcode() == ISD::ZERO_EXTEND ||
          N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");

  SDValue Narrow = N->getOperand(0);
  EVT NarrowVT = Narrow.getValueType();

  // Generate the wide operation.
  SDValue Op = PromoteMaskArithmetic(Narrow.getNode(), VT, DAG, 0);
  if (!Op)
    return SDValue();
  switch (N->getOpcode()) {
  default: llvm_unreachable("Unexpected opcode");
  case ISD::ANY_EXTEND:
    return Op;
  case ISD::ZERO_EXTEND:
    return DAG.getZeroExtendInReg(Op, DL, NarrowVT);
  case ISD::SIGN_EXTEND:
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
                       Op, DAG.getValueType(NarrowVT));
  }
}
static unsigned convertIntLogicToFPLogicOpcode(unsigned Opcode) {
  unsigned FPOpcode;
  switch (Opcode) {
  default: llvm_unreachable("Unexpected input node for FP logic conversion");
  case ISD::AND: FPOpcode = X86ISD::FAND; break;
  case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
  case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
  }
  return FPOpcode;
}
/// If both input operands of a logic op are being cast from floating-point
/// types or FP compares, try to convert this into a floating-point logic node
/// to avoid unnecessary moves from SSE to integer registers.
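/// For example (illustrative):
///   xor (bitcast f32:X to i32), (bitcast f32:Y to i32)
///     --> bitcast (FXOR X, Y) to i32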
static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  SDLoc DL(N);
  if (!((N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) ||
        (N0.getOpcode() == ISD::SETCC && N1.getOpcode() == ISD::SETCC)))
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N10 = N1.getOperand(0);
  EVT N00Type = N00.getValueType();
  EVT N10Type = N10.getValueType();

  // Ensure that both types are the same and are legal scalar fp types.
  if (N00Type != N10Type || !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
                              (Subtarget.hasSSE2() && N00Type == MVT::f64) ||
                              (Subtarget.hasFP16() && N00Type == MVT::f16)))
    return SDValue();

  if (N0.getOpcode() == ISD::BITCAST && !DCI.isBeforeLegalizeOps()) {
    unsigned FPOpcode = convertIntLogicToFPLogicOpcode(N->getOpcode());
    SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
    return DAG.getBitcast(VT, FPLogic);
  }

  if (VT != MVT::i1 || N0.getOpcode() != ISD::SETCC || !N0.hasOneUse() ||
      N1.getOpcode() != ISD::SETCC || !N1.hasOneUse())
    return SDValue();

  ISD::CondCode CC0 = cast<CondCodeSDNode>(N0.getOperand(2))->get();
  ISD::CondCode CC1 = cast<CondCodeSDNode>(N1.getOperand(2))->get();

  // The vector ISA for FP predicates is incomplete before AVX, so converting
  // COMIS* to CMPS* may not be a win before AVX.
  if (!Subtarget.hasAVX() &&
      !(cheapX86FSETCC_SSE(CC0) && cheapX86FSETCC_SSE(CC1)))
    return SDValue();

  // Convert scalar FP compares and logic to vector compares (COMIS* to CMPS*)
  // and vector logic:
  // logic (setcc N00, N01), (setcc N10, N11) -->
  // extelt (logic (setcc (s2v N00), (s2v N01)), setcc (s2v N10), (s2v N11))), 0
  unsigned NumElts = 128 / N00Type.getSizeInBits();
  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), N00Type, NumElts);
  EVT BoolVecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
  SDValue ZeroIndex = DAG.getVectorIdxConstant(0, DL);
  SDValue N01 = N0.getOperand(1);
  SDValue N11 = N1.getOperand(1);
  SDValue Vec00 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N00);
  SDValue Vec01 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N01);
  SDValue Vec10 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N10);
  SDValue Vec11 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N11);
  SDValue Setcc0 = DAG.getSetCC(DL, BoolVecVT, Vec00, Vec01, CC0);
  SDValue Setcc1 = DAG.getSetCC(DL, BoolVecVT, Vec10, Vec11, CC1);
  SDValue Logic = DAG.getNode(N->getOpcode(), DL, BoolVecVT, Setcc0, Setcc1);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Logic, ZeroIndex);
}
// Attempt to fold BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y))
// to reduce XMM->GPR traffic.
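// For example (illustrative): or (movmsk v4f32:X), (movmsk v4f32:Y) becomes
// movmsk (FOR X, Y), since MOVMSK just collects the per-lane sign bits.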
static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {
  unsigned Opc = N->getOpcode();
  assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
         "Unexpected bit opcode");

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Both operands must be single use MOVMSK.
  if (N0.getOpcode() != X86ISD::MOVMSK || !N0.hasOneUse() ||
      N1.getOpcode() != X86ISD::MOVMSK || !N1.hasOneUse())
    return SDValue();

  SDValue Vec0 = N0.getOperand(0);
  SDValue Vec1 = N1.getOperand(0);
  EVT VecVT0 = Vec0.getValueType();
  EVT VecVT1 = Vec1.getValueType();

  // Both MOVMSK operands must be from vectors of the same size and same
  // element size, but it's OK for an fp/int difference.
  if (VecVT0.getSizeInBits() != VecVT1.getSizeInBits() ||
      VecVT0.getScalarSizeInBits() != VecVT1.getScalarSizeInBits())
    return SDValue();

  SDLoc DL(N);
  unsigned VecOpc =
      VecVT0.isFloatingPoint() ? convertIntLogicToFPLogicOpcode(Opc) : Opc;
  SDValue Result =
      DAG.getNode(VecOpc, DL, VecVT0, Vec0, DAG.getBitcast(VecVT0, Vec1));
  return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
}
// Attempt to fold BITOP(SHIFT(X,Z),SHIFT(Y,Z)) -> SHIFT(BITOP(X,Y),Z).
// NOTE: This is a very limited case of what SimplifyUsingDistributiveLaws
// handles in InstCombine.
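// This relies on the identity (X >> C) op (Y >> C) == (X op Y) >> C, which
// holds for AND/OR/XOR with matching logical or arithmetic shifts by the same
// per-element amount C (illustrative summary of the legality argument).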
static SDValue combineBitOpWithShift(SDNode *N, SelectionDAG &DAG) {
  unsigned Opc = N->getOpcode();
  assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
         "Unexpected bit opcode");

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // Both operands must be single use.
  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  // Search for matching shifts.
  SDValue BC0 = peekThroughOneUseBitcasts(N0);
  SDValue BC1 = peekThroughOneUseBitcasts(N1);

  unsigned BCOpc = BC0.getOpcode();
  EVT BCVT = BC0.getValueType();
  if (BCOpc != BC1->getOpcode() || BCVT != BC1.getValueType())
    return SDValue();

  switch (BCOpc) {
  case X86ISD::VSHLI:
  case X86ISD::VSRLI:
  case X86ISD::VSRAI: {
    if (BC0.getOperand(1) != BC1.getOperand(1))
      return SDValue();

    SDLoc DL(N);
    SDValue BitOp =
        DAG.getNode(Opc, DL, BCVT, BC0.getOperand(0), BC1.getOperand(0));
    SDValue Shift = DAG.getNode(BCOpc, DL, BCVT, BitOp, BC0.getOperand(1));
    return DAG.getBitcast(VT, Shift);
  }
  }

  return SDValue();
}
// Attempt to fold:
// BITOP(PACKSS(X,Z),PACKSS(Y,W)) --> PACKSS(BITOP(X,Y),BITOP(Z,W)).
// TODO: Add PACKUS support.
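// This is valid because, with allsignbit inputs, PACKSS is a plain
// per-element truncation, and truncation distributes over bitwise logic
// (illustrative justification; the sign-bit checks below enforce this
// precondition).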
50629 static SDValue combineBitOpWithPACK(SDNode *N, SelectionDAG &DAG) {
50630 unsigned Opc = N->getOpcode();
50631 assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
50632 "Unexpected bit opcode");
50634 SDValue N0 = N->getOperand(0);
50635 SDValue N1 = N->getOperand(1);
50636 EVT VT = N->getValueType(0);
  // Both operands must be single use.
  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  // Search for matching packs.
  N0 = peekThroughOneUseBitcasts(N0);
  N1 = peekThroughOneUseBitcasts(N1);

  if (N0.getOpcode() != X86ISD::PACKSS || N1.getOpcode() != X86ISD::PACKSS)
    return SDValue();

  MVT DstVT = N0.getSimpleValueType();
  if (DstVT != N1.getSimpleValueType())
    return SDValue();

  MVT SrcVT = N0.getOperand(0).getSimpleValueType();
  unsigned NumSrcBits = SrcVT.getScalarSizeInBits();

  // Limit to all-signbits packing.
  if (DAG.ComputeNumSignBits(N0.getOperand(0)) != NumSrcBits ||
      DAG.ComputeNumSignBits(N0.getOperand(1)) != NumSrcBits ||
      DAG.ComputeNumSignBits(N1.getOperand(0)) != NumSrcBits ||
      DAG.ComputeNumSignBits(N1.getOperand(1)) != NumSrcBits)
    return SDValue();

  SDLoc DL(N);
  SDValue LHS = DAG.getNode(Opc, DL, SrcVT, N0.getOperand(0), N1.getOperand(0));
  SDValue RHS = DAG.getNode(Opc, DL, SrcVT, N0.getOperand(1), N1.getOperand(1));
  return DAG.getBitcast(VT, DAG.getNode(X86ISD::PACKSS, DL, DstVT, LHS, RHS));
}
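// Illustrative example (all four inputs assumed to be all-signbits values):
//   (xor (packss %a, %b), (packss %c, %d))
//     --> (packss (xor %a, %c), (xor %b, %d))
// The bitop is applied at the wider source type and only one PACKSS remains,
// which often unlocks further sign-bit based combines.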
/// If this is a zero/all-bits result that is bitwise-anded with a low-bits
/// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
/// with a shift-right to eliminate loading the vector constant mask value.
50672 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
50673 const X86Subtarget &Subtarget) {
50674 SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
50675 SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
50676 EVT VT = Op0.getValueType();
  if (VT != Op1.getValueType() || !VT.isSimple() || !VT.isInteger())
    return SDValue();
50680 // Try to convert an "is positive" signbit masking operation into arithmetic
50681 // shift and "andn". This saves a materialization of a -1 vector constant.
50682 // The "is negative" variant should be handled more generally because it only
50683 // requires "and" rather than "andn":
50684 // and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth - 1), Y
50686 // This is limited to the original type to avoid producing even more bitcasts.
50687 // If the bitcasts can't be eliminated, then it is unlikely that this fold
50688 // will be profitable.
  if (N->getValueType(0) == VT &&
      supportedVectorShiftWithImm(VT, Subtarget, ISD::SRA)) {
    SDValue X, Y;
    if (Op1.getOpcode() == X86ISD::PCMPGT &&
        isAllOnesOrAllOnesSplat(Op1.getOperand(1)) && Op1.hasOneUse()) {
      X = Op1.getOperand(0);
      Y = Op0;
    } else if (Op0.getOpcode() == X86ISD::PCMPGT &&
               isAllOnesOrAllOnesSplat(Op0.getOperand(1)) && Op0.hasOneUse()) {
      X = Op0.getOperand(0);
      Y = Op1;
    } else
      return SDValue();

    SDLoc DL(N);
    SDValue Sra =
        getTargetVShiftByConstNode(X86ISD::VSRAI, DL, VT.getSimpleVT(), X,
                                   VT.getScalarSizeInBits() - 1, DAG);
    return DAG.getNode(X86ISD::ANDNP, DL, VT, Sra, Y);
  }

  APInt SplatVal;
  if (!X86::isConstantSplat(Op1, SplatVal, false) || !SplatVal.isMask())
    return SDValue();

  // Don't prevent creation of ANDN.
  if (isBitwiseNot(Op0))
    return SDValue();

  if (!supportedVectorShiftWithImm(VT, Subtarget, ISD::SRL))
    return SDValue();

  unsigned EltBitWidth = VT.getScalarSizeInBits();
  if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
    return SDValue();

  SDLoc DL(N);
  unsigned ShiftVal = SplatVal.countr_one();
  SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
  SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT, Op0, ShAmt);
  return DAG.getBitcast(N->getValueType(0), Shift);
}
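// Illustrative example of the mask-to-shift fold (v4i32 assumed, with %x
// known to be all-zeros/all-ones per element, i.e. 32 sign bits):
//   (and %x, splat(0x0000FFFF)) --> (vsrli %x, 16)
// which avoids materializing (and loading) the constant mask vector.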
50732 // Get the index node from the lowered DAG of a GEP IR instruction with one
50733 // indexing dimension.
50734 static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
  if (Ld->isIndexed())
    return SDValue();

  SDValue Base = Ld->getBasePtr();
  if (Base.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue ShiftedIndex = Base.getOperand(0);
  if (ShiftedIndex.getOpcode() != ISD::SHL)
    return SDValue();

  return ShiftedIndex.getOperand(0);
}
static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
  if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
    switch (VT.getSizeInBits()) {
    default: return false;
    case 64: return Subtarget.is64Bit();
    case 32: return true;
    }
  }
  return false;
}
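// For reference: BZHI dst, src, idx copies src to dst and zeroes all bits at
// positions >= idx[7:0], i.e. dst = src & ((1 << idx) - 1) for an in-range
// index. That is exactly an 'and' with a low-bits mask selected by an index.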
// This function recognizes cases where the X86 BZHI instruction can replace an
// 'and-load' sequence.
// In the case of loading an integer value from an array of constants defined
// as follows:
//
//   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
//
// and then applying a bitwise 'and' on the result with another input, it is
// equivalent to performing bzhi (zero high bits) on the input, with the same
// index as the load.
50773 static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
50774 const X86Subtarget &Subtarget) {
  MVT VT = Node->getSimpleValueType(0);
  SDLoc dl(Node);
50778 // Check if subtarget has BZHI instruction for the node's type
50779 if (!hasBZHI(Subtarget, VT))
50782 // Try matching the pattern for both operands.
50783 for (unsigned i = 0; i < 2; i++) {
50784 SDValue N = Node->getOperand(i);
    LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());

    // continue if the operand is not a load instruction
    if (!Ld)
      return SDValue();

    const Value *MemOp = Ld->getMemOperand()->getValue();

    if (!MemOp)
      return SDValue();
50796 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
50797 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
50798 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
50800 Constant *Init = GV->getInitializer();
50801 Type *Ty = Init->getType();
50802 if (!isa<ConstantDataArray>(Init) ||
50803 !Ty->getArrayElementType()->isIntegerTy() ||
50804 Ty->getArrayElementType()->getScalarSizeInBits() !=
50805 VT.getSizeInBits() ||
50806 Ty->getArrayNumElements() >
50807 Ty->getArrayElementType()->getScalarSizeInBits())
50810 // Check if the array's constant elements are suitable to our case.
50811 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
50812 bool ConstantsMatch = true;
50813 for (uint64_t j = 0; j < ArrayElementCount; j++) {
50814 auto *Elem = cast<ConstantInt>(Init->getAggregateElement(j));
          if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
            ConstantsMatch = false;
            break;
          }
        }
        if (!ConstantsMatch)
          return SDValue();
50823 // Do the transformation (For 32-bit type):
50824 // -> (and (load arr[idx]), inp)
50825 // <- (and (srl 0xFFFFFFFF, (sub 32, idx)))
50826 // that will be replaced with one bzhi instruction.
50827 SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
50828 SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
        // Get the Node which indexes into the array.
        SDValue Index = getIndexFromUnindexedLoad(Ld);
        if (!Index)
          return SDValue();

        Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
50836 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
50837 Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
50839 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
50840 SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
          return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
        }
      }
    }
  }
  return SDValue();
}
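// Illustrative source-level view of the transformation above (hypothetical C):
//   static const int mask[32] = {0x0, 0x1, 0x3, 0x7, /* (1 << i) - 1 ... */};
//   int f(int x, unsigned i) { return x & mask[i]; }
// With BMI2 available, the constant-table load and the 'and' collapse into a
// single 'bzhi' using the load's index register directly.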
// Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef, ...))), C)
// where C is a mask containing the same number of bits as the setcc and where
// the setcc will freely zero the upper bits of the k-register. We can replace
// the undef in the concat with 0s and remove the AND. This mainly helps with
// v2i1/v4i1 setcc being cast to scalar.
50855 static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
50856 const X86Subtarget &Subtarget) {
50857 assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
50859 EVT VT = N->getValueType(0);
  // Make sure this is an AND with constant. We will check the value of the
  // constant later.
  auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C1)
    return SDValue();
50867 // This is implied by the ConstantSDNode.
50868 assert(!VT.isVector() && "Expected scalar VT!");
50870 SDValue Src = N->getOperand(0);
  if (!Src.hasOneUse())
    return SDValue();
  // (Optionally) peek through any_extend().
  if (Src.getOpcode() == ISD::ANY_EXTEND) {
    if (!Src.getOperand(0).hasOneUse())
      return SDValue();
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() != ISD::BITCAST || !Src.getOperand(0).hasOneUse())
    return SDValue();
50884 Src = Src.getOperand(0);
50885 EVT SrcVT = Src.getValueType();
50887 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
      !TLI.isTypeLegal(SrcVT))
    return SDValue();

  if (Src.getOpcode() != ISD::CONCAT_VECTORS)
    return SDValue();
50895 // We only care about the first subvector of the concat, we expect the
50896 // other subvectors to be ignored due to the AND if we make the change.
50897 SDValue SubVec = Src.getOperand(0);
50898 EVT SubVecVT = SubVec.getValueType();
50900 // The RHS of the AND should be a mask with as many bits as SubVec.
  if (!TLI.isTypeLegal(SubVecVT) ||
      !C1->getAPIntValue().isMask(SubVecVT.getVectorNumElements()))
    return SDValue();
  // First subvector should be a setcc with a legal result type or an AND
  // containing at least one setcc with a legal result type.
  auto IsLegalSetCC = [&](SDValue V) {
    if (V.getOpcode() != ISD::SETCC)
      return false;
    EVT SetccVT = V.getOperand(0).getValueType();
    if (!TLI.isTypeLegal(SetccVT) ||
        !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
      return false;
    if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
      return false;
    return true;
  };
  if (!(IsLegalSetCC(SubVec) || (SubVec.getOpcode() == ISD::AND &&
                                 (IsLegalSetCC(SubVec.getOperand(0)) ||
                                  IsLegalSetCC(SubVec.getOperand(1))))))
    return SDValue();
50923 // We passed all the checks. Rebuild the concat_vectors with zeroes
50924 // and cast it back to VT.
  SDLoc dl(N);
  SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
                              DAG.getConstant(0, dl, SubVecVT));
  Ops[0] = SubVec;
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, Ops);
  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcVT.getSizeInBits());
  return DAG.getZExtOrTrunc(DAG.getBitcast(IntVT, Concat), dl, VT);
}
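// Illustrative example (AVX512 assumed): for a v2i1 setcc that was
// concatenated with undef up to v8i1 and bitcast to i8,
//   (and (bitcast (concat (v2i1 setcc), undef, undef, undef)), 0x3)
// the AND can be dropped once the undef subvectors are rebuilt as zero
// constants, since the setcc already leaves the upper k-register bits zero.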
50935 static SDValue getBMIMatchingOp(unsigned Opc, SelectionDAG &DAG,
50936 SDValue OpMustEq, SDValue Op, unsigned Depth) {
50937 // We don't want to go crazy with the recursion here. This isn't a super
50938 // important optimization.
50939 static constexpr unsigned kMaxDepth = 2;
  // Only do this re-ordering if op has one use.
  if (!Op.hasOneUse())
    return SDValue();

  SDLoc DL(Op);

  // If we hit another associative op, recurse further.
  if (Op.getOpcode() == Opc) {
    // Done recursing.
    if (Depth++ >= kMaxDepth)
      return SDValue();

    for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx)
      if (SDValue R =
              getBMIMatchingOp(Opc, DAG, OpMustEq, Op.getOperand(OpIdx), Depth))
        return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(), R,
                           Op.getOperand(1 - OpIdx));
50958 } else if (Op.getOpcode() == ISD::SUB) {
50959 if (Opc == ISD::AND) {
50960 // BLSI: (and x, (sub 0, x))
50961 if (isNullConstant(Op.getOperand(0)) && Op.getOperand(1) == OpMustEq)
50962 return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
50964 // Opc must be ISD::AND or ISD::XOR
50965 // BLSR: (and x, (sub x, 1))
50966 // BLSMSK: (xor x, (sub x, 1))
50967 if (isOneConstant(Op.getOperand(1)) && Op.getOperand(0) == OpMustEq)
50968 return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
50970 } else if (Op.getOpcode() == ISD::ADD) {
50971 // Opc must be ISD::AND or ISD::XOR
50972 // BLSR: (and x, (add x, -1))
50973 // BLSMSK: (xor x, (add x, -1))
    if (isAllOnesConstant(Op.getOperand(1)) && Op.getOperand(0) == OpMustEq)
      return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
  }

  return SDValue();
}
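// For reference, the BMI patterns matched above:
//   BLSI   = (and x, (sub 0, x))   - isolate lowest set bit
//   BLSR   = (and x, (sub x, 1))   - reset lowest set bit
//   BLSMSK = (xor x, (sub x, 1))   - mask up to and including lowest set bit
// The recursion re-associates a chain like (and a, (and x, (sub 0, x))) so
// that the BLSI candidate becomes a direct operand of the AND.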
50980 static SDValue combineBMILogicOp(SDNode *N, SelectionDAG &DAG,
50981 const X86Subtarget &Subtarget) {
50982 EVT VT = N->getValueType(0);
50983 // Make sure this node is a candidate for BMI instructions.
  if (!Subtarget.hasBMI() || !VT.isScalarInteger() ||
      (VT != MVT::i32 && VT != MVT::i64))
    return SDValue();

  assert(N->getOpcode() == ISD::AND || N->getOpcode() == ISD::XOR);
50990 // Try and match LHS and RHS.
50991 for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx)
    if (SDValue OpMatch =
            getBMIMatchingOp(N->getOpcode(), DAG, N->getOperand(OpIdx),
                             N->getOperand(1 - OpIdx), 0))
      return OpMatch;

  return SDValue();
}
50999 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
51000 TargetLowering::DAGCombinerInfo &DCI,
51001 const X86Subtarget &Subtarget) {
51002 SDValue N0 = N->getOperand(0);
51003 SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc dl(N);
51006 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51008 // If this is SSE1 only convert to FAND to avoid scalarization.
51009 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
51010 return DAG.getBitcast(MVT::v4i32,
51011 DAG.getNode(X86ISD::FAND, dl, MVT::v4f32,
51012 DAG.getBitcast(MVT::v4f32, N0),
                                       DAG.getBitcast(MVT::v4f32, N1)));
  }
51016 // Use a 32-bit and+zext if upper bits known zero.
51017 if (VT == MVT::i64 && Subtarget.is64Bit() && !isa<ConstantSDNode>(N1)) {
51018 APInt HiMask = APInt::getHighBitsSet(64, 32);
51019 if (DAG.MaskedValueIsZero(N1, HiMask) ||
51020 DAG.MaskedValueIsZero(N0, HiMask)) {
51021 SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N0);
51022 SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N1);
51023 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
51024 DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
51028 // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
51029 // TODO: Support multiple SrcOps.
51030 if (VT == MVT::i1) {
51031 SmallVector<SDValue, 2> SrcOps;
51032 SmallVector<APInt, 2> SrcPartials;
51033 if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps, &SrcPartials) &&
51034 SrcOps.size() == 1) {
51035 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
51036 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
51037 SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
      if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
        Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
      if (Mask) {
        assert(SrcPartials[0].getBitWidth() == NumElts &&
               "Unexpected partial reduction mask");
        SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
        Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
        return DAG.getSetCC(dl, MVT::i1, Mask, PartialBits, ISD::SETEQ);
      }
    }
  }
  // InstCombine converts:
  //    `(-x << C0) & C1`
  // to
  //    `(x * (Pow2_Ceil(C1) - (1 << C0))) & C1`
  // This saves an IR instruction but on x86 the neg/shift version is preferable
  // so undo the transform.
51057 if (N0.getOpcode() == ISD::MUL && N0.hasOneUse()) {
51058 // TODO: We don't actually need a splat for this, we just need the checks to
51059 // hold for each element.
51060 ConstantSDNode *N1C = isConstOrConstSplat(N1, /*AllowUndefs*/ true,
51061 /*AllowTruncation*/ false);
51062 ConstantSDNode *N01C =
51063 isConstOrConstSplat(N0.getOperand(1), /*AllowUndefs*/ true,
                            /*AllowTruncation*/ false);
    if (N1C && N01C) {
      const APInt &MulC = N01C->getAPIntValue();
      const APInt &AndC = N1C->getAPIntValue();
      APInt MulCLowBit = MulC & (-MulC);
      if (MulC.uge(AndC) && !MulC.isPowerOf2() &&
          (MulCLowBit + MulC).isPowerOf2()) {
        SDValue Neg = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT),
                                  N0.getOperand(0));
        int32_t MulCLowBitLog = MulCLowBit.exactLogBase2();
        assert(MulCLowBitLog != -1 &&
               "Isolated lowbit is somehow not a power of 2!");
        SDValue Shift = DAG.getNode(ISD::SHL, dl, VT, Neg,
                                    DAG.getConstant(MulCLowBitLog, dl, VT));
        return DAG.getNode(ISD::AND, dl, VT, Shift, N1);
      }
    }
  }
  if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
    return V;

  if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
    return R;

  if (SDValue R = combineBitOpWithShift(N, DAG))
    return R;

  if (SDValue R = combineBitOpWithPACK(N, DAG))
    return R;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
    return FPLogic;

  if (SDValue R = combineAndShuffleNot(N, DAG, Subtarget))
    return R;

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
    return R;

  if (SDValue R = combineAndNotIntoANDNP(N, DAG))
    return R;

  if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
    return ShiftRight;

  if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
    return R;
51116 // fold (and (mul x, c1), c2) -> (mul x, (and c1, c2))
51117 // iff c2 is all/no bits mask - i.e. a select-with-zero mask.
51118 // TODO: Handle PMULDQ/PMULUDQ/VPMADDWD/VPMADDUBSW?
51119 if (VT.isVector() && getTargetConstantFromNode(N1)) {
51120 unsigned Opc0 = N0.getOpcode();
51121 if ((Opc0 == ISD::MUL || Opc0 == ISD::MULHU || Opc0 == ISD::MULHS) &&
51122 getTargetConstantFromNode(N0.getOperand(1)) &&
51123 DAG.ComputeNumSignBits(N1) == VT.getScalarSizeInBits() &&
51124 N0->hasOneUse() && N0.getOperand(1)->hasOneUse()) {
51125 SDValue MaskMul = DAG.getNode(ISD::AND, dl, VT, N0.getOperand(1), N1);
      return DAG.getNode(Opc0, dl, VT, N0.getOperand(0), MaskMul);
    }
  }
51130 // Fold AND(SRL(X,Y),1) -> SETCC(BT(X,Y), COND_B) iff Y is not a constant
51131 // avoids slow variable shift (moving shift amount to ECX etc.)
  if (isOneConstant(N1) && N0->hasOneUse()) {
    SDValue Src = N0;
    while ((Src.getOpcode() == ISD::ZERO_EXTEND ||
51135 Src.getOpcode() == ISD::TRUNCATE) &&
51136 Src.getOperand(0)->hasOneUse())
51137 Src = Src.getOperand(0);
51138 bool ContainsNOT = false;
51139 X86::CondCode X86CC = X86::COND_B;
51140 // Peek through AND(NOT(SRL(X,Y)),1).
51141 if (isBitwiseNot(Src)) {
51142 Src = Src.getOperand(0);
51143 X86CC = X86::COND_AE;
      ContainsNOT = true;
    }
51146 if (Src.getOpcode() == ISD::SRL &&
51147 !isa<ConstantSDNode>(Src.getOperand(1))) {
51148 SDValue BitNo = Src.getOperand(1);
51149 Src = Src.getOperand(0);
51150 // Peek through AND(SRL(NOT(X),Y),1).
51151 if (isBitwiseNot(Src)) {
51152 Src = Src.getOperand(0);
51153 X86CC = X86CC == X86::COND_AE ? X86::COND_B : X86::COND_AE;
        ContainsNOT = true;
      }
51156 // If we have BMI2 then SHRX should be faster for i32/i64 cases.
51157 if (!(Subtarget.hasBMI2() && !ContainsNOT && VT.getSizeInBits() >= 32))
51158 if (SDValue BT = getBT(Src, BitNo, dl, DAG))
          return DAG.getZExtOrTrunc(getSETCC(X86CC, BT, dl, DAG), dl, VT);
    }
  }
51163 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    // Attempt to recursively combine a bitmask AND with shuffles.
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
51169 // If either operand is a constant mask, then only the elements that aren't
51170 // zero are actually demanded by the other operand.
    auto GetDemandedMasks = [&](SDValue Op) {
      APInt UndefElts;
      SmallVector<APInt> EltBits;
51174 int NumElts = VT.getVectorNumElements();
51175 int EltSizeInBits = VT.getScalarSizeInBits();
51176 APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
51177 APInt DemandedElts = APInt::getAllOnes(NumElts);
      if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
                                        EltBits)) {
        DemandedBits.clearAllBits();
51181 DemandedElts.clearAllBits();
51182 for (int I = 0; I != NumElts; ++I) {
51183 if (UndefElts[I]) {
51184 // We can't assume an undef src element gives an undef dst - the
51185 // other src might be zero.
51186 DemandedBits.setAllBits();
51187 DemandedElts.setBit(I);
51188 } else if (!EltBits[I].isZero()) {
51189 DemandedBits |= EltBits[I];
            DemandedElts.setBit(I);
          }
        }
      }
      return std::make_pair(DemandedBits, DemandedElts);
    };
51196 APInt Bits0, Elts0;
51197 APInt Bits1, Elts1;
51198 std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
51199 std::tie(Bits1, Elts1) = GetDemandedMasks(N0);
51201 if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
51202 TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
51203 TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
51204 TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
51205 if (N->getOpcode() != ISD::DELETED_NODE)
51206 DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
51210 SDValue NewN0 = TLI.SimplifyMultipleUseDemandedBits(N0, Bits0, Elts0, DAG);
51211 SDValue NewN1 = TLI.SimplifyMultipleUseDemandedBits(N1, Bits1, Elts1, DAG);
51212 if (NewN0 || NewN1)
51213 return DAG.getNode(ISD::AND, dl, VT, NewN0 ? NewN0 : N0,
                         NewN1 ? NewN1 : N1);
  }
51217 // Attempt to combine a scalar bitmask AND with an extracted shuffle.
51218 if ((VT.getScalarSizeInBits() % 8) == 0 &&
51219 N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
51220 isa<ConstantSDNode>(N0.getOperand(1))) {
51221 SDValue BitMask = N1;
51222 SDValue SrcVec = N0.getOperand(0);
51223 EVT SrcVecVT = SrcVec.getValueType();
    // Check that the constant bitmask masks whole bytes.
    APInt UndefElts;
    SmallVector<APInt, 64> EltBits;
51228 if (VT == SrcVecVT.getScalarType() && N0->isOnlyUserOf(SrcVec.getNode()) &&
51229 getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
        llvm::all_of(EltBits, [](const APInt &M) {
          return M.isZero() || M.isAllOnes();
        })) {
51233 unsigned NumElts = SrcVecVT.getVectorNumElements();
51234 unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
51235 unsigned Idx = N0.getConstantOperandVal(1);
51237 // Create a root shuffle mask from the byte mask and the extracted index.
51238 SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
      for (unsigned i = 0; i != Scale; ++i) {
        if (UndefElts[i])
          continue;
        int VecIdx = Scale * Idx + i;
        ShuffleMask[VecIdx] = EltBits[i].isZero() ? SM_SentinelZero : VecIdx;
      }
51246 if (SDValue Shuffle = combineX86ShufflesRecursively(
51247 {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
51248 X86::MaxShuffleCombineDepth,
51249 /*HasVarMask*/ false, /*AllowVarCrossLaneMask*/ true,
51250 /*AllowVarPerLaneMask*/ true, DAG, Subtarget))
        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Shuffle,
                           N0.getOperand(1));
    }
  }

  if (SDValue R = combineBMILogicOp(N, DAG, Subtarget))
    return R;

  return SDValue();
}
51262 // Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
51263 static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
51264 const X86Subtarget &Subtarget) {
51265 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
51267 MVT VT = N->getSimpleValueType(0);
51268 unsigned EltSizeInBits = VT.getScalarSizeInBits();
51269 if (!VT.isVector() || (EltSizeInBits % 8) != 0)
51272 SDValue N0 = peekThroughBitcasts(N->getOperand(0));
51273 SDValue N1 = peekThroughBitcasts(N->getOperand(1));
  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
    return SDValue();
51277 // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
51278 // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
51279 if (!(Subtarget.hasXOP() || useVPTERNLOG(Subtarget, VT) ||
        !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
    return SDValue();
51283 // Attempt to extract constant byte masks.
51284 APInt UndefElts0, UndefElts1;
51285 SmallVector<APInt, 32> EltBits0, EltBits1;
  if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
                                     false, false))
    return SDValue();
  if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
                                     false, false))
    return SDValue();
  for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
    // TODO - add UNDEF elts support.
    if (UndefElts0[i] || UndefElts1[i])
      return SDValue();
    if (EltBits0[i] != ~EltBits1[i])
      return SDValue();
  }

  SDLoc DL(N);

  if (useVPTERNLOG(Subtarget, VT)) {
51304 // Emit a VPTERNLOG node directly - 0xCA is the imm code for A?B:C.
51305 // VPTERNLOG is only available as vXi32/64-bit types.
    MVT OpSVT = EltSizeInBits == 32 ? MVT::i32 : MVT::i64;
    MVT OpVT =
        MVT::getVectorVT(OpSVT, VT.getSizeInBits() / OpSVT.getSizeInBits());
51309 SDValue A = DAG.getBitcast(OpVT, N0.getOperand(1));
51310 SDValue B = DAG.getBitcast(OpVT, N0.getOperand(0));
51311 SDValue C = DAG.getBitcast(OpVT, N1.getOperand(0));
51312 SDValue Imm = DAG.getTargetConstant(0xCA, DL, MVT::i8);
    SDValue Res = getAVX512Node(X86ISD::VPTERNLOG, DL, OpVT, {A, B, C, Imm},
                                DAG, Subtarget);
    return DAG.getBitcast(VT, Res);
  }

  SDValue X = N->getOperand(0);
  SDValue Y =
      DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
                  DAG.getBitcast(VT, N1.getOperand(0)));
  return DAG.getNode(ISD::OR, DL, VT, X, Y);
}
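// For reference: VPTERNLOG's immediate is a 3-input truth table indexed by
// (A<<2)|(B<<1)|C, and 0xCA encodes A ? B : C. The bit-select
// (or (and A, B), (andnp A, C)) therefore becomes one ternary-logic
// instruction on AVX512 targets.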
51325 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
51326 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
  if (N->getOpcode() != ISD::OR)
    return false;
51330 SDValue N0 = N->getOperand(0);
51331 SDValue N1 = N->getOperand(1);
51333 // Canonicalize AND to LHS.
  if (N1.getOpcode() == ISD::AND)
    std::swap(N0, N1);
51337 // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
    return false;
51341 Mask = N1.getOperand(0);
51342 X = N1.getOperand(1);
51344 // Check to see if the mask appeared in both the AND and ANDNP.
51345 if (N0.getOperand(0) == Mask)
51346 Y = N0.getOperand(1);
  else if (N0.getOperand(1) == Mask)
    Y = N0.getOperand(0);
  else
    return false;

  // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting on the
  // ANDNP combine allows other combines to happen that prevent matching.
  return true;
}
// Try to fold:
//   (or (and (m, y), (pandn m, x)))
// into:
//   (vselect m, x, y)
// As a special case, try to fold:
//   (or (and (m, (sub 0, x)), (pandn m, x)))
// into:
//   (sub (xor X, M), M)
51365 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
51366 const X86Subtarget &Subtarget) {
51367 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
51369 EVT VT = N->getValueType(0);
51370 if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
        (VT.is256BitVector() && Subtarget.hasInt256())))
    return SDValue();
51374 SDValue X, Y, Mask;
51375 if (!matchLogicBlend(N, X, Y, Mask))
51378 // Validate that X, Y, and Mask are bitcasts, and see through them.
51379 Mask = peekThroughBitcasts(Mask);
51380 X = peekThroughBitcasts(X);
51381 Y = peekThroughBitcasts(Y);
51383 EVT MaskVT = Mask.getValueType();
51384 unsigned EltBits = MaskVT.getScalarSizeInBits();
51386 // TODO: Attempt to handle floating point cases as well?
  if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
    return SDValue();

  SDLoc DL(N);
51392 // Attempt to combine to conditional negate: (sub (xor X, M), M)
  if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
                                                           DAG, Subtarget))
    return Res;
51397 // PBLENDVB is only available on SSE 4.1.
  if (!Subtarget.hasSSE41())
    return SDValue();
51401 // If we have VPTERNLOG we should prefer that since PBLENDVB is multiple uops.
  if (Subtarget.hasVLX())
    return SDValue();
51405 MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
51407 X = DAG.getBitcast(BlendVT, X);
51408 Y = DAG.getBitcast(BlendVT, Y);
51409 Mask = DAG.getBitcast(BlendVT, Mask);
51410 Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
51411 return DAG.getBitcast(VT, Mask);
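// Illustrative example (SSE4.1, v16i8 assumed) of the blend fold: with a mask
// M whose elements are all-ones or all-zeros,
//   (or (and M, Y), (andnp M, X)) --> (pblendvb X, Y, M)
// PBLENDVB picks each byte from Y where M's byte has its sign bit set, and
// from X elsewhere, matching the select built above.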
// Helper function for combineOrCmpEqZeroToCtlzSrl
// Transform:
//   seteq(cmp x, 0)
// into:
//   srl(ctlz x), log2(bitsize(x))
// Input pattern is checked by caller.
51420 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) {
51421 SDValue Cmp = Op.getOperand(1);
51422 EVT VT = Cmp.getOperand(0).getValueType();
  unsigned Log2b = Log2_32(VT.getSizeInBits());
  SDLoc dl(Op);
  SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
51426 // The result of the shift is true or false, and on X86, the 32-bit
51427 // encoding of shr and lzcnt is more desirable.
51428 SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
  SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
                            DAG.getConstant(Log2b, dl, MVT::i8));
  return Scc;
}
// Try to transform:
//   zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
// into:
//   srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
51438 // Will also attempt to match more generic cases, eg:
51439 // zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
51440 // Only applies if the target supports the FastLZCNT feature.
51441 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
51442 TargetLowering::DAGCombinerInfo &DCI,
51443 const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
    return SDValue();
51447 auto isORCandidate = [](SDValue N) {
51448 return (N->getOpcode() == ISD::OR && N->hasOneUse());
51451 // Check the zero extend is extending to 32-bit or more. The code generated by
51452 // srl(ctlz) for 16-bit or less variants of the pattern would require extra
51453 // instructions to clear the upper bits.
  if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
      !isORCandidate(N->getOperand(0)))
    return SDValue();
51458 // Check the node matches: setcc(eq, cmp 0)
51459 auto isSetCCCandidate = [](SDValue N) {
51460 return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
51461 X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
51462 N->getOperand(1).getOpcode() == X86ISD::CMP &&
51463 isNullConstant(N->getOperand(1).getOperand(1)) &&
           N->getOperand(1).getValueType().bitsGE(MVT::i32);
  };
51467 SDNode *OR = N->getOperand(0).getNode();
51468 SDValue LHS = OR->getOperand(0);
51469 SDValue RHS = OR->getOperand(1);
51471 // Save nodes matching or(or, setcc(eq, cmp 0)).
51472 SmallVector<SDNode *, 2> ORNodes;
51473 while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
51474 (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
51475 ORNodes.push_back(OR);
51476 OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
51477 LHS = OR->getOperand(0);
51478 RHS = OR->getOperand(1);
51481 // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
  if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
      !isORCandidate(SDValue(OR, 0)))
    return SDValue();
  // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern, try to lower it
  // to
  // or(srl(ctlz),srl(ctlz)).
51489 // The dag combiner can then fold it into:
51490 // srl(or(ctlz, ctlz)).
51491 SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, DAG);
51492 SDValue Ret, NewRHS;
51493 if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG)))
    Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, NewLHS, NewRHS);

  if (!Ret)
    return SDValue();
51499 // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
51500 while (!ORNodes.empty()) {
51501 OR = ORNodes.pop_back_val();
51502 LHS = OR->getOperand(0);
51503 RHS = OR->getOperand(1);
51504 // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
51505 if (RHS->getOpcode() == ISD::OR)
51506 std::swap(LHS, RHS);
    NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG);
    if (!NewRHS)
      return SDValue();
    Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, Ret, NewRHS);
  }
51513 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
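// Illustrative example of the whole transform for i32 inputs on a FastLZCNT
// target: zext(or (x == 0), (y == 0)) becomes
//   srl(or(lzcnt x, lzcnt y), 5)
// because lzcnt returns 32 (only bit 5 set) exactly when its input is zero,
// while any nonzero input yields a count below 32 with bit 5 clear.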
51516 static SDValue foldMaskedMergeImpl(SDValue And0_L, SDValue And0_R,
51517 SDValue And1_L, SDValue And1_R,
51518 const SDLoc &DL, SelectionDAG &DAG) {
  if (!isBitwiseNot(And0_L, true) || !And0_L->hasOneUse())
    return SDValue();
51521 SDValue NotOp = And0_L->getOperand(0);
51522 if (NotOp == And1_R)
51523 std::swap(And1_R, And1_L);
  if (NotOp != And1_L)
    return SDValue();
51527 // (~(NotOp) & And0_R) | (NotOp & And1_R)
51528 // --> ((And0_R ^ And1_R) & NotOp) ^ And1_R
51529 EVT VT = And1_L->getValueType(0);
51530 SDValue Freeze_And0_R = DAG.getNode(ISD::FREEZE, SDLoc(), VT, And0_R);
51531 SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, And1_R, Freeze_And0_R);
51532 SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
  SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, Freeze_And0_R);
  return Xor1;
}
51537 /// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
/// equivalent `((x ^ y) & m) ^ y` pattern.
51539 /// This is typically a better representation for targets without a fused
51540 /// "and-not" operation. This function is intended to be called from a
51541 /// `TargetLowering::PerformDAGCombine` callback on `ISD::OR` nodes.
51542 static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG) {
51543 // Note that masked-merge variants using XOR or ADD expressions are
51544 // normalized to OR by InstCombine so we only check for OR.
51545 assert(Node->getOpcode() == ISD::OR && "Must be called with ISD::OR node");
51546 SDValue N0 = Node->getOperand(0);
  if (N0->getOpcode() != ISD::AND || !N0->hasOneUse())
    return SDValue();
51549 SDValue N1 = Node->getOperand(1);
  SDValue N1 = Node->getOperand(1);
  if (N1->getOpcode() != ISD::AND || !N1->hasOneUse())
    return SDValue();

  SDLoc DL(Node);
  SDValue N00 = N0->getOperand(0);
51555 SDValue N01 = N0->getOperand(1);
51556 SDValue N10 = N1->getOperand(0);
51557 SDValue N11 = N1->getOperand(1);
  if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
    return Result;
  if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
    return Result;
  return SDValue();
}
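// Illustrative cost comparison: without BMI's ANDN, (m & x) | (~m & y) takes
// NOT+AND+AND+OR (four ops); the rewritten ((x ^ y) & m) ^ y is XOR+AND+XOR
// (three ops) with no NOT on the critical path.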
51569 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
51570 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
51571 /// with CMP+{ADC, SBB}.
51572 /// Also try (ADD/SUB)+(AND(SRL,1)) bit extraction pattern with BT+{ADC, SBB}.
51573 static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT,
                                          SDValue X, SDValue Y,
                                          SelectionDAG &DAG,
                                          bool ZeroSecondOpOnly = false) {
51577 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
51580 // Look through a one-use zext.
51581 if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse())
    Y = Y.getOperand(0);

  X86::CondCode CC;
  SDValue EFLAGS;
  if (Y.getOpcode() == X86ISD::SETCC && Y.hasOneUse()) {
    CC = (X86::CondCode)Y.getConstantOperandVal(0);
    EFLAGS = Y.getOperand(1);
  } else if (Y.getOpcode() == ISD::AND && isOneConstant(Y.getOperand(1)) &&
             Y.hasOneUse()) {
    EFLAGS = LowerAndToBT(Y, ISD::SETNE, DL, DAG, CC);
  }

  if (!EFLAGS)
    return SDValue();
51597 // If X is -1 or 0, then we have an opportunity to avoid constants required in
51598 // the general case below.
51599 auto *ConstantX = dyn_cast<ConstantSDNode>(X);
51600 if (ConstantX && !ZeroSecondOpOnly) {
51601 if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnes()) ||
51602 (IsSub && CC == X86::COND_B && ConstantX->isZero())) {
51603 // This is a complicated way to get -1 or 0 from the carry flag:
51604 // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
51605 // 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         EFLAGS);
    }
51611 if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnes()) ||
51612 (IsSub && CC == X86::COND_A && ConstantX->isZero())) {
51613 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
51614 EFLAGS.getValueType().isInteger() &&
51615 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
51616 // Swap the operands of a SUB, and we have the same pattern as above.
51617 // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
51618 // 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB
51619 SDValue NewSub = DAG.getNode(
51620 X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
51621 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
51622 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
        return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                           DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                           NewEFLAGS);
      }
    }
  }
51630 if (CC == X86::COND_B) {
51631 // X + SETB Z --> adc X, 0
51632 // X - SETB Z --> sbb X, 0
51633 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
51634 DAG.getVTList(VT, MVT::i32), X,
                       DAG.getConstant(0, DL, VT), EFLAGS);
  }

  if (ZeroSecondOpOnly)
    return SDValue();
51641 if (CC == X86::COND_A) {
51642 // Try to convert COND_A into COND_B in an attempt to facilitate
51643 // materializing "setb reg".
51645 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
51646 // cannot take an immediate as its first operand.
51648 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
51649 EFLAGS.getValueType().isInteger() &&
51650 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
      SDValue NewSub =
          DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
51653 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
51654 SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
51655 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
51656 DAG.getVTList(VT, MVT::i32), X,
                         DAG.getConstant(0, DL, VT), NewEFLAGS);
    }
  }
51661 if (CC == X86::COND_AE) {
51662 // X + SETAE --> sbb X, -1
51663 // X - SETAE --> adc X, -1
51664 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
51665 DAG.getVTList(VT, MVT::i32), X,
                       DAG.getConstant(-1, DL, VT), EFLAGS);
  }
51669 if (CC == X86::COND_BE) {
51670 // X + SETBE --> sbb X, -1
51671 // X - SETBE --> adc X, -1
51672 // Try to convert COND_BE into COND_AE in an attempt to facilitate
51673 // materializing "setae reg".
51675 // Do not flip "e <= c", where "c" is a constant, because Cmp instruction
51676 // cannot take an immediate as its first operand.
51678 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
51679 EFLAGS.getValueType().isInteger() &&
51680 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
      SDValue NewSub =
          DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
51683 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
51684 SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
51685 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
51686 DAG.getVTList(VT, MVT::i32), X,
                         DAG.getConstant(-1, DL, VT), NewEFLAGS);
    }
  }

  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();
51694 if (EFLAGS.getOpcode() != X86ISD::CMP || !EFLAGS.hasOneUse() ||
51695 !X86::isZeroNode(EFLAGS.getOperand(1)) ||
      !EFLAGS.getOperand(0).getValueType().isInteger())
    return SDValue();
51699 SDValue Z = EFLAGS.getOperand(0);
51700 EVT ZVT = Z.getValueType();
51702 // If X is -1 or 0, then we have an opportunity to avoid constants required in
  // the general case below.
  if (ConstantX) {
    // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
    // fake operands:
    //  0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
    // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
51709 if ((IsSub && CC == X86::COND_NE && ConstantX->isZero()) ||
51710 (!IsSub && CC == X86::COND_E && ConstantX->isAllOnes())) {
51711 SDValue Zero = DAG.getConstant(0, DL, ZVT);
51712 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
51713 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
51714 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
51715 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         SDValue(Neg.getNode(), 1));
    }
51719 // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
51720 // with fake operands:
51721 // 0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
51722 // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
51723 if ((IsSub && CC == X86::COND_E && ConstantX->isZero()) ||
51724 (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnes())) {
51725 SDValue One = DAG.getConstant(1, DL, ZVT);
51726 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
51727 SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
51728 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         Cmp1.getValue(1));
    }
  }
51734 // (cmp Z, 1) sets the carry flag if Z is 0.
51735 SDValue One = DAG.getConstant(1, DL, ZVT);
51736 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
51737 SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
51739 // Add the flags type for ADC/SBB nodes.
51740 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
51742 // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
51743 // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
51744 if (CC == X86::COND_NE)
51745 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
51746 DAG.getConstant(-1ULL, DL, VT), Cmp1.getValue(1));
51748 // X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1)
51749 // X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1)
51750 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
                     DAG.getConstant(0, DL, VT), Cmp1.getValue(1));
}
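// Worked example of the tail case above (CC == COND_NE, IsSub == false):
//   X + (Z != 0) --> sbb X, -1, flags(cmp Z, 1)
// (cmp Z, 1) sets CF exactly when Z == 0, and sbb X, -1 computes
// X - (-1) - CF = X + 1 - CF, which is X + 1 when Z != 0 and X when Z == 0,
// as required.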
51754 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
51755 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
51756 /// with CMP+{ADC, SBB}.
51757 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
51758 bool IsSub = N->getOpcode() == ISD::SUB;
51759 SDValue X = N->getOperand(0);
51760 SDValue Y = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, X, Y, DAG))
    return ADCOrSBB;
51767 // Commute and try again (negate the result for subtracts).
  if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, Y, X, DAG)) {
    if (IsSub)
      ADCOrSBB =
          DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), ADCOrSBB);
    return ADCOrSBB;
  }

  return SDValue();
}
51778 static SDValue combineOrXorWithSETCC(SDNode *N, SDValue N0, SDValue N1,
51779 SelectionDAG &DAG) {
51780 assert((N->getOpcode() == ISD::XOR || N->getOpcode() == ISD::OR) &&
51781 "Unexpected opcode");
51783 // Delegate to combineAddOrSubToADCOrSBB if we have:
51785 // (xor/or (zero_extend (setcc)) imm)
51787 // where imm is odd if and only if we have xor, in which case the XOR/OR are
51788 // equivalent to a SUB/ADD, respectively.
51789 if (N0.getOpcode() == ISD::ZERO_EXTEND &&
51790 N0.getOperand(0).getOpcode() == X86ISD::SETCC && N0.hasOneUse()) {
51791 if (auto *N1C = dyn_cast<ConstantSDNode>(N1)) {
51792 bool IsSub = N->getOpcode() == ISD::XOR;
51793 bool N1COdd = N1C->getZExtValue() & 1;
      if (IsSub ? N1COdd : !N1COdd) {
        SDLoc DL(N);
        EVT VT = N->getValueType(0);
        if (SDValue R = combineAddOrSubToADCOrSBB(IsSub, DL, VT, N1, N0, DAG))
          return R;
      }
    }
  }

  return SDValue();
}
51806 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
51807 TargetLowering::DAGCombinerInfo &DCI,
51808 const X86Subtarget &Subtarget) {
51809 SDValue N0 = N->getOperand(0);
51810 SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51815 // If this is SSE1 only convert to FOR to avoid scalarization.
51816 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
51817 return DAG.getBitcast(MVT::v4i32,
51818 DAG.getNode(X86ISD::FOR, dl, MVT::v4f32,
51819 DAG.getBitcast(MVT::v4f32, N0),
                                      DAG.getBitcast(MVT::v4f32, N1)));
  }
51823 // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
51824 // TODO: Support multiple SrcOps.
51825 if (VT == MVT::i1) {
51826 SmallVector<SDValue, 2> SrcOps;
51827 SmallVector<APInt, 2> SrcPartials;
51828 if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps, &SrcPartials) &&
51829 SrcOps.size() == 1) {
51830 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
51831 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
51832 SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
      if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
        Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
      if (Mask) {
        assert(SrcPartials[0].getBitWidth() == NumElts &&
               "Unexpected partial reduction mask");
        SDValue ZeroBits = DAG.getConstant(0, dl, MaskVT);
        SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
        Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
        return DAG.getSetCC(dl, MVT::i1, Mask, ZeroBits, ISD::SETNE);
      }
    }
  }
  if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
    return R;

  if (SDValue R = combineBitOpWithShift(N, DAG))
    return R;

  if (SDValue R = combineBitOpWithPACK(N, DAG))
    return R;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
    return FPLogic;

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
    return R;

  if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
    return R;

  if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
    return R;
51870 // (0 - SetCC) | C -> (zext (not SetCC)) * (C + 1) - 1 if we can get a LEA out of it.
51871 if ((VT == MVT::i32 || VT == MVT::i64) &&
51872 N0.getOpcode() == ISD::SUB && N0.hasOneUse() &&
51873 isNullConstant(N0.getOperand(0))) {
51874 SDValue Cond = N0.getOperand(1);
51875 if (Cond.getOpcode() == ISD::ZERO_EXTEND && Cond.hasOneUse())
51876 Cond = Cond.getOperand(0);
51878 if (Cond.getOpcode() == X86ISD::SETCC && Cond.hasOneUse()) {
51879 if (auto *CN = dyn_cast<ConstantSDNode>(N1)) {
51880 uint64_t Val = CN->getZExtValue();
51881 if (Val == 1 || Val == 2 || Val == 3 || Val == 4 || Val == 7 || Val == 8) {
51882 X86::CondCode CCode = (X86::CondCode)Cond.getConstantOperandVal(0);
51883 CCode = X86::GetOppositeBranchCondition(CCode);
51884 SDValue NotCond = getSETCC(CCode, Cond.getOperand(1), SDLoc(Cond), DAG);
51886 SDValue R = DAG.getZExtOrTrunc(NotCond, dl, VT);
51887 R = DAG.getNode(ISD::MUL, dl, VT, R, DAG.getConstant(Val + 1, dl, VT));
          R = DAG.getNode(ISD::SUB, dl, VT, R, DAG.getConstant(1, dl, VT));
          return R;
        }
      }
    }
  }
51895 // Combine OR(X,KSHIFTL(Y,Elts/2)) -> CONCAT_VECTORS(X,Y) == KUNPCK(X,Y).
51896 // Combine OR(KSHIFTL(X,Elts/2),Y) -> CONCAT_VECTORS(Y,X) == KUNPCK(Y,X).
51897 // iff the upper elements of the non-shifted arg are zero.
  // KUNPCK requires 16+ bool vector elements.
51899 if (N0.getOpcode() == X86ISD::KSHIFTL || N1.getOpcode() == X86ISD::KSHIFTL) {
51900 unsigned NumElts = VT.getVectorNumElements();
51901 unsigned HalfElts = NumElts / 2;
51902 APInt UpperElts = APInt::getHighBitsSet(NumElts, HalfElts);
51903 if (NumElts >= 16 && N1.getOpcode() == X86ISD::KSHIFTL &&
51904 N1.getConstantOperandAPInt(1) == HalfElts &&
51905 DAG.MaskedVectorIsZero(N0, UpperElts)) {
51906 return DAG.getNode(
51907 ISD::CONCAT_VECTORS, dl, VT,
51908 extractSubVector(N0, 0, DAG, dl, HalfElts),
51909 extractSubVector(N1.getOperand(0), 0, DAG, dl, HalfElts));
51911 if (NumElts >= 16 && N0.getOpcode() == X86ISD::KSHIFTL &&
51912 N0.getConstantOperandAPInt(1) == HalfElts &&
51913 DAG.MaskedVectorIsZero(N1, UpperElts)) {
51914 return DAG.getNode(
51915 ISD::CONCAT_VECTORS, dl, VT,
51916 extractSubVector(N1, 0, DAG, dl, HalfElts),
51917 extractSubVector(N0.getOperand(0), 0, DAG, dl, HalfElts));
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    // Attempt to recursively combine an OR of shuffles.
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
51927 // If either operand is a constant mask, then only the elements that aren't
51928 // allones are actually demanded by the other operand.
    auto SimplifyUndemandedElts = [&](SDValue Op, SDValue OtherOp) {
      APInt UndefElts;
      SmallVector<APInt> EltBits;
51932 int NumElts = VT.getVectorNumElements();
51933 int EltSizeInBits = VT.getScalarSizeInBits();
      if (!getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts, EltBits))
        return false;
51937 APInt DemandedElts = APInt::getZero(NumElts);
51938 for (int I = 0; I != NumElts; ++I)
51939 if (!EltBits[I].isAllOnes())
51940 DemandedElts.setBit(I);
      return TLI.SimplifyDemandedVectorElts(OtherOp, DemandedElts, DCI);
    };
51944 if (SimplifyUndemandedElts(N0, N1) || SimplifyUndemandedElts(N1, N0)) {
51945 if (N->getOpcode() != ISD::DELETED_NODE)
51946 DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
  }
51951 // We should fold "masked merge" patterns when `andn` is not available.
51952 if (!Subtarget.hasBMI() && VT.isScalarInteger() && VT != MVT::i1)
    if (SDValue R = foldMaskedMerge(N, DAG))
      return R;

  if (SDValue R = combineOrXorWithSETCC(N, N0, N1, DAG))
    return R;

  return SDValue();
}
/// Try to turn tests against the signbit in the form of:
///   XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
/// into:
///   SETGT(X, -1)
static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
51967 // This is only worth doing if the output type is i8 or i1.
51968 EVT ResultType = N->getValueType(0);
51969 if (ResultType != MVT::i8 && ResultType != MVT::i1)
51972 SDValue N0 = N->getOperand(0);
51973 SDValue N1 = N->getOperand(1);
51975 // We should be performing an xor against a truncated shift.
  if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
    return SDValue();
51979 // Make sure we are performing an xor against one.
  if (!isOneConstant(N1))
    return SDValue();
51983 // SetCC on x86 zero extends so only act on this if it's a logical shift.
51984 SDValue Shift = N0.getOperand(0);
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
    return SDValue();
51988 // Make sure we are truncating from one of i16, i32 or i64.
51989 EVT ShiftTy = Shift.getValueType();
  if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
    return SDValue();
51993 // Make sure the shift amount extracts the sign bit.
  if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
      Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
    return SDValue();
51998 // Create a greater-than comparison against -1.
51999 // N.B. Using SETGE against 0 works but we want a canonical looking
52000 // comparison, using SETGT matches up with what TranslateX86CC.
52002 SDValue ShiftOp = Shift.getOperand(0);
52003 EVT ShiftOpTy = ShiftOp.getValueType();
52004 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52005 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
52006 *DAG.getContext(), ResultType);
52007 SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
52008 DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
52009 if (SetCCResultType != ResultType)
    Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
  return Cond;
}
/// Turn vector tests of the signbit in the form of:
///   xor (sra X, elt_size(X)-1), -1
/// into:
///   pcmpgt X, -1
52019 /// This should be called before type legalization because the pattern may not
52020 /// persist after that.
52021 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
52022 const X86Subtarget &Subtarget) {
52023 EVT VT = N->getValueType(0);
  if (!VT.isSimple())
    return SDValue();
  switch (VT.getSimpleVT().SimpleTy) {
  default: return SDValue();
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
  case MVT::v32i8:
  case MVT::v16i16:
  case MVT::v8i32:
  case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
  }
52039 // There must be a shift right algebraic before the xor, and the xor must be a
52040 // 'not' operation.
52041 SDValue Shift = N->getOperand(0);
52042 SDValue Ones = N->getOperand(1);
  if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
      !ISD::isBuildVectorAllOnes(Ones.getNode()))
    return SDValue();
  // The shift should be smearing the sign bit across each vector element.
  auto *ShiftAmt =
      isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
  if (!ShiftAmt ||
      ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
    return SDValue();
52054 // Create a greater-than comparison against -1. We don't use the more obvious
52055 // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
52056 return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
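// Illustrative example (v4i32, SSE2 assumed):
//   xor (sra %x, 31), splat(-1) --> pcmpgtd %x, splat(-1)
// Both compute "is the element non-negative" as an all-ones/all-zeros mask,
// but the compare form is a single instruction.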
52059 /// Detect patterns of truncation with unsigned saturation:
52061 /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
/// Return the source value x to be truncated or SDValue() if the pattern was
/// not matched.
///
52065 /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
52066 /// where C1 >= 0 and C2 is unsigned max of destination type.
52068 /// (truncate (smax (smin (x, C2), C1)) to dest_type)
52069 /// where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
52071 /// These two patterns are equivalent to:
52072 /// (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
52073 /// So return the smax(x, C1) value to be truncated or SDValue() if the
52074 /// pattern was not matched.
static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
                                 const SDLoc &DL) {
  EVT InVT = In.getValueType();
52079 // Saturation with truncation. We truncate from InVT to VT.
52080 assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
52081 "Unexpected types for truncate operation");
52083 // Match min/max and return limit value as a parameter.
52084 auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
52085 if (V.getOpcode() == Opcode &&
52086 ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
      return V.getOperand(0);
    return SDValue();
  };

  APInt C1, C2;
  if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
    // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
    // the element size of the destination type.
    if (C2.isMask(VT.getScalarSizeInBits()))
      return UMin;
52098 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
    if (MatchMinMax(SMin, ISD::SMAX, C1))
      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
        return SMin;
52103 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
52104 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
          C2.uge(C1))
        return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));

  return SDValue();
}
52113 /// Detect patterns of truncation with signed saturation:
52114 /// (truncate (smin ((smax (x, signed_min_of_dest_type)),
52115 /// signed_max_of_dest_type)) to dest_type)
52117 /// (truncate (smax ((smin (x, signed_max_of_dest_type)),
52118 /// signed_min_of_dest_type)) to dest_type).
52119 /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
/// Return the source value to be truncated or SDValue() if the pattern was not
/// matched.
static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
52123 unsigned NumDstBits = VT.getScalarSizeInBits();
52124 unsigned NumSrcBits = In.getScalarValueSizeInBits();
52125 assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
  auto MatchMinMax = [](SDValue V, unsigned Opcode,
                        const APInt &Limit) -> SDValue {
    APInt C;
    if (V.getOpcode() == Opcode &&
        ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
      return V.getOperand(0);
    return SDValue();
  };
  APInt SignedMax, SignedMin;
  if (MatchPackUS) {
    SignedMax = APInt::getAllOnes(NumDstBits).zext(NumSrcBits);
    SignedMin = APInt(NumSrcBits, 0);
  } else {
    SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
    SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
  }
  if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
    if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
      return SMax;

  if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
    if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
      return SMin;

  return SDValue();
}
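// Illustrative example: for a v8i32 -> v8i16 truncate,
//   trunc(smin(smax(%x, splat(-32768)), splat(32767)))
// is recognized here, and the caller can then lower it with PACKSSDW, which
// performs exactly this signed-saturating narrowing.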
static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
                                      SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2() || !VT.isVector())
    return SDValue();
52162 EVT SVT = VT.getVectorElementType();
52163 EVT InVT = In.getValueType();
52164 EVT InSVT = InVT.getVectorElementType();
  // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
  // split across two registers, we can use a packusdw+perm to clamp to 0-65535
  // and concatenate at the same time. Then we can use a final vpmovuswb to
  // clip to 0-255.
  if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
52171 InVT == MVT::v16i32 && VT == MVT::v16i8) {
52172 if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
52173 // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
52174 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
52175 DL, DAG, Subtarget);
52176 assert(Mid && "Failed to pack!");
52177 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
52181 // vXi32 truncate instructions are available with AVX512F.
52182 // vXi16 truncate instructions are only available with AVX512BW.
52183 // For 256-bit or smaller vectors, we require VLX.
52184 // FIXME: We could widen truncates to 512 to remove the VLX restriction.
  // If the result type is 256-bits or larger and we have disabled 512-bit
52186 // registers, we should go ahead and use the pack instructions if possible.
52187 bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
52188 (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
52189 (InVT.getSizeInBits() > 128) &&
52190 (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
52191 !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
52193 if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
52194 VT.getSizeInBits() >= 64 &&
52195 (SVT == MVT::i8 || SVT == MVT::i16) &&
52196 (InSVT == MVT::i16 || InSVT == MVT::i32)) {
52197 if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
52198 // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
      // Only do this when the result is at least 64 bits or we'd be leaving
      // dangling PACKSSDW nodes.
52201 if (SVT == MVT::i8 && InSVT == MVT::i32) {
52202 EVT MidVT = VT.changeVectorElementType(MVT::i16);
        SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
                                             DAG, Subtarget);
        assert(Mid && "Failed to pack!");
        SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
                                           Subtarget);
        assert(V && "Failed to pack!");
        return V;
      } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
        return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
                                      Subtarget);
    }
    if (SDValue SSatVal = detectSSatPattern(In, VT))
      return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
                                    Subtarget);
  }
52219 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52220 if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
52221 Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI()) &&
52222 (SVT == MVT::i32 || SVT == MVT::i16 || SVT == MVT::i8)) {
    unsigned TruncOpc = 0;
    SDValue SatVal;
    if (SDValue SSatVal = detectSSatPattern(In, VT)) {
      SatVal = SSatVal;
      TruncOpc = X86ISD::VTRUNCS;
    } else if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL)) {
      SatVal = USatVal;
      TruncOpc = X86ISD::VTRUNCUS;
    }
    if (SatVal) {
52233 unsigned ResElts = VT.getVectorNumElements();
52234 // If the input type is less than 512 bits and we don't have VLX, we need
52235 // to widen to 512 bits.
52236 if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
52237 unsigned NumConcats = 512 / InVT.getSizeInBits();
52238 ResElts *= NumConcats;
52239 SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
52240 ConcatOps[0] = SatVal;
52241 InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
52242 NumConcats * InVT.getVectorNumElements());
52243 SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
52245 // Widen the result if its narrower than 128 bits.
52246 if (ResElts * SVT.getSizeInBits() < 128)
52247 ResElts = 128 / SVT.getSizeInBits();
52248 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
52249 SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                         DAG.getIntPtrConstant(0, DL));
    }
  }

  return SDValue();
}
/// This function detects the AVG pattern between vectors of unsigned i8/i16,
/// which is c = (a + b + 1) / 2, and replaces this operation with the
/// efficient ISD::AVGCEILU (AVG) instruction.
52261 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
52262 const X86Subtarget &Subtarget,
52264 if (!VT.isVector())
52266 EVT InVT = In.getValueType();
52267 unsigned NumElems = VT.getVectorNumElements();
52269 EVT ScalarVT = VT.getVectorElementType();
52270 if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) && NumElems >= 2))
52273 // InScalarVT is the intermediate type in the AVG pattern and it should be
52274 // wider than the original input type (i8/i16).
52275 EVT InScalarVT = InVT.getVectorElementType();
52276 if (InScalarVT.getFixedSizeInBits() <= ScalarVT.getFixedSizeInBits())
52279 if (!Subtarget.hasSSE2())
52282 // Detect the following pattern:
52284 // %1 = zext <N x i8> %a to <N x i32>
52285 // %2 = zext <N x i8> %b to <N x i32>
52286 // %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
52287 // %4 = add nuw nsw <N x i32> %3, %2
52288 // %5 = lshr <N x i32> %4, <i32 1 x N>
52289 // %6 = trunc <N x i32> %5 to <N x i8>
52291 // In AVX512, the last instruction can also be a trunc store.
52292 if (In.getOpcode() != ISD::SRL)
52295 // A lambda checking whether the given SDValue is a constant vector whose
52296 // elements are all in the range [Min, Max].
52297 auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
52298 return ISD::matchUnaryPredicate(V, [Min, Max](ConstantSDNode *C) {
52299 return !(C->getAPIntValue().ult(Min) || C->getAPIntValue().ugt(Max));
52303 auto IsZExtLike = [DAG = &DAG, ScalarVT](SDValue V) {
52304 unsigned MaxActiveBits = DAG->computeKnownBits(V).countMaxActiveBits();
52305 return MaxActiveBits <= ScalarVT.getSizeInBits();
52308 // Check if each element of the vector is right-shifted by one.
52309 SDValue LHS = In.getOperand(0);
52310 SDValue RHS = In.getOperand(1);
52311 if (!IsConstVectorInRange(RHS, 1, 1))
52313 if (LHS.getOpcode() != ISD::ADD)
52316 // Detect a pattern of a + b + 1 where the order doesn't matter.
52317 SDValue Operands[3];
52318 Operands[0] = LHS.getOperand(0);
52319 Operands[1] = LHS.getOperand(1);
52321 auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
52322 ArrayRef<SDValue> Ops) {
52323 return DAG.getNode(ISD::AVGCEILU, DL, Ops[0].getValueType(), Ops);
52326 auto AVGSplitter = [&](std::array<SDValue, 2> Ops) {
52327 for (SDValue &Op : Ops)
52328 if (Op.getValueType() != VT)
52329 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
52330 // Pad to a power-of-2 vector, split+apply and extract the original vector.
52331 unsigned NumElemsPow2 = PowerOf2Ceil(NumElems);
52332 EVT Pow2VT = EVT::getVectorVT(*DAG.getContext(), ScalarVT, NumElemsPow2);
52333 if (NumElemsPow2 != NumElems) {
52334 for (SDValue &Op : Ops) {
52335 SmallVector<SDValue, 32> EltsOfOp(NumElemsPow2, DAG.getUNDEF(ScalarVT));
52336 for (unsigned i = 0; i != NumElems; ++i) {
52337 SDValue Idx = DAG.getIntPtrConstant(i, DL);
52339 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, Op, Idx);
52341 Op = DAG.getBuildVector(Pow2VT, DL, EltsOfOp);
52344 SDValue Res = SplitOpsAndApply(DAG, Subtarget, DL, Pow2VT, Ops, AVGBuilder);
52345 if (NumElemsPow2 == NumElems)
return Res;
52347 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
52348 DAG.getIntPtrConstant(0, DL));
52351 // Take care of the case when one of the operands is a constant vector whose
52352 // elements are in the range [1, 256] for i8, or [1, 65536] for i16.
52353 if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
52354 IsZExtLike(Operands[0])) {
52355 // The pattern is detected. Subtract one from the constant vector, then
52356 // demote it and emit the ISD::AVGCEILU instruction.
52357 SDValue VecOnes = DAG.getConstant(1, DL, InVT);
52358 Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
52359 return AVGSplitter({Operands[0], Operands[1]});
52362 // Matches 'add like' patterns: add(Op0,Op1) or zext(or(Op0,Op1)).
52363 // Match the 'or' case only if it's 'add-like' - i.e. it can be replaced by an add.
52364 auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
52365 if (ISD::ADD == V.getOpcode()) {
52366 Op0 = V.getOperand(0);
52367 Op1 = V.getOperand(1);
52370 if (ISD::ZERO_EXTEND != V.getOpcode())
52372 V = V.getOperand(0);
52373 if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
52374 !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
52376 Op0 = V.getOperand(0);
52377 Op1 = V.getOperand(1);
52382 if (FindAddLike(Operands[0], Op0, Op1))
52383 std::swap(Operands[0], Operands[1]);
52384 else if (!FindAddLike(Operands[1], Op0, Op1))
52389 // Now we have three operands of two additions. Check that one of them is a
52390 // constant vector of ones, and the other two can be promoted from i8/i16.
52391 for (SDValue &Op : Operands) {
52392 if (!IsConstVectorInRange(Op, 1, 1))
52394 std::swap(Op, Operands[2]);
52396 // Check if Operands[0] and Operands[1] are results of type promotion.
52397 for (int j = 0; j < 2; ++j)
52398 if (Operands[j].getValueType() != VT)
52399 if (!IsZExtLike(Operands[j]))
52402 // The pattern is detected, emit X86ISD::AVG instruction(s).
52403 return AVGSplitter({Operands[0], Operands[1]});
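// Worked example (illustrative): for i8 lanes a = 250 and b = 3, the widened
// sequence computes (250 + 3 + 1) >> 1 = 127 in i32, and ISD::AVGCEILU (PAVGB)
// produces the same 127 directly on the i8 lanes, since the hardware keeps a
// 9-bit intermediate for (a + b + 1) >> 1.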
52409 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
52410 TargetLowering::DAGCombinerInfo &DCI,
52411 const X86Subtarget &Subtarget) {
52412 LoadSDNode *Ld = cast<LoadSDNode>(N);
52413 EVT RegVT = Ld->getValueType(0);
52414 EVT MemVT = Ld->getMemoryVT();
52416 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52418 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
52419 // into two 16-byte operations. Also split non-temporal aligned loads on
52420 // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
52421 ISD::LoadExtType Ext = Ld->getExtensionType();
52423 if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
52424 Ext == ISD::NON_EXTLOAD &&
52425 ((Ld->isNonTemporal() && !Subtarget.hasInt256() &&
52426 Ld->getAlign() >= Align(16)) ||
52427 (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
52428 *Ld->getMemOperand(), &Fast) &&
52430 unsigned NumElems = RegVT.getVectorNumElements();
52434 unsigned HalfOffset = 16;
52435 SDValue Ptr1 = Ld->getBasePtr();
SDValue Ptr2 =
52437 DAG.getMemBasePlusOffset(Ptr1, TypeSize::Fixed(HalfOffset), dl);
52438 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
52441 DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
52442 Ld->getOriginalAlign(),
52443 Ld->getMemOperand()->getFlags());
52444 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
52445 Ld->getPointerInfo().getWithOffset(HalfOffset),
52446 Ld->getOriginalAlign(),
52447 Ld->getMemOperand()->getFlags());
52448 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
52449 Load1.getValue(1), Load2.getValue(1));
52451 SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
52452 return DCI.CombineTo(N, NewVec, TF, true);
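// For example (a sketch; tN names are illustrative), on a target with slow
// unaligned 32-byte loads:
//   t1: v8f32,ch = load t0, Ptr
// becomes
//   ta: v4f32,ch = load t0, Ptr
//   tb: v4f32,ch = load t0, Ptr+16
//   tc: v8f32 = concat_vectors ta, tb
// with the two load chains joined by a TokenFactor.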
52455 // Bool vector load - attempt to cast to an integer, as we have good
52456 // (vXiY *ext(vXi1 bitcast(iX))) handling.
52457 if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
52458 RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
52459 unsigned NumElts = RegVT.getVectorNumElements();
52460 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
52461 if (TLI.isTypeLegal(IntVT)) {
52462 SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
52463 Ld->getPointerInfo(),
52464 Ld->getOriginalAlign(),
52465 Ld->getMemOperand()->getFlags());
52466 SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
52467 return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
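// e.g. (sketch) a v16i1 load becomes a scalar i16 load plus a bitcast:
//   t1: i16,ch = load t0, Ptr
//   t2: v16i1 = bitcast t1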
52471 // If we also broadcast this as a subvector to a wider type, then just extract
52472 // the lowest subvector.
52473 if (Ext == ISD::NON_EXTLOAD && Subtarget.hasAVX() && Ld->isSimple() &&
52474 (RegVT.is128BitVector() || RegVT.is256BitVector())) {
52475 SDValue Ptr = Ld->getBasePtr();
52476 SDValue Chain = Ld->getChain();
52477 for (SDNode *User : Ptr->uses()) {
52478 if (User != N && User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
52479 cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
52480 cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
52481 cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
52482 MemVT.getSizeInBits() &&
52483 !User->hasAnyUseOfValue(1) &&
52484 User->getValueSizeInBits(0).getFixedValue() >
52485 RegVT.getFixedSizeInBits()) {
52486 SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
52487 RegVT.getSizeInBits());
52488 Extract = DAG.getBitcast(RegVT, Extract);
52489 return DCI.CombineTo(N, Extract, SDValue(User, 1));
52494 // Cast ptr32 and ptr64 pointers to the default address space before a load.
52495 unsigned AddrSpace = Ld->getAddressSpace();
52496 if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
52497 AddrSpace == X86AS::PTR32_UPTR) {
52498 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
52499 if (PtrVT != Ld->getBasePtr().getSimpleValueType()) {
52501 DAG.getAddrSpaceCast(dl, PtrVT, Ld->getBasePtr(), AddrSpace, 0);
52502 return DAG.getLoad(RegVT, dl, Ld->getChain(), Cast, Ld->getPointerInfo(),
52503 Ld->getOriginalAlign(),
52504 Ld->getMemOperand()->getFlags());
52511 /// If V is a build vector of boolean constants and exactly one of those
52512 /// constants is true, return the operand index of that true element.
52513 /// Otherwise, return -1.
52514 static int getOneTrueElt(SDValue V) {
52515 // This needs to be a build vector of booleans.
52516 // TODO: Checking for the i1 type matches the IR definition for the mask,
52517 // but the mask check could be loosened to i8 or other types. That might
52518 // also require checking more than 'allOnesValue'; eg, the x86 HW
52519 // instructions only require that the MSB is set for each mask element.
52520 // The ISD::MSTORE comments/definition do not specify how the mask operand is formatted.
52522 auto *BV = dyn_cast<BuildVectorSDNode>(V);
52523 if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
52526 int TrueIndex = -1;
52527 unsigned NumElts = BV->getValueType(0).getVectorNumElements();
52528 for (unsigned i = 0; i < NumElts; ++i) {
52529 const SDValue &Op = BV->getOperand(i);
52532 auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
52535 if (ConstNode->getAPIntValue().countr_one() >= 1) {
52536 // If we already found a one, this is too many.
52537 if (TrueIndex >= 0)
52545 /// Given a masked memory load/store operation, return true if it has exactly
52546 /// one mask bit set. If so, then also return the memory address of the scalar
52547 /// element to load/store, the vector index to insert/extract that scalar
52548 /// element, and the alignment for the scalar memory access.
52549 static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
52550 SelectionDAG &DAG, SDValue &Addr,
52551 SDValue &Index, Align &Alignment,
52552 unsigned &Offset) {
52553 int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
52554 if (TrueMaskElt < 0)
52557 // Get the address of the one scalar element that is specified by the mask
52558 // using the appropriate offset from the base pointer.
52559 EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
52561 Addr = MaskedOp->getBasePtr();
52562 if (TrueMaskElt != 0) {
52563 Offset = TrueMaskElt * EltVT.getStoreSize();
52564 Addr = DAG.getMemBasePlusOffset(Addr, TypeSize::Fixed(Offset),
52568 Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
52569 Alignment = commonAlignment(MaskedOp->getOriginalAlign(),
52570 EltVT.getStoreSize());
52574 /// If exactly one element of the mask is set for a non-extending masked load,
52575 /// reduce it to a scalar load and a vector insert.
52576 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
52577 /// mask have already been optimized in IR, so we don't bother with those here.
52579 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
52580 TargetLowering::DAGCombinerInfo &DCI,
52581 const X86Subtarget &Subtarget) {
52582 assert(ML->isUnindexed() && "Unexpected indexed masked load!");
52583 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
52584 // However, some target hooks may need to be added to know when the transform
52585 // is profitable. Endianness would also have to be considered.
52587 SDValue Addr, VecIndex;
52590 if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment, Offset))
52593 // Load the one scalar element that is specified by the mask using the
52594 // appropriate offset from the base pointer.
52596 EVT VT = ML->getValueType(0);
52597 EVT EltVT = VT.getVectorElementType();
52600 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
52602 CastVT = VT.changeVectorElementType(EltVT);
52606 DAG.getLoad(EltVT, DL, ML->getChain(), Addr,
52607 ML->getPointerInfo().getWithOffset(Offset),
52608 Alignment, ML->getMemOperand()->getFlags());
52610 SDValue PassThru = DAG.getBitcast(CastVT, ML->getPassThru());
52612 // Insert the loaded element into the appropriate place in the vector.
52614 DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, CastVT, PassThru, Load, VecIndex);
52615 Insert = DAG.getBitcast(VT, Insert);
52616 return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
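// For example (illustrative), a v4i32 masked load whose mask is <0,0,1,0>
// becomes a scalar load of element 2 plus an insert:
//   t1: i32,ch = load t0, Ptr+8 // 2 * sizeof(i32)
//   t2: v4i32 = insert_vector_elt PassThru, t1, 2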
52620 combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
52621 TargetLowering::DAGCombinerInfo &DCI) {
52622 assert(ML->isUnindexed() && "Unexpected indexed masked load!");
52623 if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
52627 EVT VT = ML->getValueType(0);
52629 // If we are loading the first and last elements of a vector, it is safe and
52630 // always faster to load the whole vector. Replace the masked load with a
52631 // vector load and select.
52632 unsigned NumElts = VT.getVectorNumElements();
52633 BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
52634 bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
52635 bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
52636 if (LoadFirstElt && LoadLastElt) {
52637 SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
52638 ML->getMemOperand());
52639 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
52640 ML->getPassThru());
52641 return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
52644 // Convert a masked load with a constant mask into a masked load and a select.
52645 // This allows the select operation to use a faster kind of select instruction
52646 // (for example, vblendvps -> vblendps).
52648 // Don't try this if the pass-through operand is already undefined. That would
52649 // cause an infinite loop because that's what we're about to create.
52650 if (ML->getPassThru().isUndef())
52653 if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
52656 // The new masked load has an undef pass-through operand. The select uses the
52657 // original pass-through operand.
52658 SDValue NewML = DAG.getMaskedLoad(
52659 VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
52660 DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
52661 ML->getAddressingMode(), ML->getExtensionType());
52662 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
52663 ML->getPassThru());
52665 return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
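// e.g. (sketch) with a constant mask M and pass-through P:
//   t1: v8f32,ch = masked_load Ptr, M, undef
//   t2: v8f32 = vselect M, t1, P
// which allows the blend to lower to vblendps instead of vblendvps.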
52668 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
52669 TargetLowering::DAGCombinerInfo &DCI,
52670 const X86Subtarget &Subtarget) {
52671 auto *Mld = cast<MaskedLoadSDNode>(N);
52673 // TODO: Expanding load with constant mask may be optimized as well.
52674 if (Mld->isExpandingLoad())
52677 if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
52678 if (SDValue ScalarLoad =
52679 reduceMaskedLoadToScalarLoad(Mld, DAG, DCI, Subtarget))
return ScalarLoad;
52682 // TODO: Do some AVX512 subsets benefit from this transform?
52683 if (!Subtarget.hasAVX512())
52684 if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
return Blend;
52688 // If the mask value has been legalized to a non-boolean vector, try to
52689 // simplify ops leading up to it. We only demand the MSB of each lane.
52690 SDValue Mask = Mld->getMask();
52691 if (Mask.getScalarValueSizeInBits() != 1) {
52692 EVT VT = Mld->getValueType(0);
52693 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52694 APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
52695 if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
52696 if (N->getOpcode() != ISD::DELETED_NODE)
52697 DCI.AddToWorklist(N);
52698 return SDValue(N, 0);
52700 if (SDValue NewMask =
52701 TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
52702 return DAG.getMaskedLoad(
52703 VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
52704 NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
52705 Mld->getAddressingMode(), Mld->getExtensionType());
52711 /// If exactly one element of the mask is set for a non-truncating masked store,
52712 /// reduce it to a vector extract and a scalar store.
52713 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
52714 /// mask have already been optimized in IR, so we don't bother with those here.
52715 static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
52717 const X86Subtarget &Subtarget) {
52718 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
52719 // However, some target hooks may need to be added to know when the transform
52720 // is profitable. Endianness would also have to be considered.
52722 SDValue Addr, VecIndex;
52725 if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment, Offset))
52728 // Extract the one scalar element that is actually being stored.
52730 SDValue Value = MS->getValue();
52731 EVT VT = Value.getValueType();
52732 EVT EltVT = VT.getVectorElementType();
52733 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
52735 EVT CastVT = VT.changeVectorElementType(EltVT);
52736 Value = DAG.getBitcast(CastVT, Value);
52739 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Value, VecIndex);
52741 // Store that element at the appropriate offset from the base pointer.
52742 return DAG.getStore(MS->getChain(), DL, Extract, Addr,
52743 MS->getPointerInfo().getWithOffset(Offset),
52744 Alignment, MS->getMemOperand()->getFlags());
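// For example (illustrative), a v8i16 masked store whose mask has only
// element 5 set becomes:
//   t1: i16 = extract_vector_elt Val, 5
//   ch = store t1, Ptr+10 // 5 * sizeof(i16)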
52747 static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
52748 TargetLowering::DAGCombinerInfo &DCI,
52749 const X86Subtarget &Subtarget) {
52750 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
52751 if (Mst->isCompressingStore())
52754 EVT VT = Mst->getValue().getValueType();
52756 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52758 if (Mst->isTruncatingStore())
52761 if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG, Subtarget))
52762 return ScalarStore;
52764 // If the mask value has been legalized to a non-boolean vector, try to
52765 // simplify ops leading up to it. We only demand the MSB of each lane.
52766 SDValue Mask = Mst->getMask();
52767 if (Mask.getScalarValueSizeInBits() != 1) {
52768 APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
52769 if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
52770 if (N->getOpcode() != ISD::DELETED_NODE)
52771 DCI.AddToWorklist(N);
52772 return SDValue(N, 0);
52774 if (SDValue NewMask =
52775 TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
52776 return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Mst->getValue(),
52777 Mst->getBasePtr(), Mst->getOffset(), NewMask,
52778 Mst->getMemoryVT(), Mst->getMemOperand(),
52779 Mst->getAddressingMode());
52782 SDValue Value = Mst->getValue();
52783 if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
52784 TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
52785 Mst->getMemoryVT())) {
52786 return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
52787 Mst->getBasePtr(), Mst->getOffset(), Mask,
52788 Mst->getMemoryVT(), Mst->getMemOperand(),
52789 Mst->getAddressingMode(), true);
52795 static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
52796 TargetLowering::DAGCombinerInfo &DCI,
52797 const X86Subtarget &Subtarget) {
52798 StoreSDNode *St = cast<StoreSDNode>(N);
52799 EVT StVT = St->getMemoryVT();
52801 SDValue StoredVal = St->getValue();
52802 EVT VT = StoredVal.getValueType();
52803 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52805 // Convert a store of vXi1 into a store of iX and a bitcast.
52806 if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
52807 VT.getVectorElementType() == MVT::i1) {
52809 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
52810 StoredVal = DAG.getBitcast(NewVT, StoredVal);
52812 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
52813 St->getPointerInfo(), St->getOriginalAlign(),
52814 St->getMemOperand()->getFlags());
52817 // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
52818 // This will avoid a copy to k-register.
52819 if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
52820 StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
52821 StoredVal.getOperand(0).getValueType() == MVT::i8) {
52822 SDValue Val = StoredVal.getOperand(0);
52823 // We must store zeros to the unused bits.
52824 Val = DAG.getZeroExtendInReg(Val, dl, MVT::i1);
52825 return DAG.getStore(St->getChain(), dl, Val,
52826 St->getBasePtr(), St->getPointerInfo(),
52827 St->getOriginalAlign(),
52828 St->getMemOperand()->getFlags());
52831 // Widen v1i1/v2i1/v4i1 stores to v8i1.
52832 if ((VT == MVT::v1i1 || VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
52833 Subtarget.hasAVX512()) {
52834 unsigned NumConcats = 8 / VT.getVectorNumElements();
52835 // We must store zeros to the unused bits.
52836 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, VT));
52837 Ops[0] = StoredVal;
52838 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
52839 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
52840 St->getPointerInfo(), St->getOriginalAlign(),
52841 St->getMemOperand()->getFlags());
52844 // Turn vXi1 stores of constants into a scalar store.
52845 if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
52846 VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
52847 ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
52848 // If it's a v64i1 store without 64-bit support, we need two stores.
52849 if (!DCI.isBeforeLegalize() && VT == MVT::v64i1 && !Subtarget.is64Bit()) {
52850 SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
52851 StoredVal->ops().slice(0, 32));
52852 Lo = combinevXi1ConstantToInteger(Lo, DAG);
52853 SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
52854 StoredVal->ops().slice(32, 32));
52855 Hi = combinevXi1ConstantToInteger(Hi, DAG);
52857 SDValue Ptr0 = St->getBasePtr();
52858 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, TypeSize::Fixed(4), dl);
52861 DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
52862 St->getOriginalAlign(),
52863 St->getMemOperand()->getFlags());
52865 DAG.getStore(St->getChain(), dl, Hi, Ptr1,
52866 St->getPointerInfo().getWithOffset(4),
52867 St->getOriginalAlign(),
52868 St->getMemOperand()->getFlags());
52869 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
52872 StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
52873 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
52874 St->getPointerInfo(), St->getOriginalAlign(),
52875 St->getMemOperand()->getFlags());
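// e.g. (a sketch, assuming element 0 maps to bit 0) storing the v8i1
// constant <1,0,1,1,0,0,0,0> becomes a single scalar store of the i8
// value 0x0D.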
52878 // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
52879 // Sandy Bridge, perform two 16-byte stores.
52881 if (VT.is256BitVector() && StVT == VT &&
52882 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
52883 *St->getMemOperand(), &Fast) &&
52885 unsigned NumElems = VT.getVectorNumElements();
52889 return splitVectorStore(St, DAG);
52892 // Split under-aligned vector non-temporal stores.
52893 if (St->isNonTemporal() && StVT == VT &&
52894 St->getAlign().value() < VT.getStoreSize()) {
52895 // ZMM/YMM nt-stores - either it can be stored as a series of shorter
52896 // vectors or the legalizer can scalarize it to use MOVNTI.
52897 if (VT.is256BitVector() || VT.is512BitVector()) {
52898 unsigned NumElems = VT.getVectorNumElements();
52901 return splitVectorStore(St, DAG);
52904 // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64 stores.
52906 if (VT.is128BitVector() && Subtarget.hasSSE2()) {
52907 MVT NTVT = Subtarget.hasSSE4A()
52909 : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
52910 return scalarizeVectorStore(St, NTVT, DAG);
52914 // Try to optimize v16i16->v16i8 truncating stores when BWI is not
52915 // supported but AVX512F is, by extending to v16i32 and truncating.
52916 if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
52917 St->getValue().getOpcode() == ISD::TRUNCATE &&
52918 St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
52919 TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
52920 St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
52921 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32,
52922 St->getValue().getOperand(0));
52923 return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
52924 MVT::v16i8, St->getMemOperand());
52927 // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
52928 if (!St->isTruncatingStore() &&
52929 (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
52930 StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
52931 StoredVal.hasOneUse() &&
52932 TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
52933 bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
52934 return EmitTruncSStore(IsSigned, St->getChain(),
52935 dl, StoredVal.getOperand(0), St->getBasePtr(),
52936 VT, St->getMemOperand(), DAG);
52939 // Try to fold an extract_element(VTRUNC) pattern into a truncating store.
52940 if (!St->isTruncatingStore()) {
52941 auto IsExtractedElement = [](SDValue V) {
52942 if (V.getOpcode() == ISD::TRUNCATE && V.hasOneUse())
52943 V = V.getOperand(0);
52944 unsigned Opc = V.getOpcode();
52945 if ((Opc == ISD::EXTRACT_VECTOR_ELT || Opc == X86ISD::PEXTRW) &&
52946 isNullConstant(V.getOperand(1)) && V.hasOneUse() &&
52947 V.getOperand(0).hasOneUse())
52948 return V.getOperand(0);
52951 if (SDValue Extract = IsExtractedElement(StoredVal)) {
52952 SDValue Trunc = peekThroughOneUseBitcasts(Extract);
52953 if (Trunc.getOpcode() == X86ISD::VTRUNC) {
52954 SDValue Src = Trunc.getOperand(0);
52955 MVT DstVT = Trunc.getSimpleValueType();
52956 MVT SrcVT = Src.getSimpleValueType();
52957 unsigned NumSrcElts = SrcVT.getVectorNumElements();
52958 unsigned NumTruncBits = DstVT.getScalarSizeInBits() * NumSrcElts;
52959 MVT TruncVT = MVT::getVectorVT(DstVT.getScalarType(), NumSrcElts);
52960 if (NumTruncBits == VT.getSizeInBits() &&
52961 TLI.isTruncStoreLegal(SrcVT, TruncVT)) {
52962 return DAG.getTruncStore(St->getChain(), dl, Src, St->getBasePtr(),
52963 TruncVT, St->getMemOperand());
52969 // Optimize trunc store (of multiple scalars) to shuffle and store.
52970 // First, pack all of the elements in one place. Next, store to memory
52971 // in fewer chunks.
52972 if (St->isTruncatingStore() && VT.isVector()) {
52973 // Check if we can detect an AVG pattern from the truncation. If yes,
52974 // replace the trunc store by a normal store with the result of the ISD::AVGCEILU instruction.
52976 if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
52977 if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
52979 return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
52980 St->getPointerInfo(), St->getOriginalAlign(),
52981 St->getMemOperand()->getFlags());
52983 if (TLI.isTruncStoreLegal(VT, StVT)) {
52984 if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
52985 return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
52986 dl, Val, St->getBasePtr(),
52987 St->getMemoryVT(), St->getMemOperand(), DAG);
52988 if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
52990 return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
52991 dl, Val, St->getBasePtr(),
52992 St->getMemoryVT(), St->getMemOperand(), DAG);
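// For example (illustrative), storing (trunc (smax (smin %x, 127), -128))
// from v16i32 to v16i8 memory can be emitted as a signed-saturating
// truncating store (e.g. VPMOVSDB on AVX512F), folding the clamp away.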
52998 // Cast ptr32 and ptr64 pointers to the default address space before a store.
52999 unsigned AddrSpace = St->getAddressSpace();
53000 if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
53001 AddrSpace == X86AS::PTR32_UPTR) {
53002 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
53003 if (PtrVT != St->getBasePtr().getSimpleValueType()) {
53005 DAG.getAddrSpaceCast(dl, PtrVT, St->getBasePtr(), AddrSpace, 0);
53006 return DAG.getStore(St->getChain(), dl, StoredVal, Cast,
53007 St->getPointerInfo(), St->getOriginalAlign(),
53008 St->getMemOperand()->getFlags(), St->getAAInfo());
53012 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
53013 // the FP state in cases where an emms may be missing.
53014 // A preferable solution to the general problem is to figure out the right
53015 // places to insert EMMS. This qualifies as a quick hack.
53017 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
53018 if (VT.getSizeInBits() != 64)
53021 const Function &F = DAG.getMachineFunction().getFunction();
53022 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
53024 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
53025 if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
53026 isa<LoadSDNode>(St->getValue()) &&
53027 cast<LoadSDNode>(St->getValue())->isSimple() &&
53028 St->getChain().hasOneUse() && St->isSimple()) {
53029 LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
53031 if (!ISD::isNormalLoad(Ld))
53034 // Avoid the transformation if there are multiple uses of the loaded value.
53035 if (!Ld->hasNUsesOfValue(1, 0))
53040 // Lower to a single movq load/store pair.
53041 SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
53042 Ld->getBasePtr(), Ld->getMemOperand());
53044 // Make sure the new load is placed in the same chain order.
53045 DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
53046 return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
53047 St->getMemOperand());
53050 // This is similar to the above case, but here we handle a scalar 64-bit
53051 // integer store that is extracted from a vector on a 32-bit target.
53052 // If we have SSE2, then we can treat it like a floating-point double
53053 // to get past legalization. The execution dependencies fixup pass will
53054 // choose the optimal machine instruction for the store if this really is
53055 // an integer or v2f32 rather than an f64.
53056 if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
53057 St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
53058 SDValue OldExtract = St->getOperand(1);
53059 SDValue ExtOp0 = OldExtract.getOperand(0);
53060 unsigned VecSize = ExtOp0.getValueSizeInBits();
53061 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
53062 SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
53063 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
53064 BitCast, OldExtract.getOperand(1));
53065 return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
53066 St->getPointerInfo(), St->getOriginalAlign(),
53067 St->getMemOperand()->getFlags());
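// e.g. (a sketch) on 32-bit x86 with SSE2:
//   t1: i64 = extract_vector_elt %v, 1 // %v: v2i64
//   ch = store t1, Ptr
// becomes
//   t2: f64 = extract_vector_elt (bitcast %v to v2f64), 1
//   ch = store t2, Ptr
// i.e. one 64-bit FP store instead of two 32-bit integer stores.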
53073 static SDValue combineVEXTRACT_STORE(SDNode *N, SelectionDAG &DAG,
53074 TargetLowering::DAGCombinerInfo &DCI,
53075 const X86Subtarget &Subtarget) {
53076 auto *St = cast<MemIntrinsicSDNode>(N);
53078 SDValue StoredVal = N->getOperand(1);
53079 MVT VT = StoredVal.getSimpleValueType();
53080 EVT MemVT = St->getMemoryVT();
53082 // Figure out which elements we demand.
53083 unsigned StElts = MemVT.getSizeInBits() / VT.getScalarSizeInBits();
53084 APInt DemandedElts = APInt::getLowBitsSet(VT.getVectorNumElements(), StElts);
53086 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53087 if (TLI.SimplifyDemandedVectorElts(StoredVal, DemandedElts, DCI)) {
53088 if (N->getOpcode() != ISD::DELETED_NODE)
53089 DCI.AddToWorklist(N);
53090 return SDValue(N, 0);
53096 /// Return 'true' if this vector operation is "horizontal"
53097 /// and return the operands for the horizontal operation in LHS and RHS. A
53098 /// horizontal operation performs the binary operation on successive elements
53099 /// of its first operand, then on successive elements of its second operand,
53100 /// returning the resulting values in a vector. For example, if
53101 /// A = < float a0, float a1, float a2, float a3 >
53103 /// B = < float b0, float b1, float b2, float b3 >
53104 /// then the result of doing a horizontal operation on A and B is
53105 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
53106 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
53107 /// A horizontal-op B, for some already available A and B, and if so then LHS is
53108 /// set to A, RHS to B, and the routine returns 'true'.
53109 static bool isHorizontalBinOp(unsigned HOpcode, SDValue &LHS, SDValue &RHS,
53110 SelectionDAG &DAG, const X86Subtarget &Subtarget,
53111 bool IsCommutative,
53112 SmallVectorImpl<int> &PostShuffleMask) {
53113 // If either operand is undef, bail out. The binop should be simplified.
53114 if (LHS.isUndef() || RHS.isUndef())
53117 // Look for the following pattern:
53118 // A = < float a0, float a1, float a2, float a3 >
53119 // B = < float b0, float b1, float b2, float b3 >
53121 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
53122 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
53123 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
53124 // which is A horizontal-op B.
53126 MVT VT = LHS.getSimpleValueType();
53127 assert((VT.is128BitVector() || VT.is256BitVector()) &&
53128 "Unsupported vector type for horizontal add/sub");
53129 unsigned NumElts = VT.getVectorNumElements();
53131 auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
53132 SmallVectorImpl<int> &ShuffleMask) {
53133 bool UseSubVector = false;
53134 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
53135 Op.getOperand(0).getValueType().is256BitVector() &&
53136 llvm::isNullConstant(Op.getOperand(1))) {
53137 Op = Op.getOperand(0);
53138 UseSubVector = true;
53140 SmallVector<SDValue, 2> SrcOps;
53141 SmallVector<int, 16> SrcMask, ScaledMask;
53142 SDValue BC = peekThroughBitcasts(Op);
53143 if (getTargetShuffleInputs(BC, SrcOps, SrcMask, DAG) &&
53144 !isAnyZero(SrcMask) && all_of(SrcOps, [BC](SDValue Op) {
53145 return Op.getValueSizeInBits() == BC.getValueSizeInBits();
53147 resolveTargetShuffleInputsAndMask(SrcOps, SrcMask);
53148 if (!UseSubVector && SrcOps.size() <= 2 &&
53149 scaleShuffleElements(SrcMask, NumElts, ScaledMask)) {
53150 N0 = !SrcOps.empty() ? SrcOps[0] : SDValue();
53151 N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
53152 ShuffleMask.assign(ScaledMask.begin(), ScaledMask.end());
53154 if (UseSubVector && SrcOps.size() == 1 &&
53155 scaleShuffleElements(SrcMask, 2 * NumElts, ScaledMask)) {
53156 std::tie(N0, N1) = DAG.SplitVector(SrcOps[0], SDLoc(Op));
53157 ArrayRef<int> Mask = ArrayRef<int>(ScaledMask).slice(0, NumElts);
53158 ShuffleMask.assign(Mask.begin(), Mask.end());
53163 // View LHS in the form
53164 // LHS = VECTOR_SHUFFLE A, B, LMask
53165 // If LHS is not a shuffle, then pretend it is the identity shuffle:
53166 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
53167 // NOTE: A default initialized SDValue represents an UNDEF of type VT.
53169 SmallVector<int, 16> LMask;
53170 GetShuffle(LHS, A, B, LMask);
53172 // Likewise, view RHS in the form
53173 // RHS = VECTOR_SHUFFLE C, D, RMask
53175 SmallVector<int, 16> RMask;
53176 GetShuffle(RHS, C, D, RMask);
53178 // At least one of the operands should be a vector shuffle.
53179 unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
53180 if (NumShuffles == 0)
53183 if (LMask.empty()) {
53185 for (unsigned i = 0; i != NumElts; ++i)
53186 LMask.push_back(i);
53189 if (RMask.empty()) {
53191 for (unsigned i = 0; i != NumElts; ++i)
53192 RMask.push_back(i);
53195 // If we have a unary mask, ensure the other op is set to null.
53196 if (isUndefOrInRange(LMask, 0, NumElts))
53198 else if (isUndefOrInRange(LMask, NumElts, NumElts * 2))
53201 if (isUndefOrInRange(RMask, 0, NumElts))
53203 else if (isUndefOrInRange(RMask, NumElts, NumElts * 2))
53206 // If A and B occur in reverse order in RHS, then canonicalize by commuting
53207 // RHS operands and shuffle mask.
53210 ShuffleVectorSDNode::commuteMask(RMask);
53212 // Check that the shuffles are both shuffling the same vectors.
53213 if (!(A == C && B == D))
53216 PostShuffleMask.clear();
53217 PostShuffleMask.append(NumElts, SM_SentinelUndef);
53219 // LHS and RHS are now:
53220 // LHS = shuffle A, B, LMask
53221 // RHS = shuffle A, B, RMask
53222 // Check that the masks correspond to performing a horizontal operation.
53223 // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
53224 // so we just repeat the inner loop if this is a 256-bit op.
53225 unsigned Num128BitChunks = VT.getSizeInBits() / 128;
53226 unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
53227 unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
53228 assert((NumEltsPer128BitChunk % 2 == 0) &&
53229 "Vector type should have an even number of elements in each lane");
53230 for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
53231 for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
53232 // Ignore undefined components.
53233 int LIdx = LMask[i + j], RIdx = RMask[i + j];
53234 if (LIdx < 0 || RIdx < 0 ||
53235 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
53236 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
53239 // Check that successive odd/even elements are being operated on. If not,
53240 // this is not a horizontal operation.
53241 if (!((RIdx & 1) == 1 && (LIdx + 1) == RIdx) &&
53242 !((LIdx & 1) == 1 && (RIdx + 1) == LIdx && IsCommutative))
53245 // Compute the post-shuffle mask index based on where the element
53246 // is stored in the HOP result, and where it needs to be moved to.
53247 int Base = LIdx & ~1u;
53248 int Index = ((Base % NumEltsPer128BitChunk) / 2) +
53249 ((Base % NumElts) & ~(NumEltsPer128BitChunk - 1));
53251 // The low half of the 128-bit result must choose from A.
53252 // The high half of the 128-bit result must choose from B,
53253 // unless B is undef. In that case, we are always choosing from A.
53254 if ((B && Base >= (int)NumElts) || (!B && i >= NumEltsPer64BitChunk))
53255 Index += NumEltsPer64BitChunk;
53256 PostShuffleMask[i + j] = Index;
53260 SDValue NewLHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
53261 SDValue NewRHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
53263 bool IsIdentityPostShuffle =
53264 isSequentialOrUndefInRange(PostShuffleMask, 0, NumElts, 0);
53265 if (IsIdentityPostShuffle)
53266 PostShuffleMask.clear();
53268 // Avoid 128-bit multi-lane shuffles if pre-AVX2 and FP (integer will split).
53269 if (!IsIdentityPostShuffle && !Subtarget.hasAVX2() && VT.isFloatingPoint() &&
53270 isMultiLaneShuffleMask(128, VT.getScalarSizeInBits(), PostShuffleMask))
53273 // If the source nodes are already used in HorizOps then always accept this.
53274 // Shuffle folding should merge these back together.
53275 bool FoundHorizLHS = llvm::any_of(NewLHS->uses(), [&](SDNode *User) {
53276 return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
53278 bool FoundHorizRHS = llvm::any_of(NewRHS->uses(), [&](SDNode *User) {
53279 return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
53281 bool ForceHorizOp = FoundHorizLHS && FoundHorizRHS;
53283 // Assume a SingleSource HOP if we only shuffle one input and don't need to
53284 // shuffle the result.
53285 if (!ForceHorizOp &&
53286 !shouldUseHorizontalOp(NewLHS == NewRHS &&
53287 (NumShuffles < 2 || !IsIdentityPostShuffle),
53291 LHS = DAG.getBitcast(VT, NewLHS);
53292 RHS = DAG.getBitcast(VT, NewRHS);
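// Worked example (illustrative), with A = <a0,a1,a2,a3>, B = <b0,b1,b2,b3>:
//   LHS = shuffle A, B, <0,2,4,6>
//   RHS = shuffle A, B, <1,3,5,7>
//   fadd LHS, RHS
// matches as A horizontal-op B (HADDPS A, B on SSE3) with an identity
// post-shuffle mask, modulo the profitability checks above.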
53296 // Try to synthesize horizontal (f)hadd/hsub from (f)adds/subs of shuffles.
53297 static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG,
53298 const X86Subtarget &Subtarget) {
53299 EVT VT = N->getValueType(0);
53300 unsigned Opcode = N->getOpcode();
53301 bool IsAdd = (Opcode == ISD::FADD) || (Opcode == ISD::ADD);
53302 SmallVector<int, 8> PostShuffleMask;
53307 if ((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
53308 (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
53309 SDValue LHS = N->getOperand(0);
53310 SDValue RHS = N->getOperand(1);
53311 auto HorizOpcode = IsAdd ? X86ISD::FHADD : X86ISD::FHSUB;
53312 if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
53313 PostShuffleMask)) {
53314 SDValue HorizBinOp = DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
53315 if (!PostShuffleMask.empty())
53316 HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
53317 DAG.getUNDEF(VT), PostShuffleMask);
53324 if (Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
53325 VT == MVT::v16i16 || VT == MVT::v8i32)) {
53326 SDValue LHS = N->getOperand(0);
53327 SDValue RHS = N->getOperand(1);
53328 auto HorizOpcode = IsAdd ? X86ISD::HADD : X86ISD::HSUB;
53329 if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
53330 PostShuffleMask)) {
53331 auto HOpBuilder = [HorizOpcode](SelectionDAG &DAG, const SDLoc &DL,
53332 ArrayRef<SDValue> Ops) {
53333 return DAG.getNode(HorizOpcode, DL, Ops[0].getValueType(), Ops);
53335 SDValue HorizBinOp = SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
53336 {LHS, RHS}, HOpBuilder);
53337 if (!PostShuffleMask.empty())
53338 HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
53339 DAG.getUNDEF(VT), PostShuffleMask);
53349 // Try to combine the following nodes
53350 // t29: i64 = X86ISD::Wrapper TargetConstantPool:i64
53351 // <i32 -2147483648[float -0.000000e+00]> 0
53352 // t27: v16i32[v16f32],ch = X86ISD::VBROADCAST_LOAD
53353 // <(load 4 from constant-pool)> t0, t29
53354 // [t30: v16i32 = bitcast t27]
53355 // t6: v16i32 = xor t7, t27[t30]
53356 // t11: v16f32 = bitcast t6
53357 // t21: v16f32 = X86ISD::VFMULC[X86ISD::VCFMULC] t11, t8
53358 // into X86ISD::VFCMULC[X86ISD::VFMULC] if possible:
53359 // t22: v16f32 = bitcast t7
53360 // t23: v16f32 = X86ISD::VFCMULC[X86ISD::VFMULC] t8, t22
53361 // t24: v32f16 = bitcast t23
53362 static SDValue combineFMulcFCMulc(SDNode *N, SelectionDAG &DAG,
53363 const X86Subtarget &Subtarget) {
53364 EVT VT = N->getValueType(0);
53365 SDValue LHS = N->getOperand(0);
53366 SDValue RHS = N->getOperand(1);
53367 int CombineOpcode =
53368 N->getOpcode() == X86ISD::VFCMULC ? X86ISD::VFMULC : X86ISD::VFCMULC;
53369 auto isConjugationConstant = [](const Constant *c) {
53370 if (const auto *CI = dyn_cast<ConstantInt>(c)) {
53371 APInt ConjugationInt32 = APInt(32, 0x80000000, true);
53372 APInt ConjugationInt64 = APInt(64, 0x8000000080000000ULL, true);
53373 switch (CI->getBitWidth()) {
53377 return CI->getValue() == ConjugationInt32;
53379 return CI->getValue() == ConjugationInt64;
53381 llvm_unreachable("Unexpected bit width");
53384 if (const auto *CF = dyn_cast<ConstantFP>(c))
53385 return CF->getType()->isFloatTy() && CF->isNegativeZeroValue();
53388 auto combineConjugation = [&](SDValue &r) {
53389 if (LHS->getOpcode() == ISD::BITCAST && RHS.hasOneUse()) {
53390 SDValue XOR = LHS.getOperand(0);
53391 if (XOR->getOpcode() == ISD::XOR && XOR.hasOneUse()) {
53392 SDValue XORRHS = XOR.getOperand(1);
53393 if (XORRHS.getOpcode() == ISD::BITCAST && XORRHS.hasOneUse())
53394 XORRHS = XORRHS.getOperand(0);
53395 if (XORRHS.getOpcode() == X86ISD::VBROADCAST_LOAD &&
53396 XORRHS.getOperand(1).getNumOperands()) {
53397 ConstantPoolSDNode *CP =
53398 dyn_cast<ConstantPoolSDNode>(XORRHS.getOperand(1).getOperand(0));
53399 if (CP && isConjugationConstant(CP->getConstVal())) {
53400 SelectionDAG::FlagInserter FlagsInserter(DAG, N);
53401 SDValue I2F = DAG.getBitcast(VT, LHS.getOperand(0).getOperand(0));
53402 SDValue FCMulC = DAG.getNode(CombineOpcode, SDLoc(N), VT, RHS, I2F);
53403 r = DAG.getBitcast(VT, FCMulC);
53412 if (combineConjugation(Res))
53414 std::swap(LHS, RHS);
53415 if (combineConjugation(Res))
53420 // Try to combine the following nodes:
53421 // FADD(A, FMA(B, C, 0)) and FADD(A, FMUL(B, C)) to FMA(B, C, A)
53422 static SDValue combineFaddCFmul(SDNode *N, SelectionDAG &DAG,
53423 const X86Subtarget &Subtarget) {
53424 auto AllowContract = [&DAG](const SDNodeFlags &Flags) {
53425 return DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
53426 Flags.hasAllowContract();
53429 auto HasNoSignedZero = [&DAG](const SDNodeFlags &Flags) {
53430 return DAG.getTarget().Options.NoSignedZerosFPMath ||
53431 Flags.hasNoSignedZeros();
53433 auto IsVectorAllNegativeZero = [](const SDNode *N) {
53434 if (N->getOpcode() != X86ISD::VBROADCAST_LOAD)
53436 assert(N->getSimpleValueType(0).getScalarType() == MVT::f32 &&
53437 "Unexpected vector type!");
53438 if (ConstantPoolSDNode *CP =
53439 dyn_cast<ConstantPoolSDNode>(N->getOperand(1)->getOperand(0))) {
53440 APInt AI = APInt(32, 0x80008000, true);
53441 if (const auto *CI = dyn_cast<ConstantInt>(CP->getConstVal()))
53442 return CI->getValue() == AI;
53443 if (const auto *CF = dyn_cast<ConstantFP>(CP->getConstVal()))
53444 return CF->getValue() == APFloat(APFloat::IEEEsingle(), AI);
53449 if (N->getOpcode() != ISD::FADD || !Subtarget.hasFP16() ||
53450 !AllowContract(N->getFlags()))
53453 EVT VT = N->getValueType(0);
53454 if (VT != MVT::v8f16 && VT != MVT::v16f16 && VT != MVT::v32f16)
53457 SDValue LHS = N->getOperand(0);
53458 SDValue RHS = N->getOperand(1);
53460 SDValue FAddOp1, MulOp0, MulOp1;
53461 auto GetCFmulFrom = [&MulOp0, &MulOp1, &IsConj, &AllowContract,
53462 &IsVectorAllNegativeZero,
53463 &HasNoSignedZero](SDValue N) -> bool {
53464 if (!N.hasOneUse() || N.getOpcode() != ISD::BITCAST)
53466 SDValue Op0 = N.getOperand(0);
53467 unsigned Opcode = Op0.getOpcode();
53468 if (Op0.hasOneUse() && AllowContract(Op0->getFlags())) {
53469 if ((Opcode == X86ISD::VFMULC || Opcode == X86ISD::VFCMULC)) {
53470 MulOp0 = Op0.getOperand(0);
53471 MulOp1 = Op0.getOperand(1);
53472 IsConj = Opcode == X86ISD::VFCMULC;
53475 if ((Opcode == X86ISD::VFMADDC || Opcode == X86ISD::VFCMADDC) &&
53476 ((ISD::isBuildVectorAllZeros(Op0->getOperand(2).getNode()) &&
53477 HasNoSignedZero(Op0->getFlags())) ||
53478 IsVectorAllNegativeZero(Op0->getOperand(2).getNode()))) {
53479 MulOp0 = Op0.getOperand(0);
53480 MulOp1 = Op0.getOperand(1);
53481 IsConj = Opcode == X86ISD::VFCMADDC;
53488 if (GetCFmulFrom(LHS))
53490 else if (GetCFmulFrom(RHS))
53495 MVT CVT = MVT::getVectorVT(MVT::f32, VT.getVectorNumElements() / 2);
53496 FAddOp1 = DAG.getBitcast(CVT, FAddOp1);
53497 unsigned NewOp = IsConj ? X86ISD::VFCMADDC : X86ISD::VFMADDC;
53498 // FIXME: How do we handle the case when the fast-math flags of the FADD
// differ from the MUL's?
SDValue CFmul =
53501 DAG.getNode(NewOp, SDLoc(N), CVT, MulOp0, MulOp1, FAddOp1, N->getFlags());
53502 return DAG.getBitcast(VT, CFmul);
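// For example (illustrative), viewing v8f16 as four complex f16 pairs:
//   t1: v8f16 = fadd contract A, (bitcast (X86ISD::VFMULC B, C))
// becomes
//   t2: v4f32 = X86ISD::VFMADDC B, C, (bitcast A)
//   t3: v8f16 = bitcast t2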
53505 /// Do target-specific dag combines on floating-point adds/subs.
53506 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
53507 const X86Subtarget &Subtarget) {
53508 if (SDValue HOp = combineToHorizontalAddSub(N, DAG, Subtarget))
53511 if (SDValue COp = combineFaddCFmul(N, DAG, Subtarget))
53517 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify the codegen.
53519 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
53520 /// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
53521 /// anything that is guaranteed to be transformed by DAGCombiner.
53522 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
53523 const X86Subtarget &Subtarget,
53525 assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
53526 SDValue Src = N->getOperand(0);
53527 unsigned SrcOpcode = Src.getOpcode();
53528 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53530 EVT VT = N->getValueType(0);
53531 EVT SrcVT = Src.getValueType();
53533 auto IsFreeTruncation = [VT](SDValue Op) {
53534 unsigned TruncSizeInBits = VT.getScalarSizeInBits();
53536 // See if this has been extended from a smaller/equal size to
53537 // the truncation size, allowing a truncation to combine with the extend.
53538 unsigned Opcode = Op.getOpcode();
53539 if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
53540 Opcode == ISD::ZERO_EXTEND) &&
53541 Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
53544 // See if this is a single use constant which can be constant folded.
53545 // NOTE: We don't peek through bitcasts here because there is currently
53546 // no support for constant folding truncate+bitcast+vector_of_constants. So
53547 // we'd just end up with a truncate on both operands, which would
53548 // get turned back into (truncate (binop)) causing an infinite loop.
53549 return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
53552 auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
53553 SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
53554 SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
53555 return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
53558 // Don't combine if the operation has other uses.
53559 if (!Src.hasOneUse())
53562 // Only support vector truncation for now.
53563 // TODO: i64 scalar math would benefit as well.
53564 if (!VT.isVector())
53567 // In most cases it's only worth pre-truncating if we're only facing the cost
53568 // of one truncation.
53569 // i.e. if one of the inputs will constant fold or the input is repeated.
53570 switch (SrcOpcode) {
53572 // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
53573 // better to truncate if we have the chance.
53574 if (SrcVT.getScalarType() == MVT::i64 &&
53575 TLI.isOperationLegal(SrcOpcode, VT) &&
53576 !TLI.isOperationLegal(SrcOpcode, SrcVT))
53577 return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
53584 SDValue Op0 = Src.getOperand(0);
53585 SDValue Op1 = Src.getOperand(1);
53586 if (TLI.isOperationLegal(SrcOpcode, VT) &&
53587 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
53588 return TruncateArithmetic(Op0, Op1);
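// e.g. (sketch) on AVX2, where v4i32 multiplies are legal but v4i64
// multiplies are not:
//   t1: v4i64 = mul X, Y
//   t2: v4i32 = trunc t1
// becomes
//   t2: v4i32 = mul (trunc X), (trunc Y)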
53596 /// This function transforms vector truncations of 'extended sign-bits' or
53597 /// 'extended zero-bits' values (vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32) into
53598 /// X86ISD::PACKSS/PACKUS operations.
53599 /// TODO: Remove this and just use LowerTruncateVecPackWithSignBits.
53600 static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
53602 const X86Subtarget &Subtarget) {
53604 if (!Subtarget.hasSSE2())
53607 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
53610 SDValue In = N->getOperand(0);
53611 if (!In.getValueType().isSimple())
53614 MVT VT = N->getValueType(0).getSimpleVT();
53615 MVT SVT = VT.getScalarType();
53617 MVT InVT = In.getValueType().getSimpleVT();
53618 MVT InSVT = InVT.getScalarType();
53620 // Check we have a truncation suited for PACKSS/PACKUS.
53621 if (!isPowerOf2_32(VT.getVectorNumElements()))
53623 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
53625 if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
53628 // Truncation to sub-128bit vXi32 can be better handled with shuffles.
53629 if (SVT == MVT::i32 && VT.getSizeInBits() < 128)
53632 // AVX512 has fast truncate, but if the input is already going to be split,
53633 // there's no harm in trying pack.
53634 if (Subtarget.hasAVX512() &&
53635 !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
53636 InVT.is512BitVector())) {
53637 // PACK should still be worth it for 128-bit vectors if the sources were
53638 // originally concatenated from subvectors.
53639 if (VT.getSizeInBits() > 128 || !isFreeToSplitVector(In.getNode(), DAG))
53643 unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
53644 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
53646 // Use PACKUS if the input has zero-bits that extend all the way to the
53647 // packed/truncated value. e.g. masks, zext_in_reg, etc.
53648 KnownBits Known = DAG.computeKnownBits(In);
53649 unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
53650 if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
53651 return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
53653 // Use PACKSS if the input has sign-bits that extend all the way to the
53654 // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
53655 unsigned NumSignBits = DAG.ComputeNumSignBits(In);
53657 // Don't use PACKSS for vXi64 -> vXi32 truncations unless we're dealing with
53658 // a sign splat. ComputeNumSignBits struggles to see through BITCASTs later
53659 // on and combines/simplifications can't then use it.
53660 if (SVT == MVT::i32 && NumSignBits != InSVT.getSizeInBits())
53663 unsigned MinSignBits = InSVT.getSizeInBits() - NumPackedSignBits;
53664 if (NumSignBits > MinSignBits)
53665 return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
53667 // If we have a srl that only generates signbits that we will discard in
53668 // the truncation then we can use PACKSS by converting the srl to a sra.
53669 // SimplifyDemandedBits often relaxes sra to srl so we need to reverse it.
53670 if (In.getOpcode() == ISD::SRL && N->isOnlyUserOf(In.getNode()))
53671 if (const APInt *ShAmt = DAG.getValidShiftAmountConstant(
53672 In, APInt::getAllOnes(VT.getVectorNumElements()))) {
53673 if (*ShAmt == MinSignBits) {
53674 SDValue NewIn = DAG.getNode(ISD::SRA, DL, InVT, In->ops());
53675 return truncateVectorWithPACK(X86ISD::PACKSS, VT, NewIn, DL, DAG,
Subtarget);
53683 // Try to form a MULHU or MULHS node by looking for
53684 // (trunc (srl (mul ext, ext), 16))
53685 // TODO: This is X86 specific because we want to be able to handle wide types
53686 // before type legalization. But we can only do it if the vector will be
53687 // legalized via widening/splitting. Type legalization can't handle promotion
53688 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG combiner.
53690 static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
53691 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
53692 // First instruction should be a right shift of a multiply.
53693 if (Src.getOpcode() != ISD::SRL ||
53694 Src.getOperand(0).getOpcode() != ISD::MUL)
53697 if (!Subtarget.hasSSE2())
53700 // Only handle vXi16 types that are at least 128 bits unless they will be widened.
53702 if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
53705 // Input type should be at least vXi32.
53706 EVT InVT = Src.getValueType();
53707 if (InVT.getVectorElementType().getSizeInBits() < 32)
53710 // Need a shift by 16.
APInt ShiftAmt;
53712 if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
ShiftAmt != 16)
return SDValue();
53716 SDValue LHS = Src.getOperand(0).getOperand(0);
53717 SDValue RHS = Src.getOperand(0).getOperand(1);
53719 // Count leading sign/zero bits on both inputs - if there are enough then
53720 // truncation back to vXi16 will be cheap - either as a pack/shuffle
53721 // sequence or using AVX512 truncations. If the inputs are sext/zext then the
53722 // truncations may actually be free by peeking through to the ext source.
53723 auto IsSext = [&DAG](SDValue V) {
53724 return DAG.ComputeMaxSignificantBits(V) <= 16;
53726 auto IsZext = [&DAG](SDValue V) {
53727 return DAG.computeKnownBits(V).countMaxActiveBits() <= 16;
53730 bool IsSigned = IsSext(LHS) && IsSext(RHS);
53731 bool IsUnsigned = IsZext(LHS) && IsZext(RHS);
53732 if (!IsSigned && !IsUnsigned)
53735 // Check if both inputs are extensions, which will be removed by truncation.
53736 bool IsTruncateFree = (LHS.getOpcode() == ISD::SIGN_EXTEND ||
53737 LHS.getOpcode() == ISD::ZERO_EXTEND) &&
53738 (RHS.getOpcode() == ISD::SIGN_EXTEND ||
53739 RHS.getOpcode() == ISD::ZERO_EXTEND) &&
53740 LHS.getOperand(0).getScalarValueSizeInBits() <= 16 &&
53741 RHS.getOperand(0).getScalarValueSizeInBits() <= 16;
53743 // For AVX2+ targets, with the upper bits known zero, we can perform MULHU on
53744 // the (bitcasted) inputs directly, and then cheaply pack/truncate the result
53745 // (upper elts will be zero). Don't attempt this with just AVX512F as MULHU
53746 // will have to split anyway.
53747 unsigned InSizeInBits = InVT.getSizeInBits();
53748 if (IsUnsigned && !IsTruncateFree && Subtarget.hasInt256() &&
53749 !(Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.is256BitVector()) &&
53750 (InSizeInBits % 16) == 0) {
53751 EVT BCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
53752 InVT.getSizeInBits() / 16);
53753 SDValue Res = DAG.getNode(ISD::MULHU, DL, BCVT, DAG.getBitcast(BCVT, LHS),
53754 DAG.getBitcast(BCVT, RHS));
53755 return DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getBitcast(InVT, Res));
53758 // Truncate back to source type.
53759 LHS = DAG.getNode(ISD::TRUNCATE, DL, VT, LHS);
53760 RHS = DAG.getNode(ISD::TRUNCATE, DL, VT, RHS);
53762 unsigned Opc = IsSigned ? ISD::MULHS : ISD::MULHU;
53763 return DAG.getNode(Opc, DL, VT, LHS, RHS);
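// Illustrative sketch of the fold above (values chosen for exposition): for a
// v8i16 result, this rewrites
//   (v8i16 (trunc (v8i32 (srl (v8i32 (mul (zext a), (zext b))), 16))))
// into (v8i16 (mulhu a, b)), which can then be selected as a single PMULHUW.
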
// Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
// from one vector with signed bytes from another vector, adds together
// adjacent pairs of 16-bit products, and saturates the result before
// truncating to 16-bits.
//
// Which looks something like this:
// (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
//                 (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget,
                               const SDLoc &DL) {
  if (!VT.isVector() || !Subtarget.hasSSSE3())
    return SDValue();

  unsigned NumElems = VT.getVectorNumElements();
  EVT ScalarVT = VT.getVectorElementType();
  if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
    return SDValue();

  SDValue SSatVal = detectSSatPattern(In, VT);
  if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
    return SDValue();

  // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
  // of multiplies from even/odd elements.
  SDValue N0 = SSatVal.getOperand(0);
  SDValue N1 = SSatVal.getOperand(1);

  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  SDValue N10 = N1.getOperand(0);
  SDValue N11 = N1.getOperand(1);

  // TODO: Handle constant vectors and use knownbits/computenumsignbits?
  // Canonicalize zero_extend to LHS.
  if (N01.getOpcode() == ISD::ZERO_EXTEND)
    std::swap(N00, N01);
  if (N11.getOpcode() == ISD::ZERO_EXTEND)
    std::swap(N10, N11);

  // Ensure we have a zero_extend and a sign_extend.
  if (N00.getOpcode() != ISD::ZERO_EXTEND ||
      N01.getOpcode() != ISD::SIGN_EXTEND ||
      N10.getOpcode() != ISD::ZERO_EXTEND ||
      N11.getOpcode() != ISD::SIGN_EXTEND)
    return SDValue();

  // Peek through the extends.
  N00 = N00.getOperand(0);
  N01 = N01.getOperand(0);
  N10 = N10.getOperand(0);
  N11 = N11.getOperand(0);

  // Ensure the extend is from vXi8.
  if (N00.getValueType().getVectorElementType() != MVT::i8 ||
      N01.getValueType().getVectorElementType() != MVT::i8 ||
      N10.getValueType().getVectorElementType() != MVT::i8 ||
      N11.getValueType().getVectorElementType() != MVT::i8)
    return SDValue();

  // All inputs should be build_vectors.
  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
      N01.getOpcode() != ISD::BUILD_VECTOR ||
      N10.getOpcode() != ISD::BUILD_VECTOR ||
      N11.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // N00/N10 are zero extended. N01/N11 are sign extended.

  // For each element, we need to ensure we have an odd element from one vector
  // multiplied by the odd element of another vector and the even element from
  // one of the same vectors being multiplied by the even element from the
  // other vector. So we need to make sure for each element i, this operator
  // is being performed:
  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
  SDValue ZExtIn, SExtIn;
  for (unsigned i = 0; i != NumElems; ++i) {
    SDValue N00Elt = N00.getOperand(i);
    SDValue N01Elt = N01.getOperand(i);
    SDValue N10Elt = N10.getOperand(i);
    SDValue N11Elt = N11.getOperand(i);
    // TODO: Be more tolerant to undefs.
    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
      return SDValue();
    unsigned IdxN00 = ConstN00Elt->getZExtValue();
    unsigned IdxN01 = ConstN01Elt->getZExtValue();
    unsigned IdxN10 = ConstN10Elt->getZExtValue();
    unsigned IdxN11 = ConstN11Elt->getZExtValue();
    // Add is commutative so indices can be reordered.
    if (IdxN00 > IdxN10) {
      std::swap(IdxN00, IdxN10);
      std::swap(IdxN01, IdxN11);
    }
    // N0 indices must be the even element. N1 indices must be the next odd
    // element.
    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
      return SDValue();
    SDValue N00In = N00Elt.getOperand(0);
    SDValue N01In = N01Elt.getOperand(0);
    SDValue N10In = N10Elt.getOperand(0);
    SDValue N11In = N11Elt.getOperand(0);
    // First time we find an input capture it.
    if (!ZExtIn) {
      ZExtIn = N00In;
      SExtIn = N01In;
    }
    if (ZExtIn != N00In || SExtIn != N01In ||
        ZExtIn != N10In || SExtIn != N11In)
      return SDValue();
  }

  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                         ArrayRef<SDValue> Ops) {
    // Shrink by adding truncate nodes and let DAGCombine fold with the
    // sources.
    EVT InVT = Ops[0].getValueType();
    assert(InVT.getScalarType() == MVT::i8 &&
           "Unexpected scalar element type");
    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                 InVT.getVectorNumElements() / 2);
    return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
                          PMADDBuilder);
}

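// For example, for a v8i16 result built from v16i8 inputs A (unsigned) and
// B (signed), the matched pattern computes, per element i:
//   ssat(zext(A[2*i]) * sext(B[2*i]) + zext(A[2*i+1]) * sext(B[2*i+1]))
// which is exactly the semantics of a single (V)PMADDUBSW on A and B.
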
static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(0);
  SDLoc DL(N);

  // Attempt to pre-truncate inputs to arithmetic ops instead.
  if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
    return V;

  // Try to detect AVG pattern first.
  if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
    return Avg;

  // Try to detect PMADD.
  if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
    return PMAdd;

  // Try to combine truncation with signed/unsigned saturation.
  if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
    return Val;

  // Try to combine PMULHUW/PMULHW for vXi16.
  if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
    return V;

  // The bitcast source is a direct mmx result.
  // Detect bitcasts from x86mmx to i32.
  if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
    SDValue BCSrc = Src.getOperand(0);
    if (BCSrc.getValueType() == MVT::x86mmx)
      return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
  }

  // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
  if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
    return V;

  return SDValue();
}

static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);
  SDValue In = N->getOperand(0);
  SDLoc DL(N);

  if (SDValue SSatVal = detectSSatPattern(In, VT))
    return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
  if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL))
    return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedMask(APInt::getAllOnes(VT.getScalarSizeInBits()));
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}

/// Returns the negated value if the node \p N flips sign of FP value.
///
/// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
/// or FSUB(0, x).
/// AVX512F does not have FXOR, so FNEG is lowered as
/// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
/// In this case we go through all bitcasts.
/// This also recognizes splat of a negated value and returns the splat of that
/// value.
static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
  if (N->getOpcode() == ISD::FNEG)
    return N->getOperand(0);

  // Don't recurse exponentially.
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return SDValue();

  unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();

  SDValue Op = peekThroughBitcasts(SDValue(N, 0));
  EVT VT = Op->getValueType(0);

  // Make sure the element size doesn't change.
  if (VT.getScalarSizeInBits() != ScalarSize)
    return SDValue();

  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case ISD::VECTOR_SHUFFLE: {
    // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
    // of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
    if (!Op.getOperand(1).isUndef())
      return SDValue();
    if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
      if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
        return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
                                    cast<ShuffleVectorSDNode>(Op)->getMask());
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
    // -V, INDEX).
    SDValue InsVector = Op.getOperand(0);
    SDValue InsVal = Op.getOperand(1);
    if (!InsVector.isUndef())
      return SDValue();
    if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
      if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
        return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
                           NegInsVal, Op.getOperand(2));
    break;
  }
  case ISD::FSUB:
  case ISD::XOR:
  case X86ISD::FXOR: {
    SDValue Op1 = Op.getOperand(1);
    SDValue Op0 = Op.getOperand(0);

    // For XOR and FXOR, we want to check if constant
    // bits of Op1 are sign bit masks. For FSUB, we
    // have to check if constant bits of Op0 are sign
    // bit masks and hence we swap the operands.
    if (Opc == ISD::FSUB)
      std::swap(Op0, Op1);

    APInt UndefElts;
    SmallVector<APInt, 16> EltBits;
    // Extract constant bits and see if they are all
    // sign bit masks. Ignore the undef elements.
    if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
                                      /* AllowWholeUndefs */ true,
                                      /* AllowPartialUndefs */ false)) {
      for (unsigned I = 0, E = EltBits.size(); I < E; I++)
        if (!UndefElts[I] && !EltBits[I].isSignMask())
          return SDValue();

      // Only allow bitcast from correctly-sized constant.
      Op0 = peekThroughBitcasts(Op0);
      if (Op0.getScalarValueSizeInBits() == ScalarSize)
        return Op0;
    }
    break;
  }
  }

  return SDValue();
}

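// For example, on AVX512F a v4f32 fneg may reach here already lowered as
//   (v4f32 (bitcast (xor (v4i32 (bitcast x)), (v4i32 splat 0x80000000))))
// and isFNEG peeks through the bitcasts and sign-mask XOR to return x.
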
static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
                                bool NegRes) {
  if (NegMul) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode");
    case ISD::FMA:              Opcode = X86ISD::FNMADD;        break;
    case ISD::STRICT_FMA:       Opcode = X86ISD::STRICT_FNMADD; break;
    case X86ISD::FMADD_RND:     Opcode = X86ISD::FNMADD_RND;    break;
    case X86ISD::FMSUB:         Opcode = X86ISD::FNMSUB;        break;
    case X86ISD::STRICT_FMSUB:  Opcode = X86ISD::STRICT_FNMSUB; break;
    case X86ISD::FMSUB_RND:     Opcode = X86ISD::FNMSUB_RND;    break;
    case X86ISD::FNMADD:        Opcode = ISD::FMA;              break;
    case X86ISD::STRICT_FNMADD: Opcode = ISD::STRICT_FMA;       break;
    case X86ISD::FNMADD_RND:    Opcode = X86ISD::FMADD_RND;     break;
    case X86ISD::FNMSUB:        Opcode = X86ISD::FMSUB;         break;
    case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FMSUB;  break;
    case X86ISD::FNMSUB_RND:    Opcode = X86ISD::FMSUB_RND;     break;
    }
  }

  if (NegAcc) {
    switch (Opcode) {
    default: llvm_unreachable("Unexpected opcode");
    case ISD::FMA:              Opcode = X86ISD::FMSUB;         break;
    case ISD::STRICT_FMA:       Opcode = X86ISD::STRICT_FMSUB;  break;
    case X86ISD::FMADD_RND:     Opcode = X86ISD::FMSUB_RND;     break;
    case X86ISD::FMSUB:         Opcode = ISD::FMA;              break;
    case X86ISD::STRICT_FMSUB:  Opcode = ISD::STRICT_FMA;       break;
    case X86ISD::FMSUB_RND:     Opcode = X86ISD::FMADD_RND;     break;
    case X86ISD::FNMADD:        Opcode = X86ISD::FNMSUB;        break;
    case X86ISD::STRICT_FNMADD: Opcode = X86ISD::STRICT_FNMSUB; break;
    case X86ISD::FNMADD_RND:    Opcode = X86ISD::FNMSUB_RND;    break;
    case X86ISD::FNMSUB:        Opcode = X86ISD::FNMADD;        break;
    case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FNMADD; break;
    case X86ISD::FNMSUB_RND:    Opcode = X86ISD::FNMADD_RND;    break;
    case X86ISD::FMADDSUB:      Opcode = X86ISD::FMSUBADD;      break;
    case X86ISD::FMADDSUB_RND:  Opcode = X86ISD::FMSUBADD_RND;  break;
    case X86ISD::FMSUBADD:      Opcode = X86ISD::FMADDSUB;      break;
    case X86ISD::FMSUBADD_RND:  Opcode = X86ISD::FMADDSUB_RND;  break;
    }
  }

  if (NegRes) {
    switch (Opcode) {
    // For accuracy reasons, we never combine fneg and fma under strict FP.
    default: llvm_unreachable("Unexpected opcode");
    case ISD::FMA:           Opcode = X86ISD::FNMSUB;     break;
    case X86ISD::FMADD_RND:  Opcode = X86ISD::FNMSUB_RND; break;
    case X86ISD::FMSUB:      Opcode = X86ISD::FNMADD;     break;
    case X86ISD::FMSUB_RND:  Opcode = X86ISD::FNMADD_RND; break;
    case X86ISD::FNMADD:     Opcode = X86ISD::FMSUB;      break;
    case X86ISD::FNMADD_RND: Opcode = X86ISD::FMSUB_RND;  break;
    case X86ISD::FNMSUB:     Opcode = ISD::FMA;           break;
    case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMADD_RND;  break;
    }
  }

  return Opcode;
}

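// For example, negateFMAOpcode(ISD::FMA, /*NegMul=*/true, false, false)
// yields X86ISD::FNMADD (-(a*b)+c), while
// negateFMAOpcode(ISD::FMA, false, /*NegAcc=*/true, false) yields
// X86ISD::FMSUB (a*b-c).
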
/// Do target-specific dag combines on floating point negations.
static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  EVT OrigVT = N->getValueType(0);
  SDValue Arg = isFNEG(DAG, N);
  if (!Arg)
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Arg.getValueType();
  EVT SVT = VT.getScalarType();
  SDLoc DL(N);

  // Let legalize expand this if it isn't a legal type yet.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // If we're negating a FMUL node on a target with FMA, then we can avoid the
  // use of a constant by performing (-0 - A*B) instead.
  // FIXME: Check rounding control flags as well once it becomes available.
  if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
      Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
    SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
    SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
                                  Arg.getOperand(1), Zero);
    return DAG.getBitcast(OrigVT, NewNode);
  }

  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOperations = !DCI.isBeforeLegalizeOps();
  if (SDValue NegArg =
          TLI.getNegatedExpression(Arg, DAG, LegalOperations, CodeSize))
    return DAG.getBitcast(OrigVT, NegArg);

  return SDValue();
}

SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                                bool LegalOperations,
                                                bool ForCodeSize,
                                                NegatibleCost &Cost,
                                                unsigned Depth) const {
  // fneg patterns are removable even if they have multiple uses.
  if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth)) {
    Cost = NegatibleCost::Cheaper;
    return DAG.getBitcast(Op.getValueType(), Arg);
  }

  EVT VT = Op.getValueType();
  EVT SVT = VT.getScalarType();
  unsigned Opc = Op.getOpcode();
  SDNodeFlags Flags = Op.getNode()->getFlags();
  switch (Opc) {
  case ISD::FMA:
  case X86ISD::FMSUB:
  case X86ISD::FNMADD:
  case X86ISD::FNMSUB:
  case X86ISD::FMADD_RND:
  case X86ISD::FMSUB_RND:
  case X86ISD::FNMADD_RND:
  case X86ISD::FNMSUB_RND: {
    if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
        !(SVT == MVT::f32 || SVT == MVT::f64) ||
        !isOperationLegal(ISD::FMA, VT))
      break;

    // Don't fold (fneg (fma (fneg x), y, (fneg z))) to (fma x, y, z)
    // if it may have signed zeros.
    if (!Flags.hasNoSignedZeros())
      break;

    // This is always negatible for free but we might be able to remove some
    // extra operand negations as well.
    SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
    for (int i = 0; i != 3; ++i)
      NewOps[i] = getCheaperNegatedExpression(
          Op.getOperand(i), DAG, LegalOperations, ForCodeSize, Depth + 1);

    bool NegA = !!NewOps[0];
    bool NegB = !!NewOps[1];
    bool NegC = !!NewOps[2];
    unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);

    Cost = (NegA || NegB || NegC) ? NegatibleCost::Cheaper
                                  : NegatibleCost::Neutral;

    // Fill in the non-negated ops with the original values.
    for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
      if (!NewOps[i])
        NewOps[i] = Op.getOperand(i);
    return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
  }
  case X86ISD::FRCP:
    if (SDValue NegOp0 =
            getNegatedExpression(Op.getOperand(0), DAG, LegalOperations,
                                 ForCodeSize, Cost, Depth + 1))
      return DAG.getNode(Opc, SDLoc(Op), VT, NegOp0);
    break;
  }

  return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
                                              ForCodeSize, Cost, Depth);
}

static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {
  MVT VT = N->getSimpleValueType(0);
  // If we have integer vector types available, use the integer opcodes.
  if (!VT.isVector() || !Subtarget.hasSSE2())
    return SDValue();

  SDLoc dl(N);

  unsigned IntBits = VT.getScalarSizeInBits();
  MVT IntSVT = MVT::getIntegerVT(IntBits);
  MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);

  SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
  SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
  unsigned IntOpcode;
  switch (N->getOpcode()) {
  default: llvm_unreachable("Unexpected FP logic op");
  case X86ISD::FOR:   IntOpcode = ISD::OR;       break;
  case X86ISD::FXOR:  IntOpcode = ISD::XOR;      break;
  case X86ISD::FAND:  IntOpcode = ISD::AND;      break;
  case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
  }
  SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
  return DAG.getBitcast(VT, IntOp);
}

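// For example, (v4f32 X86ISD::FAND a, b) becomes
//   (v4f32 (bitcast (and (v4i32 (bitcast a)), (v4i32 (bitcast b)))))
// so the generic integer combines (and ultimately PAND) can apply.
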
/// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() != ISD::XOR)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
    return SDValue();

  X86::CondCode NewCC = X86::GetOppositeBranchCondition(
      X86::CondCode(LHS->getConstantOperandVal(0)));
  SDLoc DL(N);
  return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
}

static SDValue combineXorSubCTLZ(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {
  assert((N->getOpcode() == ISD::XOR || N->getOpcode() == ISD::SUB) &&
         "Invalid opcode for combining with CTLZ");
  if (Subtarget.hasFastLZCNT())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32 &&
      (VT != MVT::i64 || !Subtarget.is64Bit()))
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (N0.getOpcode() != ISD::CTLZ_ZERO_UNDEF &&
      N1.getOpcode() != ISD::CTLZ_ZERO_UNDEF)
    return SDValue();

  SDValue OpCTLZ;
  SDValue OpSizeTM1;

  if (N1.getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
    OpCTLZ = N1;
    OpSizeTM1 = N0;
  } else if (N->getOpcode() == ISD::SUB) {
    return SDValue();
  } else {
    OpCTLZ = N0;
    OpSizeTM1 = N1;
  }

  if (!OpCTLZ.hasOneUse())
    return SDValue();
  auto *C = dyn_cast<ConstantSDNode>(OpSizeTM1);
  if (!C)
    return SDValue();

  if (C->getZExtValue() != uint64_t(OpCTLZ.getValueSizeInBits() - 1))
    return SDValue();
  SDLoc DL(N);
  EVT OpVT = VT;
  SDValue Op = OpCTLZ.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, DL, OpVT, Op);
  }

  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, DL, VTs, Op);
  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Op);

  return Op;
}

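// For example, for i32 this turns (xor (ctlz_zero_undef x), 31), and the
// equivalent (sub 31, (ctlz_zero_undef x)), into (X86ISD::BSR x): for
// nonzero x, 31 - ctlz(x) is the index of the highest set bit.
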
static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // If this is SSE1 only convert to FXOR to avoid scalarization.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
    return DAG.getBitcast(MVT::v4i32,
                          DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
                                      DAG.getBitcast(MVT::v4f32, N0),
                                      DAG.getBitcast(MVT::v4f32, N1)));
  }

  if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
    return Cmp;

  if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
    return R;

  if (SDValue R = combineBitOpWithShift(N, DAG))
    return R;

  if (SDValue R = combineBitOpWithPACK(N, DAG))
    return R;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
    return FPLogic;

  if (SDValue R = combineXorSubCTLZ(N, DAG, Subtarget))
    return R;

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue SetCC = foldXor1SetCC(N, DAG))
    return SetCC;

  if (SDValue R = combineOrXorWithSETCC(N, N0, N1, DAG))
    return R;

  if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
    return RV;

  // Fold not(iX bitcast(vXi1)) -> (iX bitcast(not(vec))) for legal boolvecs.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (llvm::isAllOnesConstant(N1) && N0.getOpcode() == ISD::BITCAST &&
      N0.getOperand(0).getValueType().isVector() &&
      N0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
      TLI.isTypeLegal(N0.getOperand(0).getValueType()) && N0.hasOneUse()) {
    return DAG.getBitcast(VT, DAG.getNOT(SDLoc(N), N0.getOperand(0),
                                         N0.getOperand(0).getValueType()));
  }

  // Handle AVX512 mask widening.
  // Fold not(insert_subvector(undef,sub)) -> insert_subvector(undef,not(sub))
  if (ISD::isBuildVectorAllOnes(N1.getNode()) && VT.isVector() &&
      VT.getVectorElementType() == MVT::i1 &&
      N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.getOperand(0).isUndef() &&
      TLI.isTypeLegal(N0.getOperand(1).getValueType())) {
    return DAG.getNode(
        ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
        DAG.getNOT(SDLoc(N), N0.getOperand(1), N0.getOperand(1).getValueType()),
        N0.getOperand(2));
  }

  // Fold xor(zext(xor(x,c1)),c2) -> xor(zext(x),xor(zext(c1),c2))
  // Fold xor(truncate(xor(x,c1)),c2) -> xor(truncate(x),xor(truncate(c1),c2))
  // TODO: Under what circumstances could this be performed in DAGCombine?
  if ((N0.getOpcode() == ISD::TRUNCATE || N0.getOpcode() == ISD::ZERO_EXTEND) &&
      N0.getOperand(0).getOpcode() == N->getOpcode()) {
    SDValue TruncExtSrc = N0.getOperand(0);
    auto *N1C = dyn_cast<ConstantSDNode>(N1);
    auto *N001C = dyn_cast<ConstantSDNode>(TruncExtSrc.getOperand(1));
    if (N1C && !N1C->isOpaque() && N001C && !N001C->isOpaque()) {
      SDLoc DL(N);
      SDValue LHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(0), DL, VT);
      SDValue RHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(1), DL, VT);
      return DAG.getNode(ISD::XOR, DL, VT, LHS,
                         DAG.getNode(ISD::XOR, DL, VT, RHS, N1));
    }
  }

  if (SDValue R = combineBMILogicOp(N, DAG, Subtarget))
    return R;

  return combineFneg(N, DAG, DCI, Subtarget);
}

static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  unsigned NumBits = VT.getSizeInBits();

  // TODO - Constant Folding.

  // Simplify the inputs.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedMask(APInt::getAllOnes(NumBits));
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}

static bool isNullFPScalarOrVectorConst(SDValue V) {
  return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
}

/// If a value is a scalar FP zero or a vector FP zero (potentially including
/// undefined elements), return a zero constant that may be used to fold away
/// that value. In the case of a vector, the returned constant will not contain
/// undefined elements even if the input parameter does. This makes it suitable
/// to be used as a replacement operand with operations (eg, bitwise-and) where
/// an undef should not propagate.
static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  if (!isNullFPScalarOrVectorConst(V))
    return SDValue();

  if (V.getValueType().isVector())
    return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));

  return V;
}

static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
  if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
        (VT == MVT::f64 && Subtarget.hasSSE2()) ||
        (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
    return SDValue();

  auto isAllOnesConstantFP = [](SDValue V) {
    if (V.getSimpleValueType().isVector())
      return ISD::isBuildVectorAllOnes(V.getNode());
    auto *C = dyn_cast<ConstantFPSDNode>(V);
    return C && C->getConstantFPValue()->isAllOnesValue();
  };

  // fand (fxor X, -1), Y --> fandn X, Y
  if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
    return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);

  // fand X, (fxor Y, -1) --> fandn Y, X
  if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
    return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);

  return SDValue();
}

/// Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
                           const X86Subtarget &Subtarget) {
  // FAND(0.0, x) -> 0.0
  if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
    return V;

  // FAND(x, 0.0) -> 0.0
  if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
    return V;

  if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
    return V;

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

/// Do target-specific dag combines on X86ISD::FANDN nodes.
static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
                            const X86Subtarget &Subtarget) {
  // FANDN(0.0, x) -> x
  if (isNullFPScalarOrVectorConst(N->getOperand(0)))
    return N->getOperand(1);

  // FANDN(x, 0.0) -> 0.0
  if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
    return V;

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);

  // F[X]OR(0.0, x) -> x
  if (isNullFPScalarOrVectorConst(N->getOperand(0)))
    return N->getOperand(1);

  // F[X]OR(x, 0.0) -> x
  if (isNullFPScalarOrVectorConst(N->getOperand(1)))
    return N->getOperand(0);

  if (SDValue NewVal = combineFneg(N, DAG, DCI, Subtarget))
    return NewVal;

  return lowerX86FPLogicOp(N, DAG, Subtarget);
}

/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);

  // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
  if (!DAG.getTarget().Options.NoNaNsFPMath ||
      !DAG.getTarget().Options.NoSignedZerosFPMath)
    return SDValue();

  // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
  // into FMINC and FMAXC, which are commutative operations.
  unsigned NewOp = 0;
  switch (N->getOpcode()) {
  default: llvm_unreachable("unknown opcode");
  case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
  case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
  }

  return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
                     N->getOperand(0), N->getOperand(1));
}

static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  if (Subtarget.useSoftFloat() || isSoftFP16(VT, Subtarget))
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
        (Subtarget.hasSSE2() && VT == MVT::f64) ||
        (Subtarget.hasFP16() && VT == MVT::f16) ||
        (VT.isVector() && TLI.isTypeLegal(VT))))
    return SDValue();

  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDLoc DL(N);
  auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;

  // If we don't have to respect NaN inputs, this is a direct translation to x86
  // min/max instructions.
  if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
    return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());

  // If one of the operands is known non-NaN use the native min/max instructions
  // with the non-NaN input as second operand.
  if (DAG.isKnownNeverNaN(Op1))
    return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
  if (DAG.isKnownNeverNaN(Op0))
    return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());

  // If we have to respect NaN inputs, this takes at least 3 instructions.
  // Favor a library call when operating on a scalar and minimizing code size.
  if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
    return SDValue();

  EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                         VT);

  // There are 4 possibilities involving NaN inputs, and these are the required
  // outputs:
  //                    Op1
  //                Num     NaN
  //             ----------------
  //        Num  |  Max  |  Op0 |
  //   Op0  ----------------
  //        NaN  |  Op1  |  NaN |
  //             ----------------
  //
  // The SSE FP max/min instructions were not designed for this case, but rather
  // to implement:
  //   Min = Op1 < Op0 ? Op1 : Op0
  //   Max = Op1 > Op0 ? Op1 : Op0
  //
  // So they always return Op0 if either input is a NaN. However, we can still
  // use those instructions for fmaxnum by selecting away a NaN input.

  // If either operand is NaN, the 2nd source operand (Op0) is passed through.
  SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
  SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);

  // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
  // are NaN, the NaN value of Op1 is the result.
  return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
}

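// For example, a scalar f32 fmaxnum(Op0, Op1) with unknown NaN-ness becomes
// roughly: a MAXSS with the operands commuted (so Op0 passes through when
// either input is NaN), a CMPUNORDSS comparing Op0 with itself to detect the
// Op0-is-NaN case, and a select/blend of Op1 over the max result.
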
static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
    return SDValue(N, 0);

  // Convert a full vector load into vzload when not all bits are needed.
  SDValue In = N->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
      ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
    assert(InVT.is128BitVector() && "Expected 128-bit input vector");
    LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
    unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
    MVT MemVT = MVT::getIntegerVT(NumBits);
    MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
    if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
      SDLoc dl(N);
      SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
                                    DAG.getBitcast(InVT, VZLoad));
      DCI.CombineTo(N, Convert);
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
      DCI.recursivelyDeleteUnusedNodes(LN);
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  bool IsStrict = N->isTargetStrictFPOpcode();
  EVT VT = N->getValueType(0);

  // Convert a full vector load into vzload when not all bits are needed.
  SDValue In = N->getOperand(IsStrict ? 1 : 0);
  MVT InVT = In.getSimpleValueType();
  if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
      ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
    assert(InVT.is128BitVector() && "Expected 128-bit input vector");
    LoadSDNode *LN = cast<LoadSDNode>(In);
    unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
    MVT MemVT = MVT::getFloatingPointVT(NumBits);
    MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
    if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
      SDLoc dl(N);
      if (IsStrict) {
        SDValue Convert =
            DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
                        {N->getOperand(0), DAG.getBitcast(InVT, VZLoad)});
        DCI.CombineTo(N, Convert, Convert.getValue(1));
      } else {
        SDValue Convert =
            DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(InVT, VZLoad));
        DCI.CombineTo(N, Convert);
      }
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
      DCI.recursivelyDeleteUnusedNodes(LN);
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

/// Do target-specific dag combines on X86ISD::ANDNP nodes.
static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  MVT VT = N->getSimpleValueType(0);
  int NumElts = VT.getVectorNumElements();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  SDLoc DL(N);

  // ANDNP(undef, x) -> 0
  // ANDNP(x, undef) -> 0
  if (N0.isUndef() || N1.isUndef())
    return DAG.getConstant(0, DL, VT);

  // ANDNP(0, x) -> x
  if (ISD::isBuildVectorAllZeros(N0.getNode()))
    return N1;

  // ANDNP(x, 0) -> 0
  if (ISD::isBuildVectorAllZeros(N1.getNode()))
    return DAG.getConstant(0, DL, VT);

  // ANDNP(x, -1) -> NOT(x) -> XOR(x, -1)
  if (ISD::isBuildVectorAllOnes(N1.getNode()))
    return DAG.getNOT(DL, N0, VT);

  // Turn ANDNP back to AND if input is inverted.
  if (SDValue Not = IsNOT(N0, DAG))
    return DAG.getNode(ISD::AND, DL, VT, DAG.getBitcast(VT, Not), N1);

  // Fold for better commutativity:
  // ANDNP(x,NOT(y)) -> AND(NOT(x),NOT(y)) -> NOT(OR(X,Y)).
  if (N1->hasOneUse())
    if (SDValue Not = IsNOT(N1, DAG))
      return DAG.getNOT(
          DL, DAG.getNode(ISD::OR, DL, VT, N0, DAG.getBitcast(VT, Not)), VT);

  // Constant Folding
  APInt Undefs0, Undefs1;
  SmallVector<APInt> EltBits0, EltBits1;
  if (getTargetConstantBitsFromNode(N0, EltSizeInBits, Undefs0, EltBits0)) {
    if (getTargetConstantBitsFromNode(N1, EltSizeInBits, Undefs1, EltBits1)) {
      SmallVector<APInt> ResultBits;
      for (int I = 0; I != NumElts; ++I)
        ResultBits.push_back(~EltBits0[I] & EltBits1[I]);
      return getConstVector(ResultBits, VT, DAG, DL);
    }

    // Constant fold NOT(N0) to allow us to use AND.
    // Ensure this is only performed if we can confirm that the bitcasted source
    // has oneuse to prevent an infinite loop with canonicalizeBitSelect.
    if (N0->hasOneUse()) {
      SDValue BC0 = peekThroughOneUseBitcasts(N0);
      if (BC0.getOpcode() != ISD::BITCAST) {
        for (APInt &Elt : EltBits0)
          Elt = ~Elt;
        SDValue Not = getConstVector(EltBits0, VT, DAG, DL);
        return DAG.getNode(ISD::AND, DL, VT, Not, N1);
      }
    }
  }

  // Attempt to recursively combine a bitmask ANDNP with shuffles.
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;

    // If either operand is a constant mask, then only the elements that aren't
    // zero are actually demanded by the other operand.
    auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
      APInt UndefElts;
      SmallVector<APInt> EltBits;
      APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
      APInt DemandedElts = APInt::getAllOnes(NumElts);
      if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
                                        EltBits)) {
        DemandedBits.clearAllBits();
        DemandedElts.clearAllBits();
        for (int I = 0; I != NumElts; ++I) {
          if (UndefElts[I]) {
            // We can't assume an undef src element gives an undef dst - the
            // other src might be zero.
            DemandedBits.setAllBits();
            DemandedElts.setBit(I);
          } else if ((Invert && !EltBits[I].isAllOnes()) ||
                     (!Invert && !EltBits[I].isZero())) {
            DemandedBits |= Invert ? ~EltBits[I] : EltBits[I];
            DemandedElts.setBit(I);
          }
        }
      }
      return std::make_pair(DemandedBits, DemandedElts);
    };
    APInt Bits0, Elts0;
    APInt Bits1, Elts1;
    std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
    std::tie(Bits1, Elts1) = GetDemandedMasks(N0, true);

    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
        TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
        TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
        TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
  }

  return SDValue();
}

static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
                         TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N1 = N->getOperand(1);

  // BT ignores high bits in the bit index operand.
  unsigned BitWidth = N1.getValueSizeInBits();
  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
  if (DAG.getTargetLoweringInfo().SimplifyDemandedBits(N1, DemandedMask, DCI)) {
    if (N->getOpcode() != ISD::DELETED_NODE)
      DCI.AddToWorklist(N);
    return SDValue(N, 0);
  }

  return SDValue();
}

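// For example, for an i32 bit index only the low Log2_32(32) == 5 bits are
// demanded, so a masking operation like (and idx, 31) feeding the BT index
// can be simplified away.
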
static SDValue combineCVTPH2PS(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI) {
  bool IsStrict = N->getOpcode() == X86ISD::STRICT_CVTPH2PS;
  SDValue Src = N->getOperand(IsStrict ? 1 : 0);

  if (N->getValueType(0) == MVT::v4f32 && Src.getValueType() == MVT::v8i16) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    APInt DemandedElts = APInt::getLowBitsSet(8, 4);
    if (TLI.SimplifyDemandedVectorElts(Src, DemandedElts, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }

    // Convert a full vector load into vzload when not all bits are needed.
    if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
      LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(IsStrict ? 1 : 0));
      if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::i64, MVT::v2i64, DAG)) {
        SDLoc dl(N);
        if (IsStrict) {
          SDValue Convert = DAG.getNode(
              N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
              {N->getOperand(0), DAG.getBitcast(MVT::v8i16, VZLoad)});
          DCI.CombineTo(N, Convert, Convert.getValue(1));
        } else {
          SDValue Convert = DAG.getNode(N->getOpcode(), dl, MVT::v4f32,
                                        DAG.getBitcast(MVT::v8i16, VZLoad));
          DCI.CombineTo(N, Convert);
        }

        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
        return SDValue(N, 0);
      }
    }
  }

  return SDValue();
}

// Try to combine sext_in_reg of a cmov of constants by extending the constants.
static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);

  EVT DstVT = N->getValueType(0);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();

  if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
    return SDValue();

  // Look through single use any_extends / truncs.
  SDValue IntermediateBitwidthOp;
  if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
      N0.hasOneUse()) {
    IntermediateBitwidthOp = N0;
    N0 = N0.getOperand(0);
  }

  // See if we have a single use cmov.
  if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
    return SDValue();

  SDValue CMovOp0 = N0.getOperand(0);
  SDValue CMovOp1 = N0.getOperand(1);

  // Make sure both operands are constants.
  if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
      !isa<ConstantSDNode>(CMovOp1.getNode()))
    return SDValue();

  SDLoc DL(N);

  // If we looked through an any_extend/trunc above, add one to the constants.
  if (IntermediateBitwidthOp) {
    unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
    CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
    CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
  }

  CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
  CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);

  EVT CMovVT = DstVT;
  // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
  if (DstVT == MVT::i16) {
    CMovVT = MVT::i32;
    CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
    CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
  }

  SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
                             N0.getOperand(2), N0.getOperand(3));

  if (CMovVT != DstVT)
    CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);

  return CMov;
}

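// For example, (sext_in_reg (X86ISD::CMOV 255, 0, cond), i8) becomes
// (X86ISD::CMOV -1, 0, cond): the constants are sign-extended at compile
// time and the separate sign-extension operation disappears.
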
static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);

  if (SDValue V = combineSextInRegCmov(N, DAG))
    return V;

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
  SDLoc dl(N);

  // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and AVX2
  // since there is no sign-extended shift right operation on a vector with
  // 64-bit elements.
  // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
  //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
  if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
                           N0.getOpcode() == ISD::SIGN_EXTEND)) {
    SDValue N00 = N0.getOperand(0);

    // EXTLOAD has a better solution on AVX2,
    // it may be replaced with X86ISD::VSEXT node.
    if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
      if (!ISD::isNormalLoad(N00.getNode()))
        return SDValue();

    // Attempt to promote any comparison mask ops before moving the
    // SIGN_EXTEND_INREG in the way.
    if (SDValue Promote = PromoteMaskArithmetic(N0.getNode(), DAG, Subtarget))
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Promote, N1);

    if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
      SDValue Tmp =
          DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, N00, N1);
      return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
    }
  }
  return SDValue();
}

/// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
/// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
/// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
/// opportunities to combine math ops, use an LEA, or use a complex addressing
/// mode. This can eliminate extend, add, and shift instructions.
static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
      Ext->getOpcode() != ISD::ZERO_EXTEND)
    return SDValue();

  // TODO: This should be valid for other integer types.
  EVT VT = Ext->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  SDValue Add = Ext->getOperand(0);
  if (Add.getOpcode() != ISD::ADD)
    return SDValue();

  bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
  bool NSW = Add->getFlags().hasNoSignedWrap();
  bool NUW = Add->getFlags().hasNoUnsignedWrap();

  // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
  // into the 'zext'.
  if ((Sext && !NSW) || (!Sext && !NUW))
    return SDValue();

  // Having a constant operand to the 'add' ensures that we are not increasing
  // the instruction count because the constant is extended for free below.
  // A constant operand can also become the displacement field of an LEA.
  auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
  if (!AddOp1)
    return SDValue();

  // Don't make the 'add' bigger if there's no hope of combining it with some
  // other 'add' or 'shl' instruction.
  // TODO: It may be profitable to generate simpler LEA instructions in place
  // of single 'add' instructions, but the cost model for selecting an LEA
  // currently has a high threshold.
  bool HasLEAPotential = false;
  for (auto *User : Ext->uses()) {
    if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
      HasLEAPotential = true;
      break;
    }
  }
  if (!HasLEAPotential)
    return SDValue();

  // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
  int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
  SDValue AddOp0 = Add.getOperand(0);
  SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
  SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);

  // The wider add is guaranteed to not wrap because both operands are
  // sign-extended.
  SDNodeFlags Flags;
  Flags.setNoSignedWrap(NSW);
  Flags.setNoUnsignedWrap(NUW);
  return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
}

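// For example, (i64 (sext (add nsw i32:x, 5))) becomes
// (i64 (add nsw (sext x), 5)): the extended constant is free, and the
// resulting i64 add can fold into an LEA or complex address with one of
// its 'add'/'shl' users.
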
// If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
// operands and the result of CMOV is not used anywhere else - promote CMOV
// itself instead of promoting its result. This could be beneficial, because:
//     1) X86TargetLowering::EmitLoweredSelect later can do merging of two
//        (or more) pseudo-CMOVs only when they go one-after-another and
//        getting rid of result extension code after CMOV will help that.
//     2) Promotion of constant CMOV arguments is free, hence the
//        {ANY,SIGN,ZERO}_EXTEND will just be deleted.
//     3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3-byte, so this
//        promotion is also good in terms of code-size.
//        (64-bit CMOV is 4-bytes, that's why we don't do 32-bit => 64-bit
//         promotion).
static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
  SDValue CMovN = Extend->getOperand(0);
  if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
    return SDValue();

  EVT TargetVT = Extend->getValueType(0);
  unsigned ExtendOpcode = Extend->getOpcode();
  SDLoc DL(Extend);

  EVT VT = CMovN.getValueType();
  SDValue CMovOp0 = CMovN.getOperand(0);
  SDValue CMovOp1 = CMovN.getOperand(1);

  if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
      !isa<ConstantSDNode>(CMovOp1.getNode()))
    return SDValue();

  // Only extend to i32 or i64.
  if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
    return SDValue();

  // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
  // are free.
  if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
    return SDValue();

  // If this is a zero extend to i64, we should only extend to i32 and use a
  // free zero extend to finish.
  EVT ExtendVT = TargetVT;
  if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
    ExtendVT = MVT::i32;

  CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
  CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);

  SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
                            CMovN.getOperand(2), CMovN.getOperand(3));

  // Finish extending if needed.
  if (ExtendVT != TargetVT)
    Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);

  return Res;
}

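// For example, (i32 (zext (i16 (X86ISD::CMOV 7, 42, cond)))) becomes
// (i32 (X86ISD::CMOV 7, 42, cond)): the constants are promoted for free,
// and the 32-bit CMOV has a shorter encoding than the 16-bit one.
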
// Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
// result type.
static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // Only do this combine with AVX512 for vector extends.
  if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
    return SDValue();

  // Only combine legal element types.
  EVT SVT = VT.getVectorElementType();
  if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
      SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
    return SDValue();

  // We don't have a CMPP instruction for vXf16.
  if (N0.getOperand(0).getValueType().getVectorElementType() == MVT::f16)
    return SDValue();
  // We can only do this if the vector size is 256 bits or less.
  unsigned Size = VT.getSizeInBits();
  if (Size > 256 && Subtarget.useAVX512Regs())
    return SDValue();

  // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
  // those are the only integer compares we have.
  ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
  if (ISD::isUnsignedIntSetCC(CC))
    return SDValue();

  // Only do this combine if the extension will be fully consumed by the setcc.
  EVT N00VT = N0.getOperand(0).getValueType();
  EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
  if (Size != MatchingVecType.getSizeInBits())
    return SDValue();

  SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);

  if (N->getOpcode() == ISD::ZERO_EXTEND)
    Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType());

  return Res;
}

static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  // (i32 (sext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
  if (!DCI.isBeforeLegalizeOps() &&
      N0.getOpcode() == X86ISD::SETCC_CARRY) {
    SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, N0->getOperand(0),
                                N0->getOperand(1));
    bool ReplaceOtherUses = !N0.hasOneUse();
    DCI.CombineTo(N, Setcc);
    // Replace other uses with a truncate of the widened setcc_carry.
    if (ReplaceOtherUses) {
      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
                                  N0.getValueType(), Setcc);
      DCI.CombineTo(N0.getNode(), Trunc);
    }

    return SDValue(N, 0);
  }

  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
    return NewCMov;

  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
    return V;

  if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), DL, VT, N0,
                                                 DAG, DCI, Subtarget))
    return V;

  if (VT.isVector()) {
    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
      return R;

    if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG)
      return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0));
  }

  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
    return NewAdd;

  return SDValue();
}

// Inverting a constant vector is profitable if it can be eliminated and the
// inverted vector is already present in DAG. Otherwise, it will be loaded
// anyway.
//
// We determine which of the values can be completely eliminated and invert it.
// If both are eliminable, select a vector with the first negative element.
static SDValue getInvertedVectorForFMA(SDValue V, SelectionDAG &DAG) {
  assert(ISD::isBuildVectorOfConstantFPSDNodes(V.getNode()) &&
         "ConstantFP build vector expected");
  // Check if we can eliminate V. We assume if a value is only used in FMAs, we
  // can eliminate it. Since this function is invoked for each FMA with this
  // vector.
  auto IsNotFMA = [](SDNode *Use) {
    return Use->getOpcode() != ISD::FMA && Use->getOpcode() != ISD::STRICT_FMA;
  };
  if (llvm::any_of(V->uses(), IsNotFMA))
    return SDValue();

  SmallVector<SDValue, 8> Ops;
  EVT VT = V.getValueType();
  EVT EltVT = VT.getVectorElementType();
  for (auto Op : V->op_values()) {
    if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
      Ops.push_back(DAG.getConstantFP(-Cst->getValueAPF(), SDLoc(Op), EltVT));
    } else {
      assert(Op.isUndef());
      Ops.push_back(DAG.getUNDEF(EltVT));
    }
  }

  SDNode *NV = DAG.getNodeIfExists(ISD::BUILD_VECTOR, DAG.getVTList(VT), Ops);
  if (!NV)
    return SDValue();

  // If an inverted version cannot be eliminated, choose it instead of the
  // original version.
  if (llvm::any_of(NV->uses(), IsNotFMA))
    return SDValue(NV, 0);

  // If the inverted version also can be eliminated, we have to consistently
  // prefer one of the values. We prefer a constant with a negative value on
  // the first place.
  // N.B. We need to skip undefs that may precede a value.
  for (auto op : V->op_values()) {
    if (auto *Cst = dyn_cast<ConstantFPSDNode>(op)) {
      if (Cst->isNegative())
        return SDValue();
      break;
    }
  }
  return SDValue(NV, 0);
}

static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool IsStrict = N->isStrictFPOpcode() || N->isTargetStrictFPOpcode();

  // Let legalize expand this if it isn't a legal type yet.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  SDValue A = N->getOperand(IsStrict ? 1 : 0);
  SDValue B = N->getOperand(IsStrict ? 2 : 1);
  SDValue C = N->getOperand(IsStrict ? 3 : 2);

  // If the operation allows fast-math and the target does not support FMA,
  // split this into mul+add to avoid libcall(s).
  SDNodeFlags Flags = N->getFlags();
  if (!IsStrict && Flags.hasAllowReassociation() &&
      TLI.isOperationExpand(ISD::FMA, VT)) {
    SDValue Fmul = DAG.getNode(ISD::FMUL, dl, VT, A, B, Flags);
    return DAG.getNode(ISD::FADD, dl, VT, Fmul, C, Flags);
  }

  EVT ScalarVT = VT.getScalarType();
  if (((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
       !Subtarget.hasAnyFMA()) &&
      !(ScalarVT == MVT::f16 && Subtarget.hasFP16()))
    return SDValue();

  auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
    bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
    bool LegalOperations = !DCI.isBeforeLegalizeOps();
    if (SDValue NegV = TLI.getCheaperNegatedExpression(V, DAG, LegalOperations,
                                                       CodeSize)) {
      V = NegV;
      return true;
    }
    // Look through extract_vector_elts. If it comes from an FNEG, create a
    // new extract from the FNEG input.
    if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        isNullConstant(V.getOperand(1))) {
      SDValue Vec = V.getOperand(0);
      if (SDValue NegV = TLI.getCheaperNegatedExpression(
              Vec, DAG, LegalOperations, CodeSize)) {
        V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
                        NegV, V.getOperand(1));
        return true;
      }
    }
    // Lookup if there is an inverted version of constant vector V in DAG.
    if (ISD::isBuildVectorOfConstantFPSDNodes(V.getNode())) {
      if (SDValue NegV = getInvertedVectorForFMA(V, DAG)) {
        V = NegV;
        return true;
      }
    }
    return false;
  };

  // Do not convert the passthru input of scalar intrinsics.
  // FIXME: We could allow negations of the lower element only.
  bool NegA = invertIfNegative(A);
  bool NegB = invertIfNegative(B);
  bool NegC = invertIfNegative(C);

  if (!NegA && !NegB && !NegC)
    return SDValue();

  unsigned NewOpcode =
      negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);

  // Propagate fast-math-flags to new FMA node.
  SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
  if (IsStrict) {
    assert(N->getNumOperands() == 4 && "Shouldn't be greater than 4");
    return DAG.getNode(NewOpcode, dl, {VT, MVT::Other},
                       {N->getOperand(0), A, B, C});
  } else {
    if (N->getNumOperands() == 4)
      return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
    return DAG.getNode(NewOpcode, dl, VT, A, B, C);
  }
}

// Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
// Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOperations = !DCI.isBeforeLegalizeOps();

  SDValue N2 = N->getOperand(2);

  SDValue NegN2 =
      TLI.getCheaperNegatedExpression(N2, DAG, LegalOperations, CodeSize);
  if (!NegN2)
    return SDValue();
  unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);

  if (N->getNumOperands() == 4)
    return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
                       NegN2, N->getOperand(3));
  return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
                     NegN2);
}

static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // (i32 (aext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
  // FIXME: Is this needed? We don't seem to have any tests for it.
  if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ANY_EXTEND &&
      N0.getOpcode() == X86ISD::SETCC_CARRY) {
    SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, N0->getOperand(0),
                                N0->getOperand(1));
    bool ReplaceOtherUses = !N0.hasOneUse();
    DCI.CombineTo(N, Setcc);
    // Replace other uses with a truncate of the widened setcc_carry.
    if (ReplaceOtherUses) {
      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
                                  N0.getValueType(), Setcc);
      DCI.CombineTo(N0.getNode(), Trunc);
    }

    return SDValue(N, 0);
  }

  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
    return NewCMov;

  if (DCI.isBeforeLegalizeOps())
    if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
      return V;

  if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), dl, VT, N0,
                                                 DAG, DCI, Subtarget))
    return V;

  if (VT.isVector())
    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
      return R;

  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
    return NewAdd;

  if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
    return R;

  // TODO: Combine with any target/faux shuffle.
  if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
      VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
    SDValue N00 = N0.getOperand(0);
    SDValue N01 = N0.getOperand(1);
    unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
    APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
    if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
        (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
      return concatSubVectors(N00, N01, DAG, dl);
    }
  }

  return SDValue();
}

/// If we have AVX512, but not BWI, and this is a vXi16/vXi8 setcc, just
/// pre-promote its result type, since vXi1 vectors don't get promoted
/// during type legalization.
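/// For example (an illustrative sketch): with AVX512F but no BWI,
///   (v32i1 setcc (v32i8 x), (v32i8 y), cc)
/// becomes (v32i1 trunc (v32i8 setcc x, y, cc)), keeping the compare in the
/// vXi8 domain rather than promoting the vXi1 result.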
55408 static SDValue truncateAVX512SetCCNoBWI(EVT VT, EVT OpVT, SDValue LHS,
55409 SDValue RHS, ISD::CondCode CC,
55410 const SDLoc &DL, SelectionDAG &DAG,
55411 const X86Subtarget &Subtarget) {
55412 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
55413 VT.getVectorElementType() == MVT::i1 &&
55414 (OpVT.getVectorElementType() == MVT::i8 ||
55415 OpVT.getVectorElementType() == MVT::i16)) {
55416 SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
55417 return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
55422 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
55423 TargetLowering::DAGCombinerInfo &DCI,
55424 const X86Subtarget &Subtarget) {
55425 const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
55426 const SDValue LHS = N->getOperand(0);
55427 const SDValue RHS = N->getOperand(1);
55428 EVT VT = N->getValueType(0);
  EVT OpVT = LHS.getValueType();
  SDLoc DL(N);
55432 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
    if (SDValue V = combineVectorSizedSetCCEquality(VT, LHS, RHS, CC, DL, DAG,
                                                    Subtarget))
      return V;
55437 if (VT == MVT::i1) {
55438 X86::CondCode X86CC;
      if (SDValue V =
              MatchVectorAllEqualTest(LHS, RHS, CC, DL, Subtarget, DAG, X86CC))
55441 return DAG.getNode(ISD::TRUNCATE, DL, VT, getSETCC(X86CC, V, DL, DAG));
55444 if (OpVT.isScalarInteger()) {
55445 // cmpeq(or(X,Y),X) --> cmpeq(and(~X,Y),0)
55446 // cmpne(or(X,Y),X) --> cmpne(and(~X,Y),0)
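      // Sanity check with arbitrary bit patterns: X = 0b1100, Y = 0b0101 gives
      // (X | Y) = 0b1101 != X and (~X & Y) = 0b0001 != 0, while Y = 0b0100
      // gives (X | Y) = X and (~X & Y) = 0, so the two forms agree.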
55447 auto MatchOrCmpEq = [&](SDValue N0, SDValue N1) {
        if (N0.getOpcode() == ISD::OR && N0->hasOneUse()) {
          if (N0.getOperand(0) == N1)
            return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
                               N0.getOperand(1));
          if (N0.getOperand(1) == N1)
            return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
                               N0.getOperand(0));
        }
        return SDValue();
      };
55458 if (SDValue AndN = MatchOrCmpEq(LHS, RHS))
55459 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
55460 if (SDValue AndN = MatchOrCmpEq(RHS, LHS))
55461 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
55463 // cmpeq(and(X,Y),Y) --> cmpeq(and(~X,Y),0)
55464 // cmpne(and(X,Y),Y) --> cmpne(and(~X,Y),0)
55465 auto MatchAndCmpEq = [&](SDValue N0, SDValue N1) {
55466 if (N0.getOpcode() == ISD::AND && N0->hasOneUse()) {
55467 if (N0.getOperand(0) == N1)
55468 return DAG.getNode(ISD::AND, DL, OpVT, N1,
55469 DAG.getNOT(DL, N0.getOperand(1), OpVT));
55470 if (N0.getOperand(1) == N1)
55471 return DAG.getNode(ISD::AND, DL, OpVT, N1,
                               DAG.getNOT(DL, N0.getOperand(0), OpVT));
        }
        return SDValue();
      };
55476 if (SDValue AndN = MatchAndCmpEq(LHS, RHS))
55477 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
55478 if (SDValue AndN = MatchAndCmpEq(RHS, LHS))
55479 return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
      // cmpeq(trunc(x),C) --> cmpeq(x,C)
      // cmpne(trunc(x),C) --> cmpne(x,C)
      // iff the upper bits of x are zero.
55484 if (LHS.getOpcode() == ISD::TRUNCATE &&
55485 LHS.getOperand(0).getScalarValueSizeInBits() >= 32 &&
55486 isa<ConstantSDNode>(RHS) && !DCI.isBeforeLegalize()) {
55487 EVT SrcVT = LHS.getOperand(0).getValueType();
55488 APInt UpperBits = APInt::getBitsSetFrom(SrcVT.getScalarSizeInBits(),
55489 OpVT.getScalarSizeInBits());
55490 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55491 auto *C = cast<ConstantSDNode>(RHS);
55492 if (DAG.MaskedValueIsZero(LHS.getOperand(0), UpperBits) &&
55493 TLI.isTypeLegal(LHS.getOperand(0).getValueType()))
55494 return DAG.getSetCC(DL, VT, LHS.getOperand(0),
55495 DAG.getConstant(C->getAPIntValue().zextOrTrunc(
55496 SrcVT.getScalarSizeInBits()),
    // With C as a power of 2 and C != 0 and C != INT_MIN:
    //    icmp eq (abs X), C ->
    //        (icmp eq X, C) | (icmp eq X, -C)
    //    icmp ne (abs X), C ->
    //        (icmp ne X, C) & (icmp ne X, -C)
    // Both of these patterns can be better optimized in
    // DAGCombiner::foldAndOrOfSETCC. Note this only applies for scalar
    // integers, which is checked above.
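    // e.g. with C = 4: |X| == 4 iff X == 4 || X == -4, and
    //      |X| != 4 iff X != 4 && X != -4.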
55509 if (LHS.getOpcode() == ISD::ABS && LHS.hasOneUse()) {
55510 if (auto *C = dyn_cast<ConstantSDNode>(RHS)) {
55511 const APInt &CInt = C->getAPIntValue();
55512 // We can better optimize this case in DAGCombiner::foldAndOrOfSETCC.
55513 if (CInt.isPowerOf2() && !CInt.isMinSignedValue()) {
55514 SDValue BaseOp = LHS.getOperand(0);
55515 SDValue SETCC0 = DAG.getSetCC(DL, VT, BaseOp, RHS, CC);
55516 SDValue SETCC1 = DAG.getSetCC(
55517 DL, VT, BaseOp, DAG.getConstant(-CInt, DL, OpVT), CC);
55518 return DAG.getNode(CC == ISD::SETEQ ? ISD::OR : ISD::AND, DL, VT,
55526 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
55527 (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
55528 // Using temporaries to avoid messing up operand ordering for later
55529 // transformations if this doesn't work.
    SDValue Op0 = LHS;
    SDValue Op1 = RHS;
    ISD::CondCode TmpCC = CC;
55533 // Put build_vector on the right.
55534 if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
55535 std::swap(Op0, Op1);
55536 TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
    bool IsSEXT0 =
        (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
        (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
55542 bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
55544 if (IsSEXT0 && IsVZero1) {
55545 assert(VT == Op0.getOperand(0).getValueType() &&
55546 "Unexpected operand type");
55547 if (TmpCC == ISD::SETGT)
55548 return DAG.getConstant(0, DL, VT);
55549 if (TmpCC == ISD::SETLE)
55550 return DAG.getConstant(1, DL, VT);
55551 if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
55552 return DAG.getNOT(DL, Op0.getOperand(0), VT);
55554 assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
55555 "Unexpected condition code!");
55556 return Op0.getOperand(0);
  // Try to make an unsigned vector comparison signed. On pre-AVX512 targets
  // only signed comparisons (`PCMPGT`) are available, and on AVX512 it's often
  // better to use `PCMPGT` if the result is meant to stay in a vector (if it's
  // going to a mask, there are signed AVX512 comparisons).
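  // For instance (a sketch): if the sign bit of both operands is known zero,
  // X <u Y and X <s Y agree, so the unsigned compare can use the signed
  // PCMPGT-based lowering.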
55564 if (VT.isVector() && OpVT.isVector() && OpVT.isInteger()) {
55565 bool CanMakeSigned = false;
55566 if (ISD::isUnsignedIntSetCC(CC)) {
55567 KnownBits CmpKnown =
55568 DAG.computeKnownBits(LHS).intersectWith(DAG.computeKnownBits(RHS));
55569 // If we know LHS/RHS share the same sign bit at each element we can
55570 // make this signed.
55571 // NOTE: `computeKnownBits` on a vector type aggregates common bits
55572 // across all lanes. So a pattern where the sign varies from lane to
55573 // lane, but at each lane Sign(LHS) is known to equal Sign(RHS), will be
55574 // missed. We could get around this by demanding each lane
55575 // independently, but this isn't the most important optimization and
55576 // that may eat into compile time.
      CanMakeSigned =
          CmpKnown.Zero.isSignBitSet() || CmpKnown.One.isSignBitSet();
55580 if (CanMakeSigned || ISD::isSignedIntSetCC(CC)) {
55581 SDValue LHSOut = LHS;
55582 SDValue RHSOut = RHS;
55583 ISD::CondCode NewCC = CC;
        if (SDValue NewLHS = incDecVectorConstant(LHS, DAG, /*IsInc*/ true,
                                                  /*NSW*/ true))
          LHSOut = NewLHS;
        else if (SDValue NewRHS = incDecVectorConstant(
                     RHS, DAG, /*IsInc*/ false, /*NSW*/ true))
          RHSOut = NewRHS;
55598 NewCC = ISD::SETGT;
        if (SDValue NewLHS = incDecVectorConstant(LHS, DAG, /*IsInc*/ false,
                                                  /*NSW*/ true))
          LHSOut = NewLHS;
        else if (SDValue NewRHS = incDecVectorConstant(RHS, DAG, /*IsInc*/ true,
                                                       /*NSW*/ true))
          RHSOut = NewRHS;
55614 // Will be swapped to SETGT in LowerVSETCC*.
55615 NewCC = ISD::SETLT;
55621 if (SDValue R = truncateAVX512SetCCNoBWI(VT, OpVT, LHSOut, RHSOut,
                                               NewCC, DL, DAG, Subtarget))
        return R;
55624 return DAG.getSetCC(DL, VT, LHSOut, RHSOut, NewCC);
  if (SDValue R =
          truncateAVX512SetCCNoBWI(VT, OpVT, LHS, RHS, CC, DL, DAG, Subtarget))
    return R;
55633 // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
55634 // to avoid scalarization via legalization because v4i32 is not a legal type.
55635 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
55636 LHS.getValueType() == MVT::v4f32)
55637 return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
55639 // X pred 0.0 --> X pred -X
55640 // If the negation of X already exists, use it in the comparison. This removes
55641 // the need to materialize 0.0 and allows matching to SSE's MIN/MAX
55642 // instructions in patterns with a 'select' node.
55643 if (isNullFPScalarOrVectorConst(RHS)) {
55644 SDVTList FNegVT = DAG.getVTList(OpVT);
55645 if (SDNode *FNeg = DAG.getNodeIfExists(ISD::FNEG, FNegVT, {LHS}))
55646 return DAG.getSetCC(DL, VT, LHS, SDValue(FNeg, 0), CC);
55652 static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
55653 TargetLowering::DAGCombinerInfo &DCI,
55654 const X86Subtarget &Subtarget) {
55655 SDValue Src = N->getOperand(0);
55656 MVT SrcVT = Src.getSimpleValueType();
55657 MVT VT = N->getSimpleValueType(0);
55658 unsigned NumBits = VT.getScalarSizeInBits();
55659 unsigned NumElts = SrcVT.getVectorNumElements();
55660 unsigned NumBitsPerElt = SrcVT.getScalarSizeInBits();
55661 assert(VT == MVT::i32 && NumElts <= NumBits && "Unexpected MOVMSK types");
55663 // Perform constant folding.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
  if (getTargetConstantBitsFromNode(Src, NumBitsPerElt, UndefElts, EltBits)) {
    APInt Imm(NumBits, 0);
    for (unsigned Idx = 0; Idx != NumElts; ++Idx)
      if (!UndefElts[Idx] && EltBits[Idx].isNegative())
        Imm.setBit(Idx);
    return DAG.getConstant(Imm, SDLoc(N), VT);
  }
55675 // Look through int->fp bitcasts that don't change the element width.
55676 unsigned EltWidth = SrcVT.getScalarSizeInBits();
55677 if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
55678 Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
55679 return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
55681 // Fold movmsk(not(x)) -> not(movmsk(x)) to improve folding of movmsk results
55682 // with scalar comparisons.
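  // e.g. (illustrative, v4i32): MOVMSK yields a 4-bit mask, so
  // movmsk(not(x)) == movmsk(x) ^ 0b1111, with the XOR constant covering only
  // the NumElts low bits.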
55683 if (SDValue NotSrc = IsNOT(Src, DAG)) {
    SDLoc DL(N);
    APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
55686 NotSrc = DAG.getBitcast(SrcVT, NotSrc);
55687 return DAG.getNode(ISD::XOR, DL, VT,
55688 DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
55689 DAG.getConstant(NotMask, DL, VT));
55692 // Fold movmsk(icmp_sgt(x,-1)) -> not(movmsk(x)) to improve folding of movmsk
55693 // results with scalar comparisons.
55694 if (Src.getOpcode() == X86ISD::PCMPGT &&
55695 ISD::isBuildVectorAllOnes(Src.getOperand(1).getNode())) {
    SDLoc DL(N);
    APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
55698 return DAG.getNode(ISD::XOR, DL, VT,
55699 DAG.getNode(X86ISD::MOVMSK, DL, VT, Src.getOperand(0)),
55700 DAG.getConstant(NotMask, DL, VT));
55703 // Fold movmsk(icmp_eq(and(x,c1),c1)) -> movmsk(shl(x,c2))
55704 // Fold movmsk(icmp_eq(and(x,c1),0)) -> movmsk(not(shl(x,c2)))
55705 // iff pow2splat(c1).
55706 // Use KnownBits to determine if only a single bit is non-zero
55707 // in each element (pow2 or zero), and shift that bit to the msb.
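  // As an illustrative example with vXi8 and c1 = splat(0x10): bit 4 is the
  // only bit that can be set, so shifting left by 3 moves it into the sign
  // bit, where MOVMSK can read the compare result directly.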
55708 if (Src.getOpcode() == X86ISD::PCMPEQ) {
55709 KnownBits KnownLHS = DAG.computeKnownBits(Src.getOperand(0));
55710 KnownBits KnownRHS = DAG.computeKnownBits(Src.getOperand(1));
55711 unsigned ShiftAmt = KnownLHS.countMinLeadingZeros();
55712 if (KnownLHS.countMaxPopulation() == 1 &&
55713 (KnownRHS.isZero() || (KnownRHS.countMaxPopulation() == 1 &&
55714 ShiftAmt == KnownRHS.countMinLeadingZeros()))) {
      SDLoc DL(N);
      MVT ShiftVT = SrcVT;
55717 SDValue ShiftLHS = Src.getOperand(0);
55718 SDValue ShiftRHS = Src.getOperand(1);
55719 if (ShiftVT.getScalarType() == MVT::i8) {
        // vXi8 shifts - we only care about the sign bit, so we can use PSLLW.
55721 ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
55722 ShiftLHS = DAG.getBitcast(ShiftVT, ShiftLHS);
55723 ShiftRHS = DAG.getBitcast(ShiftVT, ShiftRHS);
55725 ShiftLHS = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, ShiftVT,
55726 ShiftLHS, ShiftAmt, DAG);
55727 ShiftRHS = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, ShiftVT,
55728 ShiftRHS, ShiftAmt, DAG);
55729 ShiftLHS = DAG.getBitcast(SrcVT, ShiftLHS);
55730 ShiftRHS = DAG.getBitcast(SrcVT, ShiftRHS);
55731 SDValue Res = DAG.getNode(ISD::XOR, DL, SrcVT, ShiftLHS, ShiftRHS);
55732 return DAG.getNode(X86ISD::MOVMSK, DL, VT, DAG.getNOT(DL, Res, SrcVT));
55736 // Fold movmsk(logic(X,C)) -> logic(movmsk(X),C)
55737 if (N->isOnlyUserOf(Src.getNode())) {
55738 SDValue SrcBC = peekThroughOneUseBitcasts(Src);
55739 if (ISD::isBitwiseLogicOp(SrcBC.getOpcode())) {
      APInt UndefElts;
      SmallVector<APInt, 32> EltBits;
55742 if (getTargetConstantBitsFromNode(SrcBC.getOperand(1), NumBitsPerElt,
55743 UndefElts, EltBits)) {
55744 APInt Mask = APInt::getZero(NumBits);
55745 for (unsigned Idx = 0; Idx != NumElts; ++Idx) {
          if (!UndefElts[Idx] && EltBits[Idx].isNegative())
            Mask.setBit(Idx);
        }
        SDLoc DL(N);
55750 SDValue NewSrc = DAG.getBitcast(SrcVT, SrcBC.getOperand(0));
55751 SDValue NewMovMsk = DAG.getNode(X86ISD::MOVMSK, DL, VT, NewSrc);
55752 return DAG.getNode(SrcBC.getOpcode(), DL, VT, NewMovMsk,
55753 DAG.getConstant(Mask, DL, VT));
55758 // Simplify the inputs.
55759 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55760 APInt DemandedMask(APInt::getAllOnes(NumBits));
55761 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
55762 return SDValue(N, 0);
55767 static SDValue combineTESTP(SDNode *N, SelectionDAG &DAG,
55768 TargetLowering::DAGCombinerInfo &DCI,
55769 const X86Subtarget &Subtarget) {
55770 MVT VT = N->getSimpleValueType(0);
55771 unsigned NumBits = VT.getScalarSizeInBits();
55773 // Simplify the inputs.
55774 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55775 APInt DemandedMask(APInt::getAllOnes(NumBits));
55776 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
55777 return SDValue(N, 0);
55782 static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
55783 TargetLowering::DAGCombinerInfo &DCI,
55784 const X86Subtarget &Subtarget) {
55785 auto *MemOp = cast<X86MaskedGatherScatterSDNode>(N);
55786 SDValue BasePtr = MemOp->getBasePtr();
55787 SDValue Index = MemOp->getIndex();
55788 SDValue Scale = MemOp->getScale();
55789 SDValue Mask = MemOp->getMask();
55791 // Attempt to fold an index scale into the scale value directly.
55792 // For smaller indices, implicit sext is performed BEFORE scale, preventing
55793 // this fold under most circumstances.
55794 // TODO: Move this into X86DAGToDAGISel::matchVectorAddressRecursively?
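  // e.g. (a sketch): gather(base, shl(index, 1), scale = 4) can become
  // gather(base, index, scale = 8), as long as the new scale stays a legal
  // power of two no larger than 8.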
55795 if ((Index.getOpcode() == X86ISD::VSHLI ||
55796 (Index.getOpcode() == ISD::ADD &&
55797 Index.getOperand(0) == Index.getOperand(1))) &&
55798 isa<ConstantSDNode>(Scale) &&
55799 BasePtr.getScalarValueSizeInBits() == Index.getScalarValueSizeInBits()) {
55800 unsigned ShiftAmt =
55801 Index.getOpcode() == ISD::ADD ? 1 : Index.getConstantOperandVal(1);
55802 uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
55803 uint64_t NewScaleAmt = ScaleAmt * (1ULL << ShiftAmt);
55804 if (isPowerOf2_64(NewScaleAmt) && NewScaleAmt <= 8) {
55805 SDValue NewIndex = Index.getOperand(0);
      SDValue NewScale =
          DAG.getTargetConstant(NewScaleAmt, SDLoc(N), Scale.getValueType());
55808 if (N->getOpcode() == X86ISD::MGATHER)
55809 return getAVX2GatherNode(N->getOpcode(), SDValue(N, 0), DAG,
55810 MemOp->getOperand(1), Mask,
55811 MemOp->getBasePtr(), NewIndex, NewScale,
55812 MemOp->getChain(), Subtarget);
55813 if (N->getOpcode() == X86ISD::MSCATTER)
55814 return getScatterNode(N->getOpcode(), SDValue(N, 0), DAG,
55815 MemOp->getOperand(1), Mask, MemOp->getBasePtr(),
55816 NewIndex, NewScale, MemOp->getChain(), Subtarget);
55820 // With vector masks we only demand the upper bit of the mask.
55821 if (Mask.getScalarValueSizeInBits() != 1) {
55822 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55823 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
55824 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
55825 if (N->getOpcode() != ISD::DELETED_NODE)
55826 DCI.AddToWorklist(N);
55827 return SDValue(N, 0);
55834 static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
55835 SDValue Index, SDValue Base, SDValue Scale,
                                    SelectionDAG &DAG) {
  SDLoc DL(GorS);
55839 if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
55840 SDValue Ops[] = { Gather->getChain(), Gather->getPassThru(),
55841 Gather->getMask(), Base, Index, Scale } ;
55842 return DAG.getMaskedGather(Gather->getVTList(),
55843 Gather->getMemoryVT(), DL, Ops,
55844 Gather->getMemOperand(),
55845 Gather->getIndexType(),
55846 Gather->getExtensionType());
55848 auto *Scatter = cast<MaskedScatterSDNode>(GorS);
55849 SDValue Ops[] = { Scatter->getChain(), Scatter->getValue(),
55850 Scatter->getMask(), Base, Index, Scale };
55851 return DAG.getMaskedScatter(Scatter->getVTList(),
55852 Scatter->getMemoryVT(), DL,
55853 Ops, Scatter->getMemOperand(),
55854 Scatter->getIndexType(),
55855 Scatter->isTruncatingStore());
55858 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI) {
  SDLoc DL(N);
55861 auto *GorS = cast<MaskedGatherScatterSDNode>(N);
55862 SDValue Index = GorS->getIndex();
55863 SDValue Base = GorS->getBasePtr();
55864 SDValue Scale = GorS->getScale();
55866 if (DCI.isBeforeLegalize()) {
55867 unsigned IndexWidth = Index.getScalarValueSizeInBits();
    // Shrink constant indices if they are larger than 32 bits.
    // Only do this before type legalization, since v2i64 could become v2i32.
    // FIXME: We could check that the type is legal if we're after legalize
    // types, but then we would need to construct test cases where that happens.
    // FIXME: We could support more than just constant vectors, but we need to
    // be careful with costing. A truncate that can be optimized out would be
    // fine. Otherwise we might only want to create a truncate if it avoids a
    // split.
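    // e.g. (illustrative): a v2i64 constant index <16, 24> fits easily in 32
    // bits, so it can be truncated to a v2i32 index before the gather/scatter
    // is rebuilt.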
55876 if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
55877 if (BV->isConstant() && IndexWidth > 32 &&
55878 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
55879 EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
55880 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
55881 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
    // Shrink any sign/zero extends from a type of 32 bits or smaller to a type
    // larger than 32 bits, if there are sufficient sign bits. Only do this
    // before type legalization to avoid creating illegal types in the truncate.
55888 if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
55889 Index.getOpcode() == ISD::ZERO_EXTEND) &&
55891 Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
55892 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
55893 EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
55894 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
55895 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
55899 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55900 EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
  // Try to move splat constant adders from the index operand to the base
  // pointer operand, taking care to multiply by the scale. We can only do
  // this when the index element type is the same as the pointer type.
  // Otherwise we would need to be sure the math doesn't wrap before the
  // scale is applied.
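  // e.g. (a sketch): gather(base, index + splat(C), scale) becomes
  // gather(base + C * scale, index, scale), folding the splat adder into the
  // scalar base pointer.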
55905 if (Index.getOpcode() == ISD::ADD &&
55906 Index.getValueType().getVectorElementType() == PtrVT &&
55907 isa<ConstantSDNode>(Scale)) {
55908 uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
55909 if (auto *BV = dyn_cast<BuildVectorSDNode>(Index.getOperand(1))) {
55910 BitVector UndefElts;
55911 if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElts)) {
55912 // FIXME: Allow non-constant?
55913 if (UndefElts.none()) {
55914 // Apply the scale.
55915 APInt Adder = C->getAPIntValue() * ScaleAmt;
55916 // Add it to the existing base.
55917 Base = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
55918 DAG.getConstant(Adder, DL, PtrVT));
55919 Index = Index.getOperand(0);
55920 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
      // It's also possible that the base is just a constant. In that case,
      // just replace it with 0 and move the displacement into the index.
55926 if (BV->isConstant() && isa<ConstantSDNode>(Base) &&
55927 isOneConstant(Scale)) {
55928 SDValue Splat = DAG.getSplatBuildVector(Index.getValueType(), DL, Base);
55929 // Combine the constant build_vector and the constant base.
55930 Splat = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
55931 Index.getOperand(1), Splat);
55932 // Add to the LHS of the original Index add.
55933 Index = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
55934 Index.getOperand(0), Splat);
55935 Base = DAG.getConstant(0, DL, Base.getValueType());
55936 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
55941 if (DCI.isBeforeLegalizeOps()) {
55942 unsigned IndexWidth = Index.getScalarValueSizeInBits();
55944 // Make sure the index is either i32 or i64
55945 if (IndexWidth != 32 && IndexWidth != 64) {
55946 MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
55947 EVT IndexVT = Index.getValueType().changeVectorElementType(EltVT);
55948 Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
55949 return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
55953 // With vector masks we only demand the upper bit of the mask.
55954 SDValue Mask = GorS->getMask();
55955 if (Mask.getScalarValueSizeInBits() != 1) {
55956 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55957 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
55958 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
55959 if (N->getOpcode() != ISD::DELETED_NODE)
55960 DCI.AddToWorklist(N);
55961 return SDValue(N, 0);
55968 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
55969 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
55970 const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
55973 SDValue EFLAGS = N->getOperand(1);
55975 // Try to simplify the EFLAGS and condition code operands.
55976 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
55977 return getSETCC(CC, Flags, DL, DAG);
55982 /// Optimize branch condition evaluation.
55983 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
55984 const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  SDValue EFLAGS = N->getOperand(3);
55987 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
55989 // Try to simplify the EFLAGS and condition code operands.
55990 // Make sure to not keep references to operands, as combineSetCCEFLAGS can
55991 // RAUW them under us.
55992 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
55993 SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
55994 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
55995 N->getOperand(1), Cond, Flags);
56001 // TODO: Could we move this to DAGCombine?
56002 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
56003 SelectionDAG &DAG) {
  // Take advantage of vector comparisons (etc.) producing 0 or -1 in each lane
  // to optimize away the operation when it's fed by a constant.
56007 // The general transformation is:
56008 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
56009 // AND(VECTOR_CMP(x,y), constant2)
56010 // constant2 = UNARYOP(constant)
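// For example (illustrative): sint_to_fp(and(vector_cmp(x,y), splat(1)))
// becomes and(vector_cmp(x,y), bitcast(splat(1.0))), since each compare lane
// is 0 or -1 and the AND merely keeps or zeroes the converted constant.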
56012 // Early exit if this isn't a vector operation, the operand of the
56013 // unary operation isn't a bitwise AND, or if the sizes of the operations
56014 // aren't the same.
56015 EVT VT = N->getValueType(0);
56016 bool IsStrict = N->isStrictFPOpcode();
56017 unsigned NumEltBits = VT.getScalarSizeInBits();
56018 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
56019 if (!VT.isVector() || Op0.getOpcode() != ISD::AND ||
56020 DAG.ComputeNumSignBits(Op0.getOperand(0)) != NumEltBits ||
56021 VT.getSizeInBits() != Op0.getValueSizeInBits())
56024 // Now check that the other operand of the AND is a constant. We could
56025 // make the transformation for non-constant splats as well, but it's unclear
56026 // that would be a benefit as it would not eliminate any operations, just
56027 // perform one more step in scalar code before moving to the vector unit.
56028 if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
56029 // Bail out if the vector isn't a constant.
56030 if (!BV->isConstant())
56033 // Everything checks out. Build up the new and improved node.
    SDLoc DL(N);
    EVT IntVT = BV->getValueType(0);
    // Create a new constant of the appropriate type for the transformed
    // DAG node.
56038 SDValue SourceConst;
    if (IsStrict)
      SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
                                {N->getOperand(0), SDValue(BV, 0)});
    else
      SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
56044 // The AND node needs bitcasts to/from an integer vector type around it.
56045 SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
    SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
                                 MaskConst);
    SDValue Res = DAG.getBitcast(VT, NewAnd);
    if (IsStrict)
      return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
56057 /// If we are converting a value to floating-point, try to replace scalar
56058 /// truncate of an extracted vector element with a bitcast. This tries to keep
56059 /// the sequence on XMM registers rather than moving between vector and GPRs.
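/// For example (a sketch): (f64 sint_to_fp (i32 trunc (i64 extractelt
/// (v2i64 X), 0))) can instead extract element 0 of (v4i32 bitcast X),
/// avoiding a GPR round-trip for the truncate.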
56060 static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
56061 // TODO: This is currently only used by combineSIntToFP, but it is generalized
56062 // to allow being called by any similar cast opcode.
56063 // TODO: Consider merging this into lowering: vectorizeExtractedCast().
56064 SDValue Trunc = N->getOperand(0);
56065 if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
56068 SDValue ExtElt = Trunc.getOperand(0);
56069 if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
56070 !isNullConstant(ExtElt.getOperand(1)))
56073 EVT TruncVT = Trunc.getValueType();
56074 EVT SrcVT = ExtElt.getValueType();
56075 unsigned DestWidth = TruncVT.getSizeInBits();
56076 unsigned SrcWidth = SrcVT.getSizeInBits();
56077 if (SrcWidth % DestWidth != 0)
56080 // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
56081 EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
56082 unsigned VecWidth = SrcVecVT.getSizeInBits();
56083 unsigned NumElts = VecWidth / DestWidth;
56084 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
56085 SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
56087 SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
56088 BitcastVec, ExtElt.getOperand(1));
56089 return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
56092 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
56093 const X86Subtarget &Subtarget) {
56094 bool IsStrict = N->isStrictFPOpcode();
56095 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
56096 EVT VT = N->getValueType(0);
56097 EVT InVT = Op0.getValueType();
56099 // UINT_TO_FP(vXi1~15) -> UINT_TO_FP(ZEXT(vXi1~15 to vXi16))
56100 // UINT_TO_FP(vXi17~31) -> UINT_TO_FP(ZEXT(vXi17~31 to vXi32))
56101 // UINT_TO_FP(vXi33~63) -> UINT_TO_FP(ZEXT(vXi33~63 to vXi64))
56102 if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
56103 unsigned ScalarSize = InVT.getScalarSizeInBits();
    if (ScalarSize == 16 || ScalarSize == 32 || ScalarSize >= 64)
      return SDValue();
    SDLoc dl(N);
56107 EVT DstVT = EVT::getVectorVT(*DAG.getContext(),
56108 ScalarSize < 16 ? MVT::i16
56109 : ScalarSize < 32 ? MVT::i32
56111 InVT.getVectorNumElements());
    SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_UINT_TO_FP, dl, {VT, MVT::Other},
56115 {N->getOperand(0), P});
56116 return DAG.getNode(ISD::UINT_TO_FP, dl, VT, P);
56119 // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
56120 // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
56121 // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
56122 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
56123 VT.getScalarType() != MVT::f16) {
    SDLoc dl(N);
    EVT DstVT = InVT.changeVectorElementType(MVT::i32);
56126 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
    // UINT_TO_FP isn't legal without AVX512, so use SINT_TO_FP.
    if (IsStrict)
56130 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
56131 {N->getOperand(0), P});
56132 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
  // Since UINT_TO_FP is legal (it's marked custom), the DAG combiner won't
  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
  // the optimization here.
  if (DAG.SignBitIsZero(Op0)) {
    if (IsStrict)
56140 return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
56141 {N->getOperand(0), Op0});
56142 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
56148 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
56149 TargetLowering::DAGCombinerInfo &DCI,
56150 const X86Subtarget &Subtarget) {
56151 // First try to optimize away the conversion entirely when it's
56152 // conditionally from a constant. Vectors only.
56153 bool IsStrict = N->isStrictFPOpcode();
  if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
    return Res;
56157 // Now move on to more general possibilities.
56158 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
56159 EVT VT = N->getValueType(0);
56160 EVT InVT = Op0.getValueType();
56162 // SINT_TO_FP(vXi1~15) -> SINT_TO_FP(SEXT(vXi1~15 to vXi16))
56163 // SINT_TO_FP(vXi17~31) -> SINT_TO_FP(SEXT(vXi17~31 to vXi32))
56164 // SINT_TO_FP(vXi33~63) -> SINT_TO_FP(SEXT(vXi33~63 to vXi64))
56165 if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
56166 unsigned ScalarSize = InVT.getScalarSizeInBits();
    if (ScalarSize == 16 || ScalarSize == 32 || ScalarSize >= 64)
      return SDValue();
    SDLoc dl(N);
56170 EVT DstVT = EVT::getVectorVT(*DAG.getContext(),
56171 ScalarSize < 16 ? MVT::i16
56172 : ScalarSize < 32 ? MVT::i32
56174 InVT.getVectorNumElements());
    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
    if (IsStrict)
56177 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
56178 {N->getOperand(0), P});
56179 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
56182 // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
56183 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
56184 // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
56185 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
56186 VT.getScalarType() != MVT::f16) {
    SDLoc dl(N);
    EVT DstVT = InVT.changeVectorElementType(MVT::i32);
    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
    if (IsStrict)
56191 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
56192 {N->getOperand(0), P});
56193 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
56196 // Without AVX512DQ we only support i64 to float scalar conversion. For both
56197 // vectors and scalars, see if we know that the upper bits are all the sign
56198 // bit, in which case we can truncate the input to i32 and convert from that.
56199 if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
56200 unsigned BitWidth = InVT.getScalarSizeInBits();
56201 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
56202 if (NumSignBits >= (BitWidth - 31)) {
56203 EVT TruncVT = MVT::i32;
56204 if (InVT.isVector())
56205 TruncVT = InVT.changeVectorElementType(TruncVT);
      SDLoc dl(N);
      if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
        if (IsStrict)
56210 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
56211 {N->getOperand(0), Trunc});
56212 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
      }
      // If we're after legalize and the type is v2i32, we need to shuffle and
      // use CVTSI2P.
56216 assert(InVT == MVT::v2i64 && "Unexpected VT!");
56217 SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
                                          {0, 2, -1, -1});
      if (IsStrict)
56221 return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
56222 {N->getOperand(0), Shuf});
56223 return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
56227 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
56228 // a 32-bit target where SSE doesn't support i64->FP operations.
56229 if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
56230 Op0.getOpcode() == ISD::LOAD) {
56231 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
56233 // This transformation is not supported if the result type is f16 or f128.
56234 if (VT == MVT::f16 || VT == MVT::f128)
    // If we have AVX512DQ we can use packed conversion instructions unless
    // the VT is f80.
    if (Subtarget.hasDQI() && VT != MVT::f80)
      return SDValue();
56242 if (Ld->isSimple() && !VT.isVector() && ISD::isNormalLoad(Op0.getNode()) &&
56243 Op0.hasOneUse() && !Subtarget.is64Bit() && InVT == MVT::i64) {
56244 std::pair<SDValue, SDValue> Tmp =
56245 Subtarget.getTargetLowering()->BuildFILD(
56246 VT, InVT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(),
56247 Ld->getPointerInfo(), Ld->getOriginalAlign(), DAG);
56248 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
  if (SDValue V = combineToFPTruncExtElt(N, DAG))
    return V;
56262 static bool needCarryOrOverflowFlag(SDValue Flags) {
56263 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
  for (const SDNode *User : Flags->uses()) {
    X86::CondCode CC;
    switch (User->getOpcode()) {
    default:
      // Be conservative.
      return true;
    case X86ISD::SETCC:
    case X86ISD::SETCC_CARRY:
      CC = (X86::CondCode)User->getConstantOperandVal(0);
      break;
    case X86ISD::BRCOND:
    case X86ISD::CMOV:
      CC = (X86::CondCode)User->getConstantOperandVal(2);
      break;
    }

    switch (CC) {
    default: break;
56283 case X86::COND_A: case X86::COND_AE:
56284 case X86::COND_B: case X86::COND_BE:
56285 case X86::COND_O: case X86::COND_NO:
56286 case X86::COND_G: case X86::COND_GE:
56287 case X86::COND_L: case X86::COND_LE:
56295 static bool onlyZeroFlagUsed(SDValue Flags) {
56296 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
  for (const SDNode *User : Flags->uses()) {
    unsigned CCOpNo;
    switch (User->getOpcode()) {
    default:
      // Be conservative.
      return false;
    case X86ISD::SETCC:
    case X86ISD::SETCC_CARRY:
      CCOpNo = 0;
      break;
    case X86ISD::BRCOND:
    case X86ISD::CMOV:
      CCOpNo = 2;
      break;
    }
56314 X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
56315 if (CC != X86::COND_E && CC != X86::COND_NE)
56322 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
56323 // Only handle test patterns.
  if (!isNullConstant(N->getOperand(1)))
    return SDValue();
56327 // If we have a CMP of a truncated binop, see if we can make a smaller binop
56328 // and use its flags directly.
56329 // TODO: Maybe we should try promoting compares that only use the zero flag
56330 // first if we can prove the upper bits with computeKnownBits?
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);
56333 EVT VT = Op.getValueType();
  // If we have a constant logical shift that's only used in a comparison
  // against zero, turn it into an equivalent AND. This allows turning it into
  // a TEST instruction later.
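  // e.g. (illustrative, i32): (srl X, 16) == 0 iff (X & 0xFFFF0000) == 0,
  // and the latter can be matched as TEST with an immediate.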
56338 if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
56339 Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
56340 onlyZeroFlagUsed(SDValue(N, 0))) {
56341 unsigned BitWidth = VT.getSizeInBits();
56342 const APInt &ShAmt = Op.getConstantOperandAPInt(1);
56343 if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
56344 unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
56345 APInt Mask = Op.getOpcode() == ISD::SRL
56346 ? APInt::getHighBitsSet(BitWidth, MaskBits)
56347 : APInt::getLowBitsSet(BitWidth, MaskBits);
56348 if (Mask.isSignedIntN(32)) {
56349 Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
56350 DAG.getConstant(Mask, dl, VT));
56351 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
56352 DAG.getConstant(0, dl, VT));
56357 // Peek through any zero-extend if we're only testing for a zero result.
56358 if (Op.getOpcode() == ISD::ZERO_EXTEND && onlyZeroFlagUsed(SDValue(N, 0))) {
56359 SDValue Src = Op.getOperand(0);
56360 EVT SrcVT = Src.getValueType();
56361 if (SrcVT.getScalarSizeInBits() >= 8 &&
56362 DAG.getTargetLoweringInfo().isTypeLegal(SrcVT))
56363 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Src,
56364 DAG.getConstant(0, dl, SrcVT));
56367 // Look for a truncate.
56368 if (Op.getOpcode() != ISD::TRUNCATE)
56371 SDValue Trunc = Op;
56372 Op = Op.getOperand(0);
  // See if we can compare with zero against the truncation source,
  // which should help using the Z flag from many ops. Only do this for
  // an i32 truncated op to prevent partial-register compares of promoted ops.
56377 EVT OpVT = Op.getValueType();
  APInt UpperBits =
      APInt::getBitsSetFrom(OpVT.getSizeInBits(), VT.getSizeInBits());
56380 if (OpVT == MVT::i32 && DAG.MaskedValueIsZero(Op, UpperBits) &&
56381 onlyZeroFlagUsed(SDValue(N, 0))) {
56382 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
56383 DAG.getConstant(0, dl, OpVT));
56386 // After this the truncate and arithmetic op must have a single use.
56387 if (!Trunc.hasOneUse() || !Op.hasOneUse())
56391 switch (Op.getOpcode()) {
56392 default: return SDValue();
  case ISD::AND:
    // Skip AND with constant. We have special handling for AND with immediate
    // during isel to generate TEST instructions.
56396 if (isa<ConstantSDNode>(Op.getOperand(1)))
56398 NewOpc = X86ISD::AND;
56400 case ISD::OR: NewOpc = X86ISD::OR; break;
56401 case ISD::XOR: NewOpc = X86ISD::XOR; break;
  case ISD::ADD:
    // If the carry or overflow flag is used, we can't truncate.
    if (needCarryOrOverflowFlag(SDValue(N, 0)))
      return SDValue();
    NewOpc = X86ISD::ADD;
    break;
  case ISD::SUB:
    // If the carry or overflow flag is used, we can't truncate.
    if (needCarryOrOverflowFlag(SDValue(N, 0)))
      return SDValue();
    NewOpc = X86ISD::SUB;
    break;
  }
56416 // We found an op we can narrow. Truncate its inputs.
56417 SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
56418 SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
  // Use an X86-specific opcode to avoid DAG combines messing with it.
56421 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
56422 Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
56424 // For AND, keep a CMP so that we can match the test pattern.
56425 if (NewOpc == X86ISD::AND)
56426 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
56427 DAG.getConstant(0, dl, VT));
56429 // Return the flags.
56430 return Op.getValue(1);
56433 static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
56434 TargetLowering::DAGCombinerInfo &DCI) {
56435 assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
56436 "Expected X86ISD::ADD or X86ISD::SUB");
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
56440 SDValue RHS = N->getOperand(1);
56441 MVT VT = LHS.getSimpleValueType();
56442 bool IsSub = X86ISD::SUB == N->getOpcode();
56443 unsigned GenericOpc = IsSub ? ISD::SUB : ISD::ADD;
56445 // If we don't use the flag result, simplify back to a generic ADD/SUB.
56446 if (!N->hasAnyUseOfValue(1)) {
56447 SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
56448 return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
56451 // Fold any similar generic ADD/SUB opcodes to reuse this node.
56452 auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
56453 SDValue Ops[] = {N0, N1};
56454 SDVTList VTs = DAG.getVTList(N->getValueType(0));
56455 if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
      SDValue Op(N, 0);
      if (Negate)
        Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
      DCI.CombineTo(GenericAddSub, Op);
    }
  };
56462 MatchGeneric(LHS, RHS, false);
56463 MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
56465 // TODO: Can we drop the ZeroSecondOpOnly limit? This is to guarantee that the
56466 // EFLAGS result doesn't change.
56467 return combineAddOrSubToADCOrSBB(IsSub, DL, VT, LHS, RHS, DAG,
56468 /*ZeroSecondOpOnly*/ true);
56471 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
56472 SDValue LHS = N->getOperand(0);
56473 SDValue RHS = N->getOperand(1);
56474 SDValue BorrowIn = N->getOperand(2);
56476 if (SDValue Flags = combineCarryThroughADD(BorrowIn, DAG)) {
56477 MVT VT = N->getSimpleValueType(0);
56478 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
56479 return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs, LHS, RHS, Flags);
56482 // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
56483 // iff the flag result is dead.
56484 if (LHS.getOpcode() == ISD::SUB && isNullConstant(RHS) &&
56485 !N->hasAnyUseOfValue(1))
56486 return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), LHS.getOperand(0),
56487 LHS.getOperand(1), BorrowIn);
56492 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
56493 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
56494 TargetLowering::DAGCombinerInfo &DCI) {
56495 SDValue LHS = N->getOperand(0);
56496 SDValue RHS = N->getOperand(1);
56497 SDValue CarryIn = N->getOperand(2);
56498 auto *LHSC = dyn_cast<ConstantSDNode>(LHS);
56499 auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
  // Canonicalize constant to RHS.
  if (LHSC && !RHSC)
    return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), RHS, LHS,
                       CarryIn);
56506 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
56507 // the result is either zero or one (depending on the input carry bit).
56508 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
56509 if (LHSC && RHSC && LHSC->isZero() && RHSC->isZero() &&
      // We don't have a good way to replace an EFLAGS use, so only do this
      // when the flag result is unused.
      SDValue(N, 1).use_empty()) {
    SDLoc DL(N);
    EVT VT = N->getValueType(0);
56515 SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
    SDValue Res1 = DAG.getNode(
        ISD::AND, DL, VT,
        DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
56519 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), CarryIn),
56520 DAG.getConstant(1, DL, VT));
56521 return DCI.CombineTo(N, Res1, CarryOut);
56524 // Fold ADC(C1,C2,Carry) -> ADC(0,C1+C2,Carry)
56525 // iff the flag result is dead.
56526 // TODO: Allow flag result if C1+C2 doesn't signed/unsigned overflow.
56527 if (LHSC && RHSC && !LHSC->isZero() && !N->hasAnyUseOfValue(1)) {
    SDLoc DL(N);
    APInt Sum = LHSC->getAPIntValue() + RHSC->getAPIntValue();
56530 return DAG.getNode(X86ISD::ADC, DL, N->getVTList(),
56531 DAG.getConstant(0, DL, LHS.getValueType()),
56532 DAG.getConstant(Sum, DL, LHS.getValueType()), CarryIn);
56535 if (SDValue Flags = combineCarryThroughADD(CarryIn, DAG)) {
56536 MVT VT = N->getSimpleValueType(0);
56537 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
56538 return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs, LHS, RHS, Flags);
56541 // Fold ADC(ADD(X,Y),0,Carry) -> ADC(X,Y,Carry)
56542 // iff the flag result is dead.
56543 if (LHS.getOpcode() == ISD::ADD && RHSC && RHSC->isZero() &&
56544 !N->hasAnyUseOfValue(1))
56545 return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), LHS.getOperand(0),
56546 LHS.getOperand(1), CarryIn);
56551 static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
56552 const SDLoc &DL, EVT VT,
56553 const X86Subtarget &Subtarget) {
56554 // Example of pattern we try to detect:
56555 // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
// (add (build_vector (extract_elt t, 0),
56557 // (extract_elt t, 2),
56558 // (extract_elt t, 4),
56559 // (extract_elt t, 6)),
56560 // (build_vector (extract_elt t, 1),
56561 // (extract_elt t, 3),
56562 // (extract_elt t, 5),
56563 // (extract_elt t, 7)))
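// i.e. (a sketch): each i32 result lane is x0[2i]*x1[2i] + x0[2i+1]*x1[2i+1],
// which is exactly the horizontal pairwise multiply-add that PMADDWD computes
// from the two vXi16 inputs.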
56565 if (!Subtarget.hasSSE2())
56568 if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
56569 Op1.getOpcode() != ISD::BUILD_VECTOR)
56572 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
56573 VT.getVectorNumElements() < 4 ||
56574 !isPowerOf2_32(VT.getVectorNumElements()))
56577 // Check if one of Op0,Op1 is of the form:
56578 // (build_vector (extract_elt Mul, 0),
56579 // (extract_elt Mul, 2),
56580 // (extract_elt Mul, 4),
56582 // the other is of the form:
56583 // (build_vector (extract_elt Mul, 1),
56584 // (extract_elt Mul, 3),
56585 // (extract_elt Mul, 5),
56587 // and identify Mul.
56589 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
56590 SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
56591 Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
56592 // TODO: Be more tolerant to undefs.
56593 if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
56594 Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
56595 Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
56596 Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
56598 auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
56599 auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
56600 auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
56601 auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
56602 if (!Const0L || !Const1L || !Const0H || !Const1H)
56604 unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
56605 Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
    // Commutativity of mul allows factors of a product to reorder.
    if (Idx0L > Idx1L)
      std::swap(Idx0L, Idx1L);
    if (Idx0H > Idx1H)
      std::swap(Idx0H, Idx1H);
56611 // Commutativity of add allows pairs of factors to reorder.
56612 if (Idx0L > Idx0H) {
56613 std::swap(Idx0L, Idx0H);
56614 std::swap(Idx1L, Idx1H);
56616 if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
56617 Idx1H != 2 * i + 3)
      // The first time an extract_elt's source vector is visited, it must be a
      // MUL with 2X the number of vector elements of the BUILD_VECTOR.
      // All extracts must be from that same MUL.
56623 Mul = Op0L->getOperand(0);
56624 if (Mul->getOpcode() != ISD::MUL ||
56625 Mul.getValueType().getVectorNumElements() != 2 * e)
56628 // Check that the extract is from the same MUL previously seen.
56629 if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
56630 Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
56634 // Check if the Mul source can be safely shrunk.
56636 if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
56637 Mode == ShrinkMode::MULU16)
56640 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
56641 VT.getVectorNumElements() * 2);
56642 SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(0));
56643 SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(1));
56645 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
56646 ArrayRef<SDValue> Ops) {
56647 EVT InVT = Ops[0].getValueType();
56648 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
56649 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
56650 InVT.getVectorNumElements() / 2);
56651 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
56653 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { N0, N1 }, PMADDBuilder);
56656 // Attempt to turn this pattern into PMADDWD.
56657 // (add (mul (sext (build_vector)), (sext (build_vector))),
56658 // (mul (sext (build_vector)), (sext (build_vector)))
56659 static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
56660 const SDLoc &DL, EVT VT,
56661 const X86Subtarget &Subtarget) {
56662 if (!Subtarget.hasSSE2())
56665 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
56668 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
56669 VT.getVectorNumElements() < 4 ||
56670 !isPowerOf2_32(VT.getVectorNumElements()))
56673 SDValue N00 = N0.getOperand(0);
56674 SDValue N01 = N0.getOperand(1);
56675 SDValue N10 = N1.getOperand(0);
56676 SDValue N11 = N1.getOperand(1);
56678 // All inputs need to be sign extends.
56679 // TODO: Support ZERO_EXTEND from known positive?
56680 if (N00.getOpcode() != ISD::SIGN_EXTEND ||
56681 N01.getOpcode() != ISD::SIGN_EXTEND ||
56682 N10.getOpcode() != ISD::SIGN_EXTEND ||
56683 N11.getOpcode() != ISD::SIGN_EXTEND)
56686 // Peek through the extends.
56687 N00 = N00.getOperand(0);
56688 N01 = N01.getOperand(0);
56689 N10 = N10.getOperand(0);
56690 N11 = N11.getOperand(0);
56692 // Must be extending from vXi16.
56693 EVT InVT = N00.getValueType();
56694 if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
56695 N10.getValueType() != InVT || N11.getValueType() != InVT)
56698 // All inputs should be build_vectors.
56699 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
56700 N01.getOpcode() != ISD::BUILD_VECTOR ||
56701 N10.getOpcode() != ISD::BUILD_VECTOR ||
56702 N11.getOpcode() != ISD::BUILD_VECTOR)
  // For each element, we need the odd element of one vector multiplied by the
  // odd element of the other vector, plus the even element of one vector
  // multiplied by the even element of the other vector. That is, for each
  // element i, we must see this operation being performed:
  //   A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
56712 for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
56713 SDValue N00Elt = N00.getOperand(i);
56714 SDValue N01Elt = N01.getOperand(i);
56715 SDValue N10Elt = N10.getOperand(i);
56716 SDValue N11Elt = N11.getOperand(i);
56717 // TODO: Be more tolerant to undefs.
56718 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
56719 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
56720 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
56721 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
56723 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
56724 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
56725 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
56726 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
56727 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
56729 unsigned IdxN00 = ConstN00Elt->getZExtValue();
56730 unsigned IdxN01 = ConstN01Elt->getZExtValue();
56731 unsigned IdxN10 = ConstN10Elt->getZExtValue();
56732 unsigned IdxN11 = ConstN11Elt->getZExtValue();
56733 // Add is commutative so indices can be reordered.
56734 if (IdxN00 > IdxN10) {
56735 std::swap(IdxN00, IdxN10);
56736 std::swap(IdxN01, IdxN11);
    // N0 indices must be the even elements. N1 indices must be the next odd
    // elements.
56739 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
56740 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
56742 SDValue N00In = N00Elt.getOperand(0);
56743 SDValue N01In = N01Elt.getOperand(0);
56744 SDValue N10In = N10Elt.getOperand(0);
56745 SDValue N11In = N11Elt.getOperand(0);
    // The first time we find an input, capture it.
    if (!In0) {
      In0 = N00In;
      In1 = N01In;
    }
    // The input vectors must be at least as wide as the output.
    // If they are larger than the output, we extract a subvector below.
56754 if (In0.getValueSizeInBits() < VT.getSizeInBits() ||
56755 In1.getValueSizeInBits() < VT.getSizeInBits())
56758 // Mul is commutative so the input vectors can be in any order.
56759 // Canonicalize to make the compares easier.
    if (In0 != N00In)
      std::swap(N00In, N01In);
    if (In0 != N10In)
      std::swap(N10In, N11In);
56764 if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
56768 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
56769 ArrayRef<SDValue> Ops) {
56770 EVT OpVT = Ops[0].getValueType();
56771 assert(OpVT.getScalarType() == MVT::i16 &&
56772 "Unexpected scalar element type");
56773 assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
56774 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
56775 OpVT.getVectorNumElements() / 2);
56776 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
  // If the output is narrower than an input, extract the low part of the input
  // vector.
  EVT OutVT16 = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
56782 VT.getVectorNumElements() * 2);
56783 if (OutVT16.bitsLT(In0.getValueType())) {
56784 In0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In0,
56785 DAG.getIntPtrConstant(0, DL));
56787 if (OutVT16.bitsLT(In1.getValueType())) {
56788 In1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In1,
56789 DAG.getIntPtrConstant(0, DL));
56791 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
56795 // ADD(VPMADDWD(X,Y),VPMADDWD(Z,W)) -> VPMADDWD(SHUFFLE(X,Z), SHUFFLE(Y,W))
// If the upper element in each pair of both VPMADDWD operands is zero, then we
// can merge the operand elements and use the implicit add of VPMADDWD.
56798 // TODO: Add support for VPMADDUBSW (which isn't commutable).
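// As an illustrative sketch: with X = <x0,0,x1,0,...> and Z = <z0,0,z1,0,...>,
// packing the low elements of X and Z (and likewise of Y and W) lets a single
// VPMADDWD compute x*y + z*w per pair, replacing two VPMADDWDs and an ADD.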
56799 static SDValue combineAddOfPMADDWD(SelectionDAG &DAG, SDValue N0, SDValue N1,
56800 const SDLoc &DL, EVT VT) {
56801 if (N0.getOpcode() != N1.getOpcode() || N0.getOpcode() != X86ISD::VPMADDWD)
56804 // TODO: Add 256/512-bit support once VPMADDWD combines with shuffles.
56805 if (VT.getSizeInBits() > 128)
56808 unsigned NumElts = VT.getVectorNumElements();
56809 MVT OpVT = N0.getOperand(0).getSimpleValueType();
56810 APInt DemandedBits = APInt::getAllOnes(OpVT.getScalarSizeInBits());
56811 APInt DemandedHiElts = APInt::getSplat(2 * NumElts, APInt(2, 2));
  bool Op0HiZero =
      DAG.MaskedValueIsZero(N0.getOperand(0), DemandedBits, DemandedHiElts) ||
      DAG.MaskedValueIsZero(N0.getOperand(1), DemandedBits, DemandedHiElts);
  bool Op1HiZero =
      DAG.MaskedValueIsZero(N1.getOperand(0), DemandedBits, DemandedHiElts) ||
      DAG.MaskedValueIsZero(N1.getOperand(1), DemandedBits, DemandedHiElts);
56820 // TODO: Check for zero lower elements once we have actual codegen that
56822 if (!Op0HiZero || !Op1HiZero)
56825 // Create a shuffle mask packing the lower elements from each VPMADDWD.
56826 SmallVector<int> Mask;
56827 for (int i = 0; i != (int)NumElts; ++i) {
56828 Mask.push_back(2 * i);
56829 Mask.push_back(2 * (i + NumElts));
  }
  SDValue LHS =
      DAG.getVectorShuffle(OpVT, DL, N0.getOperand(0), N1.getOperand(0), Mask);
  SDValue RHS =
      DAG.getVectorShuffle(OpVT, DL, N0.getOperand(1), N1.getOperand(1), Mask);
56836 return DAG.getNode(X86ISD::VPMADDWD, DL, VT, LHS, RHS);
56839 /// CMOV of constants requires materializing constant operands in registers.
56840 /// Try to fold those constants into an 'add' instruction to reduce instruction
/// count. We do this with CMOV rather than the generic 'select' because there are
56842 /// earlier folds that may be used to turn select-of-constants into logic hacks.
56843 static SDValue pushAddIntoCmovOfConsts(SDNode *N, SelectionDAG &DAG,
56844 const X86Subtarget &Subtarget) {
  // If an operand is zero, add-of-0 gets simplified away, so that's clearly
  // better because we eliminate 1-2 instructions. This transform is still
  // an improvement without zero operands because we trade 2 constant moves and
  // 1 add for 2 adds (LEA) as long as the constants can be represented as
  // immediate asm operands (fit in 32 bits).
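  // e.g. (a sketch): add (cmov 3, 5), X becomes cmov (add X, 3), (add X, 5),
  // where both adds can be LEAs and the constant materializations disappear.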
56850 auto isSuitableCmov = [](SDValue V) {
56851 if (V.getOpcode() != X86ISD::CMOV || !V.hasOneUse())
56853 if (!isa<ConstantSDNode>(V.getOperand(0)) ||
56854 !isa<ConstantSDNode>(V.getOperand(1)))
56856 return isNullConstant(V.getOperand(0)) || isNullConstant(V.getOperand(1)) ||
56857 (V.getConstantOperandAPInt(0).isSignedIntN(32) &&
56858 V.getConstantOperandAPInt(1).isSignedIntN(32));
56861 // Match an appropriate CMOV as the first operand of the add.
56862 SDValue Cmov = N->getOperand(0);
56863 SDValue OtherOp = N->getOperand(1);
56864 if (!isSuitableCmov(Cmov))
56865 std::swap(Cmov, OtherOp);
56866 if (!isSuitableCmov(Cmov))
56869 // Don't remove a load folding opportunity for the add. That would neutralize
56870 // any improvements from removing constant materializations.
56871 if (X86::mayFoldLoad(OtherOp, Subtarget))
  EVT VT = N->getValueType(0);
  SDLoc DL(N);
56876 SDValue FalseOp = Cmov.getOperand(0);
56877 SDValue TrueOp = Cmov.getOperand(1);
56879 // We will push the add through the select, but we can potentially do better
56880 // if we know there is another add in the sequence and this is pointer math.
56881 // In that case, we can absorb an add into the trailing memory op and avoid
56882 // a 3-operand LEA which is likely slower than a 2-operand LEA.
56883 // TODO: If target has "slow3OpsLEA", do this even without the trailing memop?
56884 if (OtherOp.getOpcode() == ISD::ADD && OtherOp.hasOneUse() &&
56885 !isa<ConstantSDNode>(OtherOp.getOperand(0)) &&
56886 all_of(N->uses(), [&](SDNode *Use) {
56887 auto *MemNode = dyn_cast<MemSDNode>(Use);
56888 return MemNode && MemNode->getBasePtr().getNode() == N;
56890 // add (cmov C1, C2), add (X, Y) --> add (cmov (add X, C1), (add X, C2)), Y
56891 // TODO: We are arbitrarily choosing op0 as the 1st piece of the sum, but
56892 // it is possible that choosing op1 might be better.
56893 SDValue X = OtherOp.getOperand(0), Y = OtherOp.getOperand(1);
56894 FalseOp = DAG.getNode(ISD::ADD, DL, VT, X, FalseOp);
56895 TrueOp = DAG.getNode(ISD::ADD, DL, VT, X, TrueOp);
56896 Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp,
56897 Cmov.getOperand(2), Cmov.getOperand(3));
56898 return DAG.getNode(ISD::ADD, DL, VT, Cmov, Y);
56901 // add (cmov C1, C2), OtherOp --> cmov (add OtherOp, C1), (add OtherOp, C2)
56902 FalseOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, FalseOp);
56903 TrueOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, TrueOp);
56904 return DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp, Cmov.getOperand(2),
56905 Cmov.getOperand(3));
static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDLoc DL(N);

  if (SDValue Select = pushAddIntoCmovOfConsts(N, DAG, Subtarget))
    return Select;

  if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, DL, VT, Subtarget))
    return MAdd;
  if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, DL, VT, Subtarget))
    return MAdd;
  if (SDValue MAdd = combineAddOfPMADDWD(DAG, Op0, Op1, DL, VT))
    return MAdd;

  // Try to synthesize horizontal adds from adds of shuffles.
  if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
    return V;

  // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
  // (sub Y, (sext (vXi1 X))).
  // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
  // generic DAG combine without a legal type check, but adding this there
  // caused regressions.
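  // An i1 zext is 0 or 1 and an i1 sext is 0 or -1, so elementwise
  // zext(X) == -sext(X) and thus add(zext(X), Y) == sub(Y, sext(X)).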
  if (VT.isVector()) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
        Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
        TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
      return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
    }

    if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
        Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
        TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
      return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
    }
  }

  // Fold ADD(ADC(Y,0,W),X) -> ADC(X,Y,W)
  if (Op0.getOpcode() == X86ISD::ADC && Op0->hasOneUse() &&
      X86::isZeroNode(Op0.getOperand(1))) {
    assert(!Op0->hasAnyUseOfValue(1) && "Overflow bit in use");
    return DAG.getNode(X86ISD::ADC, SDLoc(Op0), Op0->getVTList(), Op1,
                       Op0.getOperand(0), Op0.getOperand(2));
  }

  return combineAddOrSubToADCOrSBB(N, DAG);
}
// Try to fold (sub Y, cmovns X, -X) -> (add Y, cmovns -X, X) if the cmov
// condition comes from the subtract node that produced -X. This matches the
// cmov expansion for absolute value. By swapping the operands we convert abs
// to nabs.
static SDValue combineSubABS(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (N1.getOpcode() != X86ISD::CMOV || !N1.hasOneUse())
    return SDValue();

  X86::CondCode CC = (X86::CondCode)N1.getConstantOperandVal(2);
  if (CC != X86::COND_S && CC != X86::COND_NS)
    return SDValue();

  // Condition should come from a negate operation.
  SDValue Cond = N1.getOperand(3);
  if (Cond.getOpcode() != X86ISD::SUB || !isNullConstant(Cond.getOperand(0)))
    return SDValue();
  assert(Cond.getResNo() == 1 && "Unexpected result number");

  // Get the X and -X from the negate.
  SDValue NegX = Cond.getValue(0);
  SDValue X = Cond.getOperand(1);

  SDValue FalseOp = N1.getOperand(0);
  SDValue TrueOp = N1.getOperand(1);

  // Cmov operands should be X and NegX. Order doesn't matter.
  if (!(TrueOp == X && FalseOp == NegX) && !(TrueOp == NegX && FalseOp == X))
    return SDValue();

  // Build a new CMOV with the operands swapped.
  SDLoc DL(N);
  MVT VT = N->getSimpleValueType(0);
  SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, TrueOp, FalseOp,
                             N1.getOperand(2), Cond);
  // Convert sub to add.
  return DAG.getNode(ISD::ADD, DL, VT, N0, Cmov);
}
static SDValue combineSubSetcc(SDNode *N, SelectionDAG &DAG) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // (sub C (zero_extend (setcc)))
  // =>
  // (add (zero_extend (setcc inverted)) C-1)   if C is a nonzero immediate
  // Don't disturb (sub 0 setcc), which is easily done with neg.
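  // e.g. with C == 5: (sub 5, (zext (setcc cc))) becomes
  // (add (zext (setcc !cc)), 4) - both forms yield 4 when cc holds and 5
  // otherwise.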
  EVT VT = N->getValueType(0);
  auto *Op0C = dyn_cast<ConstantSDNode>(Op0);
  if (Op1.getOpcode() == ISD::ZERO_EXTEND && Op1.hasOneUse() && Op0C &&
      !Op0C->isZero() && Op1.getOperand(0).getOpcode() == X86ISD::SETCC &&
      Op1.getOperand(0).hasOneUse()) {
    SDValue SetCC = Op1.getOperand(0);
    X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
    X86::CondCode NewCC = X86::GetOppositeBranchCondition(CC);
    uint64_t NewImm = Op0C->getZExtValue() - 1;
    SDLoc DL(Op1);
    SDValue NewSetCC = getSETCC(NewCC, SetCC.getOperand(1), DL, DAG);
    NewSetCC = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, NewSetCC);
    return DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(VT, VT), NewSetCC,
                       DAG.getConstant(NewImm, DL, VT));
  }

  return SDValue();
}
static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // TODO: Add NoOpaque handling to isConstantIntBuildVectorOrConstantInt.
  auto IsNonOpaqueConstant = [&](SDValue Op) {
    if (SDNode *C = DAG.isConstantIntBuildVectorOrConstantInt(Op)) {
      if (auto *Cst = dyn_cast<ConstantSDNode>(C))
        return !Cst->isOpaque();
      return true;
    }
    return false;
  };

  // X86 can't encode an immediate LHS of a sub. See if we can push the
  // negation into a preceding instruction. If the RHS of the sub is a XOR with
  // one use and a constant, invert the immediate, saving one register.
  // However, ignore cases where C1 is 0, as those will become a NEG.
  // sub(C1, xor(X, C2)) -> add(xor(X, ~C2), C1+1)
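  // This holds because -a == ~a + 1 and ~(X ^ C2) == X ^ ~C2, so
  // C1 - (X ^ C2) == ~(X ^ C2) + (C1 + 1) == (X ^ ~C2) + (C1 + 1).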
  if (Op1.getOpcode() == ISD::XOR && IsNonOpaqueConstant(Op0) &&
      !isNullConstant(Op0) && IsNonOpaqueConstant(Op1.getOperand(1)) &&
      Op1->hasOneUse()) {
    SDLoc DL(N);
    EVT VT = Op0.getValueType();
    SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT, Op1.getOperand(0),
                                 DAG.getNOT(SDLoc(Op1), Op1.getOperand(1), VT));
    SDValue NewAdd =
        DAG.getNode(ISD::ADD, DL, VT, Op0, DAG.getConstant(1, DL, VT));
    return DAG.getNode(ISD::ADD, DL, VT, NewXor, NewAdd);
  }

  if (SDValue V = combineSubABS(N, DAG))
    return V;

  // Try to synthesize horizontal subs from subs of shuffles.
  if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
    return V;

  // Fold SUB(X,ADC(Y,0,W)) -> SBB(X,Y,W)
  if (Op1.getOpcode() == X86ISD::ADC && Op1->hasOneUse() &&
      X86::isZeroNode(Op1.getOperand(1))) {
    assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
    return DAG.getNode(X86ISD::SBB, SDLoc(Op1), Op1->getVTList(), Op0,
                       Op1.getOperand(0), Op1.getOperand(2));
  }

  // Fold SUB(X,SBB(Y,Z,W)) -> SUB(ADC(X,Z,W),Y)
  // Don't fold to ADC(0,0,W)/SETCC_CARRY pattern which will prevent more folds.
  if (Op1.getOpcode() == X86ISD::SBB && Op1->hasOneUse() &&
      !(X86::isZeroNode(Op0) && X86::isZeroNode(Op1.getOperand(1)))) {
    assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
    SDValue ADC = DAG.getNode(X86ISD::ADC, SDLoc(Op1), Op1->getVTList(), Op0,
                              Op1.getOperand(1), Op1.getOperand(2));
    return DAG.getNode(ISD::SUB, SDLoc(N), Op0.getValueType(), ADC.getValue(0),
                       Op1.getOperand(0));
  }

  if (SDValue V = combineXorSubCTLZ(N, DAG, Subtarget))
    return V;

  if (SDValue V = combineAddOrSubToADCOrSBB(N, DAG))
    return V;

  return combineSubSetcc(N, DAG);
}
static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT VT = N->getSimpleValueType(0);
  SDLoc DL(N);

  if (N->getOperand(0) == N->getOperand(1)) {
    if (N->getOpcode() == X86ISD::PCMPEQ)
      return DAG.getConstant(-1, DL, VT);
    if (N->getOpcode() == X86ISD::PCMPGT)
      return DAG.getConstant(0, DL, VT);
  }

  return SDValue();
}
/// Helper that combines an array of subvector ops as if they were the operands
/// of a ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
/// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
                                      ArrayRef<SDValue> Ops, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget &Subtarget) {
  assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
  unsigned EltSizeInBits = VT.getScalarSizeInBits();

  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
    return DAG.getUNDEF(VT);

  if (llvm::all_of(Ops, [](SDValue Op) {
        return ISD::isBuildVectorAllZeros(Op.getNode());
      }))
    return getZeroVector(VT, Subtarget, DAG, DL);

  SDValue Op0 = Ops[0];
  bool IsSplat = llvm::all_equal(Ops);

  // Repeated subvectors.
  if (IsSplat &&
      (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.hasAVX512()))) {
    // If this broadcast is inserted into both halves, use a larger broadcast.
    if (Op0.getOpcode() == X86ISD::VBROADCAST)
      return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));

    // If this simple subvector or scalar/subvector broadcast_load is inserted
    // into both halves, use a larger broadcast_load. Update other uses to use
    // an extracted subvector.
    if (ISD::isNormalLoad(Op0.getNode()) ||
        Op0.getOpcode() == X86ISD::VBROADCAST_LOAD ||
        Op0.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
      auto *Mem = cast<MemSDNode>(Op0);
      unsigned Opc = Op0.getOpcode() == X86ISD::VBROADCAST_LOAD
                         ? X86ISD::VBROADCAST_LOAD
                         : X86ISD::SUBV_BROADCAST_LOAD;
      if (SDValue BcastLd =
              getBROADCAST_LOAD(Opc, DL, VT, Mem->getMemoryVT(), Mem, 0, DAG)) {
        SDValue BcastSrc =
            extractSubVector(BcastLd, 0, DAG, DL, Op0.getValueSizeInBits());
        DAG.ReplaceAllUsesOfValueWith(Op0, BcastSrc);
        return BcastLd;
      }
    }

    // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
    if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
        (Subtarget.hasAVX2() ||
         X86::mayFoldLoadIntoBroadcastFromMem(Op0.getOperand(0),
                                              VT.getScalarType(), Subtarget)))
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
                                     Op0.getOperand(0),
                                     DAG.getIntPtrConstant(0, DL)));

    // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
    if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
        (Subtarget.hasAVX2() ||
         (EltSizeInBits >= 32 &&
          X86::mayFoldLoad(Op0.getOperand(0), Subtarget))) &&
        Op0.getOperand(0).getValueType() == VT.getScalarType())
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));

    // concat_vectors(extract_subvector(broadcast(x)),
    //                extract_subvector(broadcast(x))) -> broadcast(x)
    if (Op0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        Op0.getOperand(0).getValueType() == VT) {
      if (Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST ||
          Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST_LOAD)
        return Op0.getOperand(0);
    }
  }
  // concat(extract_subvector(v0,c0), extract_subvector(v1,c1)) -> vperm2x128.
  // Only concat of subvector high halves which vperm2x128 is best at.
  // TODO: This should go in combineX86ShufflesRecursively eventually.
  if (VT.is256BitVector() && Ops.size() == 2) {
    SDValue Src0 = peekThroughBitcasts(Ops[0]);
    SDValue Src1 = peekThroughBitcasts(Ops[1]);
    if (Src0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        Src1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
      EVT SrcVT0 = Src0.getOperand(0).getValueType();
      EVT SrcVT1 = Src1.getOperand(0).getValueType();
      unsigned NumSrcElts0 = SrcVT0.getVectorNumElements();
      unsigned NumSrcElts1 = SrcVT1.getVectorNumElements();
      if (SrcVT0.is256BitVector() && SrcVT1.is256BitVector() &&
          Src0.getConstantOperandAPInt(1) == (NumSrcElts0 / 2) &&
          Src1.getConstantOperandAPInt(1) == (NumSrcElts1 / 2)) {
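        // Immediate 0x31 = (3 << 4) | 1: take 128-bit lane 1 (the high half
        // of Src0) for the low result half and lane 3 (the high half of Src1)
        // for the high result half.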
        return DAG.getNode(X86ISD::VPERM2X128, DL, VT,
                           DAG.getBitcast(VT, Src0.getOperand(0)),
                           DAG.getBitcast(VT, Src1.getOperand(0)),
                           DAG.getTargetConstant(0x31, DL, MVT::i8));
      }
    }
  }
  // Repeated opcode.
  // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
  // but it currently struggles with different vector widths.
  if (llvm::all_of(Ops, [Op0](SDValue Op) {
        return Op.getOpcode() == Op0.getOpcode();
      })) {
    auto ConcatSubOperand = [&](EVT VT, ArrayRef<SDValue> SubOps, unsigned I) {
      SmallVector<SDValue> Subs;
      for (SDValue SubOp : SubOps)
        Subs.push_back(SubOp.getOperand(I));
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
    };
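    // IsConcatFree returns true when operand Op of every subvector is an
    // extract_subvector of the same wide VT vector at the matching offset,
    // i.e. concatenating those operands simply rebuilds the original vector.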
    auto IsConcatFree = [](MVT VT, ArrayRef<SDValue> SubOps, unsigned Op) {
      for (unsigned I = 0, E = SubOps.size(); I != E; ++I) {
        SDValue Sub = SubOps[I].getOperand(Op);
        unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
        if (Sub.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
            Sub.getOperand(0).getValueType() != VT ||
            Sub.getConstantOperandAPInt(1) != (I * NumSubElts))
          return false;
      }
      return true;
    };

    unsigned NumOps = Ops.size();
    switch (Op0.getOpcode()) {
    case X86ISD::VBROADCAST: {
      if (!IsSplat && llvm::all_of(Ops, [](SDValue Op) {
            return Op.getOperand(0).getValueType().is128BitVector();
          })) {
        if (VT == MVT::v4f64 || VT == MVT::v4i64)
          return DAG.getNode(X86ISD::UNPCKL, DL, VT,
                             ConcatSubOperand(VT, Ops, 0),
                             ConcatSubOperand(VT, Ops, 0));
        // TODO: Add pseudo v8i32 PSHUFD handling to AVX1Only targets.
        if (VT == MVT::v8f32 || (VT == MVT::v8i32 && Subtarget.hasInt256()))
          return DAG.getNode(VT == MVT::v8f32 ? X86ISD::VPERMILPI
                                              : X86ISD::PSHUFD,
                             DL, VT, ConcatSubOperand(VT, Ops, 0),
                             getV4X86ShuffleImm8ForMask({0, 0, 0, 0}, DL, DAG));
      }
      break;
    }
    case X86ISD::MOVDDUP:
    case X86ISD::MOVSHDUP:
    case X86ISD::MOVSLDUP: {
      if (!IsSplat)
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0));
      break;
    }
    case X86ISD::SHUFP: {
      // Add SHUFPD support if/when necessary.
      if (!IsSplat && VT.getScalarType() == MVT::f32 &&
          llvm::all_of(Ops, [Op0](SDValue Op) {
            return Op.getOperand(2) == Op0.getOperand(2);
          })) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0),
                           ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
      }
      break;
    }
    case X86ISD::UNPCKH:
    case X86ISD::UNPCKL: {
      // Don't concatenate build_vector patterns.
      if (!IsSplat && VT.getScalarSizeInBits() >= 32 &&
          ((VT.is256BitVector() && Subtarget.hasInt256()) ||
           (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
          none_of(Ops, [](SDValue Op) {
            return peekThroughBitcasts(Op.getOperand(0)).getOpcode() ==
                       ISD::SCALAR_TO_VECTOR ||
                   peekThroughBitcasts(Op.getOperand(1)).getOpcode() ==
                       ISD::SCALAR_TO_VECTOR;
          })) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0),
                           ConcatSubOperand(VT, Ops, 1));
      }
      break;
    }
    case X86ISD::PSHUFHW:
    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFD:
      if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
          Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
      }
      [[fallthrough]];
    case X86ISD::VPERMILPI:
      if (!IsSplat && VT.getScalarSizeInBits() == 32 &&
          (VT.is256BitVector() ||
           (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
          all_of(Ops, [&Op0](SDValue Op) {
            return Op0.getOperand(1) == Op.getOperand(1);
          })) {
        MVT FloatVT = VT.changeVectorElementType(MVT::f32);
        SDValue Res = DAG.getBitcast(FloatVT, ConcatSubOperand(VT, Ops, 0));
        Res =
            DAG.getNode(X86ISD::VPERMILPI, DL, FloatVT, Res, Op0.getOperand(1));
        return DAG.getBitcast(VT, Res);
      }
      if (!IsSplat && NumOps == 2 && VT == MVT::v4f64) {
        uint64_t Idx0 = Ops[0].getConstantOperandVal(1);
        uint64_t Idx1 = Ops[1].getConstantOperandVal(1);
        uint64_t Idx = ((Idx1 & 3) << 2) | (Idx0 & 3);
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0),
                           DAG.getTargetConstant(Idx, DL, MVT::i8));
      }
      break;
    case X86ISD::PSHUFB:
    case X86ISD::PSADBW:
      if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
                       (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
        MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
        SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
                                 NumOps * SrcVT.getVectorNumElements());
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(SrcVT, Ops, 0),
                           ConcatSubOperand(SrcVT, Ops, 1));
      }
      break;
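    // The VPERMV/VPERMV3 cases below concatenate repeated variable shuffles
    // by concatenating their sources and widening the mask, offsetting the
    // entries taken from the second pair of operands by the original element
    // count.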
    case X86ISD::VPERMV:
      if (!IsSplat && NumOps == 2 &&
          (VT.is512BitVector() && Subtarget.useAVX512Regs())) {
        MVT OpVT = Op0.getSimpleValueType();
        int NumSrcElts = OpVT.getVectorNumElements();
        SmallVector<int, 64> ConcatMask;
        for (unsigned i = 0; i != NumOps; ++i) {
          SmallVector<int, 64> SubMask;
          SmallVector<SDValue, 2> SubOps;
          if (!getTargetShuffleMask(Ops[i].getNode(), OpVT, false, SubOps,
                                    SubMask))
            break;
          for (int M : SubMask) {
            if (0 <= M)
              M += i * NumSrcElts;
            ConcatMask.push_back(M);
          }
        }
        if (ConcatMask.size() == (NumOps * NumSrcElts)) {
          SDValue Src = concatSubVectors(Ops[0].getOperand(1),
                                         Ops[1].getOperand(1), DAG, DL);
          MVT IntMaskSVT = MVT::getIntegerVT(EltSizeInBits);
          MVT IntMaskVT = MVT::getVectorVT(IntMaskSVT, NumOps * NumSrcElts);
          SDValue Mask = getConstVector(ConcatMask, IntMaskVT, DAG, DL, true);
          return DAG.getNode(X86ISD::VPERMV, DL, VT, Mask, Src);
        }
      }
      break;
    case X86ISD::VPERMV3:
      if (!IsSplat && NumOps == 2 && VT.is512BitVector()) {
        MVT OpVT = Op0.getSimpleValueType();
        int NumSrcElts = OpVT.getVectorNumElements();
        SmallVector<int, 64> ConcatMask;
        for (unsigned i = 0; i != NumOps; ++i) {
          SmallVector<int, 64> SubMask;
          SmallVector<SDValue, 2> SubOps;
          if (!getTargetShuffleMask(Ops[i].getNode(), OpVT, false, SubOps,
                                    SubMask))
            break;
          for (int M : SubMask) {
            if (0 <= M) {
              M += M < NumSrcElts ? 0 : NumSrcElts;
              M += i * NumSrcElts;
            }
            ConcatMask.push_back(M);
          }
        }
        if (ConcatMask.size() == (NumOps * NumSrcElts)) {
          SDValue Src0 = concatSubVectors(Ops[0].getOperand(0),
                                          Ops[1].getOperand(0), DAG, DL);
          SDValue Src1 = concatSubVectors(Ops[0].getOperand(2),
                                          Ops[1].getOperand(2), DAG, DL);
          MVT IntMaskSVT = MVT::getIntegerVT(EltSizeInBits);
          MVT IntMaskVT = MVT::getVectorVT(IntMaskSVT, NumOps * NumSrcElts);
          SDValue Mask = getConstVector(ConcatMask, IntMaskVT, DAG, DL, true);
          return DAG.getNode(X86ISD::VPERMV3, DL, VT, Src0, Mask, Src1);
        }
      }
      break;
    case ISD::TRUNCATE:
      if (!IsSplat && NumOps == 2 && VT.is256BitVector()) {
        EVT SrcVT = Ops[0].getOperand(0).getValueType();
        if (SrcVT.is256BitVector() && SrcVT.isSimple() &&
            SrcVT == Ops[1].getOperand(0).getValueType() &&
            Subtarget.useAVX512Regs() &&
            Subtarget.getPreferVectorWidth() >= 512 &&
            (SrcVT.getScalarSizeInBits() > 16 || Subtarget.useBWIRegs())) {
          EVT NewSrcVT = SrcVT.getDoubleNumVectorElementsVT(*DAG.getContext());
          return DAG.getNode(ISD::TRUNCATE, DL, VT,
                             ConcatSubOperand(NewSrcVT, Ops, 0));
        }
      }
      break;
    case X86ISD::VSHLI:
    case X86ISD::VSRLI:
      // Special case: SHL/SRL AVX1 V4i64 by 32-bits can lower as a shuffle.
      // TODO: Move this to LowerShiftByScalarImmediate?
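      // Shifting an i64 left by 32 moves its low i32 into the high half and
      // zeroes the low half (a right shift mirrors this), so on the v8i32
      // bitcast it is just an interleave with a zero vector.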
      if (VT == MVT::v4i64 && !Subtarget.hasInt256() &&
          llvm::all_of(Ops, [](SDValue Op) {
            return Op.getConstantOperandAPInt(1) == 32;
          })) {
        SDValue Res = DAG.getBitcast(MVT::v8i32, ConcatSubOperand(VT, Ops, 0));
        SDValue Zero = getZeroVector(MVT::v8i32, Subtarget, DAG, DL);
        if (Op0.getOpcode() == X86ISD::VSHLI) {
          Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
                                     {8, 0, 8, 2, 8, 4, 8, 6});
        } else {
          Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
                                     {1, 8, 3, 8, 5, 8, 7, 8});
        }
        return DAG.getBitcast(VT, Res);
      }
      [[fallthrough]];
    case X86ISD::VSRAI:
    case X86ISD::VSHL:
    case X86ISD::VSRL:
    case X86ISD::VSRA:
      if (((VT.is256BitVector() && Subtarget.hasInt256()) ||
           (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
            (EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) &&
          llvm::all_of(Ops, [Op0](SDValue Op) {
            return Op0.getOperand(1) == Op.getOperand(1);
          })) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
      }
      break;
    case X86ISD::VPERMI:
    case X86ISD::VROTLI:
    case X86ISD::VROTRI:
      if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
          llvm::all_of(Ops, [Op0](SDValue Op) {
            return Op0.getOperand(1) == Op.getOperand(1);
          })) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
      }
      break;
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case X86ISD::ANDNP:
      if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
                       (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
        MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
        SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
                                 NumOps * SrcVT.getVectorNumElements());
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(SrcVT, Ops, 0),
                           ConcatSubOperand(SrcVT, Ops, 1));
      }
      break;
    case ISD::CTPOP:
    case ISD::CTTZ:
    case ISD::CTLZ:
    case ISD::CTTZ_ZERO_UNDEF:
    case ISD::CTLZ_ZERO_UNDEF:
      if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
                       (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0));
      }
      break;
    case X86ISD::GF2P8AFFINEQB:
      if (!IsSplat &&
          (VT.is256BitVector() ||
           (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
          llvm::all_of(Ops, [Op0](SDValue Op) {
            return Op0.getOperand(2) == Op.getOperand(2);
          })) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0),
                           ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
      }
      break;
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
      if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
                       (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
                        (EltSizeInBits >= 32 || Subtarget.useBWIRegs())))) {
        MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
        SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
                                 NumOps * SrcVT.getVectorNumElements());
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(SrcVT, Ops, 0),
                           ConcatSubOperand(SrcVT, Ops, 1));
      }
      break;
    // VADD, VSUB and VMUL can execute on more ports than VINSERT and their
    // latency is short, so we don't replace them here.
    case ISD::FDIV:
      if (!IsSplat && (VT.is256BitVector() ||
                       (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
        MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
        SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
                                 NumOps * SrcVT.getVectorNumElements());
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(SrcVT, Ops, 0),
                           ConcatSubOperand(SrcVT, Ops, 1));
      }
      break;
    case X86ISD::HADD:
    case X86ISD::HSUB:
    case X86ISD::FHADD:
    case X86ISD::FHSUB:
    case X86ISD::PACKSS:
    case X86ISD::PACKUS:
      if (!IsSplat && VT.is256BitVector() &&
          (VT.isFloatingPoint() || Subtarget.hasInt256())) {
        MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
        SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
                                 NumOps * SrcVT.getVectorNumElements());
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(SrcVT, Ops, 0),
                           ConcatSubOperand(SrcVT, Ops, 1));
      }
      break;
    case X86ISD::PALIGNR:
      if (!IsSplat &&
          ((VT.is256BitVector() && Subtarget.hasInt256()) ||
           (VT.is512BitVector() && Subtarget.useBWIRegs())) &&
          llvm::all_of(Ops, [Op0](SDValue Op) {
            return Op0.getOperand(2) == Op.getOperand(2);
          })) {
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           ConcatSubOperand(VT, Ops, 0),
                           ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
      }
      break;
    case ISD::VSELECT:
      if (!IsSplat && Subtarget.hasAVX512() &&
          (VT.is256BitVector() ||
           (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
          (EltSizeInBits >= 32 || Subtarget.hasBWI())) {
        EVT SelVT = Ops[0].getOperand(0).getValueType();
        if (SelVT.getVectorElementType() == MVT::i1) {
          SelVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                   Ops.size() * SelVT.getVectorNumElements());
          if (DAG.getTargetLoweringInfo().isTypeLegal(SelVT))
            return DAG.getNode(Op0.getOpcode(), DL, VT,
                               ConcatSubOperand(SelVT.getSimpleVT(), Ops, 0),
                               ConcatSubOperand(VT, Ops, 1),
                               ConcatSubOperand(VT, Ops, 2));
        }
      }
      [[fallthrough]];
    case X86ISD::BLENDV:
      if (!IsSplat && VT.is256BitVector() && Ops.size() == 2 &&
          (EltSizeInBits >= 32 || Subtarget.hasInt256()) &&
          IsConcatFree(VT, Ops, 1) && IsConcatFree(VT, Ops, 2)) {
        EVT SelVT = Ops[0].getOperand(0).getValueType();
        SelVT = SelVT.getDoubleNumVectorElementsVT(*DAG.getContext());
        if (DAG.getTargetLoweringInfo().isTypeLegal(SelVT))
          return DAG.getNode(Op0.getOpcode(), DL, VT,
                             ConcatSubOperand(SelVT.getSimpleVT(), Ops, 0),
                             ConcatSubOperand(VT, Ops, 1),
                             ConcatSubOperand(VT, Ops, 2));
      }
      break;
    }
  }
  // Fold subvector loads into one.
  // If needed, look through bitcasts to get to the load.
  if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
    unsigned Fast;
    const X86TargetLowering *TLI = Subtarget.getTargetLowering();
    if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                                *FirstLd->getMemOperand(), &Fast) &&
        Fast) {
      if (SDValue Ld =
              EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
        return Ld;
    }
  }

  // Attempt to fold target constant loads.
  if (all_of(Ops, [](SDValue Op) { return getTargetConstantFromNode(Op); })) {
    SmallVector<APInt> EltBits;
    APInt UndefElts = APInt::getZero(VT.getVectorNumElements());
    for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
      APInt OpUndefElts;
      SmallVector<APInt> OpEltBits;
      if (!getTargetConstantBitsFromNode(Ops[I], EltSizeInBits, OpUndefElts,
                                         OpEltBits, true, false))
        break;
      EltBits.append(OpEltBits);
      UndefElts.insertBits(OpUndefElts, I * OpUndefElts.getBitWidth());
    }
    if (EltBits.size() == VT.getVectorNumElements())
      return getConstVector(EltBits, UndefElts, VT, DAG, DL);
  }

  return SDValue();
}
static SDValue combineCONCAT_VECTORS(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  EVT SrcVT = N->getOperand(0).getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());

  if (VT.getVectorElementType() == MVT::i1) {
    // Attempt to constant fold.
    unsigned SubSizeInBits = SrcVT.getSizeInBits();
    APInt Constant = APInt::getZero(VT.getSizeInBits());
    for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
      auto *C = dyn_cast<ConstantSDNode>(peekThroughBitcasts(Ops[I]));
      if (!C) break;
      Constant.insertBits(C->getAPIntValue(), I * SubSizeInBits);
      if (I == (E - 1)) {
        EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
        if (TLI.isTypeLegal(IntVT))
          return DAG.getBitcast(VT, DAG.getConstant(Constant, SDLoc(N), IntVT));
      }
    }

    // Don't do anything else for i1 vectors.
    return SDValue();
  }

  if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
    if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
                                           DCI, Subtarget))
      return R;
  }

  return SDValue();
}
static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  MVT OpVT = N->getSimpleValueType(0);

  bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;

  SDLoc dl(N);
  SDValue Vec = N->getOperand(0);
  SDValue SubVec = N->getOperand(1);

  uint64_t IdxVal = N->getConstantOperandVal(2);
  MVT SubVecVT = SubVec.getSimpleValueType();

  if (Vec.isUndef() && SubVec.isUndef())
    return DAG.getUNDEF(OpVT);

  // Inserting undefs/zeros into zeros/undefs is a zero vector.
  if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
      (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
    return getZeroVector(OpVT, Subtarget, DAG, dl);

  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
    // If we're inserting into a zero vector and then into a larger zero vector,
    // just insert into the larger zero vector directly.
    if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
        ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
      uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
                         getZeroVector(OpVT, Subtarget, DAG, dl),
                         SubVec.getOperand(1),
                         DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
    }

    // If we're inserting into a zero vector and our input was extracted from
    // an insert into a zero vector of the same type, and the extraction was at
    // least as large as the original insertion, just insert the original
    // subvector into a zero vector.
    if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
        isNullConstant(SubVec.getOperand(1)) &&
        SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
      SDValue Ins = SubVec.getOperand(0);
      if (isNullConstant(Ins.getOperand(2)) &&
          ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
          Ins.getOperand(1).getValueSizeInBits().getFixedValue() <=
              SubVecVT.getFixedSizeInBits())
        return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
                           getZeroVector(OpVT, Subtarget, DAG, dl),
                           Ins.getOperand(1), N->getOperand(2));
    }
  }

  // Stop here if this is an i1 vector.
  if (IsI1Vector)
    return SDValue();
  // Eliminate an intermediate vector widening:
  // insert_subvector X, (insert_subvector undef, Y, 0), Idx -->
  // insert_subvector X, Y, Idx
  // TODO: This is a more general version of a DAGCombiner fold, can we move it
  // there?
  if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
      SubVec.getOperand(0).isUndef() && isNullConstant(SubVec.getOperand(2)))
    return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec,
                       SubVec.getOperand(1), N->getOperand(2));

  // If this is an insert of an extract, combine to a shuffle. Don't do this
  // if the insert or extract can be represented with a subregister operation.
  if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      SubVec.getOperand(0).getSimpleValueType() == OpVT &&
      (IdxVal != 0 ||
       !(Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
    int ExtIdxVal = SubVec.getConstantOperandVal(1);
    if (ExtIdxVal != 0) {
      int VecNumElts = OpVT.getVectorNumElements();
      int SubVecNumElts = SubVecVT.getVectorNumElements();
      SmallVector<int, 64> Mask(VecNumElts);
      // First create an identity shuffle mask.
      for (int i = 0; i != VecNumElts; ++i)
        Mask[i] = i;
      // Now insert the extracted portion.
      for (int i = 0; i != SubVecNumElts; ++i)
        Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
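      // e.g. OpVT v8i32, IdxVal 0, ExtIdxVal 4: the mask becomes
      // {12,13,14,15, 4,5,6,7} over (Vec, extract source), placing the
      // extracted high half at the bottom of the result.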
      return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
    }
  }

  // Match concat_vector style patterns.
  SmallVector<SDValue, 2> SubVectorOps;
  if (collectConcatOps(N, SubVectorOps, DAG)) {
    if (SDValue Fold =
            combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
      return Fold;

    // If we're inserting all zeros into the upper half, change this to
    // a concat with zero. We will match this to a move
    // with implicit upper bit zeroing during isel.
    // We do this here because we don't want combineConcatVectorOps to
    // create INSERT_SUBVECTOR from CONCAT_VECTORS.
    if (SubVectorOps.size() == 2 &&
        ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
                         getZeroVector(OpVT, Subtarget, DAG, dl),
                         SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
  }

  // If this is a broadcast insert into an upper undef, use a larger broadcast.
  if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
    return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));

  // If this is a broadcast load inserted into an upper undef, use a larger
  // broadcast load.
  if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
      SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
    auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
    SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
    SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
    SDValue BcastLd =
        DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
                                MemIntr->getMemoryVT(),
                                MemIntr->getMemOperand());
    DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
    return BcastLd;
  }

  // If we're splatting the lower half subvector of a full vector load into the
  // upper half, attempt to create a subvector broadcast.
  if (IdxVal == (OpVT.getVectorNumElements() / 2) && SubVec.hasOneUse() &&
      Vec.getValueSizeInBits() == (2 * SubVec.getValueSizeInBits())) {
    auto *VecLd = dyn_cast<LoadSDNode>(Vec);
    auto *SubLd = dyn_cast<LoadSDNode>(SubVec);
    if (VecLd && SubLd &&
        DAG.areNonVolatileConsecutiveLoads(SubLd, VecLd,
                                           SubVec.getValueSizeInBits() / 8, 0))
      return getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, dl, OpVT, SubVecVT,
                               SubLd, 0, DAG);
  }

  return SDValue();
}
/// If we are extracting a subvector of a vector select and the select condition
/// is composed of concatenated vectors, try to narrow the select width. This
/// is a common pattern for AVX1 integer code because 256-bit selects may be
/// legal, but there is almost no integer math/logic available for 256-bit.
/// This function should only be called with legal types (otherwise, the calls
/// to get simple value types will assert).
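/// For example, extracting the upper v2i64 (index 2) of a v4i64 vselect whose
/// condition was bitcast from a v8i32 vselect scales the index by 8/4 to 4,
/// extracts the upper 128 bits of the condition and both select operands, and
/// builds a narrower v4i32 vselect.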
static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
  SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
  if (Sel.getOpcode() != ISD::VSELECT ||
      !isFreeToSplitVector(Sel.getOperand(0).getNode(), DAG))
    return SDValue();

  // Note: We assume simple value types because this should only be called with
  //       legal operations/types.
  // TODO: This can be extended to handle extraction to 256-bits.
  MVT VT = Ext->getSimpleValueType(0);
  if (!VT.is128BitVector())
    return SDValue();

  MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
  if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
    return SDValue();

  MVT WideVT = Ext->getOperand(0).getSimpleValueType();
  MVT SelVT = Sel.getSimpleValueType();
  assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
         "Unexpected vector type with legal operations");

  unsigned SelElts = SelVT.getVectorNumElements();
  unsigned CastedElts = WideVT.getVectorNumElements();
  unsigned ExtIdx = Ext->getConstantOperandVal(1);
  if (SelElts % CastedElts == 0) {
    // The select has the same or more (narrower) elements than the extract
    // operand. The extraction index gets scaled by that factor.
    ExtIdx *= (SelElts / CastedElts);
  } else if (CastedElts % SelElts == 0) {
    // The select has fewer (wider) elements than the extract operand. Make
    // sure that the extraction index can be divided evenly.
    unsigned IndexDivisor = CastedElts / SelElts;
    if (ExtIdx % IndexDivisor != 0)
      return SDValue();
    ExtIdx /= IndexDivisor;
  } else {
    llvm_unreachable("Element count of simple vector types are not divisible?");
  }

  unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
  unsigned NarrowElts = SelElts / NarrowingFactor;
  MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
  SDLoc DL(Ext);
  SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
  SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
  SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
  SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
  return DAG.getBitcast(VT, NarrowSel);
}
static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget &Subtarget) {
  // For AVX1 only, if we are extracting from a 256-bit and+not (which will
  // eventually get combined/lowered into ANDNP) with a concatenated operand,
  // split the 'and' into 128-bit ops to avoid the concatenate and extract.
  // We let generic combining take over from there to simplify the
  // insert/extract and 'not'.
  // This pattern emerges during AVX1 legalization. We handle it before lowering
  // to avoid complications like splitting constant vector loads.

  // Capture the original wide type in the likely case that we need to bitcast
  // back to this type.
  if (!N->getValueType(0).isSimple())
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  SDValue InVec = N->getOperand(0);
  unsigned IdxVal = N->getConstantOperandVal(1);
  SDValue InVecBC = peekThroughBitcasts(InVec);
  EVT InVecVT = InVec.getValueType();
  unsigned SizeInBits = VT.getSizeInBits();
  unsigned InSizeInBits = InVecVT.getSizeInBits();
  unsigned NumSubElts = VT.getVectorNumElements();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (Subtarget.hasAVX() && !Subtarget.hasAVX2() && TLI.isTypeLegal(InVecVT) &&
      InSizeInBits == 256 && InVecBC.getOpcode() == ISD::AND) {
    auto isConcatenatedNot = [](SDValue V) {
      V = peekThroughBitcasts(V);
      if (!isBitwiseNot(V))
        return false;
      SDValue NotOp = V->getOperand(0);
      return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
    };
    if (isConcatenatedNot(InVecBC.getOperand(0)) ||
        isConcatenatedNot(InVecBC.getOperand(1))) {
      // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
      SDValue Concat = splitVectorIntBinary(InVecBC, DAG);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
                         DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
    }
  }
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue V = narrowExtractedVectorSelect(N, DAG))
    return V;

  if (ISD::isBuildVectorAllZeros(InVec.getNode()))
    return getZeroVector(VT, Subtarget, DAG, SDLoc(N));

  if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
    if (VT.getScalarType() == MVT::i1)
      return DAG.getConstant(1, SDLoc(N), VT);
    return getOnesVector(VT, DAG, SDLoc(N));
  }

  if (InVec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getBuildVector(VT, SDLoc(N),
                              InVec->ops().slice(IdxVal, NumSubElts));

  // If we are extracting from an insert into a larger vector, replace with a
  // smaller insert if we don't access less than the original subvector. Don't
  // do this for i1 vectors.
  // TODO: Relax the matching indices requirement?
  if (VT.getVectorElementType() != MVT::i1 &&
      InVec.getOpcode() == ISD::INSERT_SUBVECTOR && InVec.hasOneUse() &&
      IdxVal == InVec.getConstantOperandVal(2) &&
      InVec.getOperand(1).getValueSizeInBits() <= SizeInBits) {
    SDLoc DL(N);
    SDValue NewExt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT,
                                 InVec.getOperand(0), N->getOperand(1));
    unsigned NewIdxVal = InVec.getConstantOperandVal(2) - IdxVal;
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, NewExt,
                       InVec.getOperand(1),
                       DAG.getVectorIdxConstant(NewIdxVal, DL));
  }

  // If we're extracting an upper subvector from a broadcast we should just
  // extract the lowest subvector instead which should allow
  // SimplifyDemandedVectorElts to do more simplifications.
  if (IdxVal != 0 && (InVec.getOpcode() == X86ISD::VBROADCAST ||
                      InVec.getOpcode() == X86ISD::VBROADCAST_LOAD ||
                      DAG.isSplatValue(InVec, /*AllowUndefs*/ false)))
    return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);

  // If we're extracting a broadcasted subvector, just use the lowest subvector.
  if (IdxVal != 0 && InVec.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
      cast<MemIntrinsicSDNode>(InVec)->getMemoryVT() == VT)
    return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
  // Attempt to extract from the source of a shuffle vector.
  if ((InSizeInBits % SizeInBits) == 0 && (IdxVal % NumSubElts) == 0) {
    SmallVector<int, 32> ShuffleMask;
    SmallVector<int, 32> ScaledMask;
    SmallVector<SDValue, 2> ShuffleInputs;
    unsigned NumSubVecs = InSizeInBits / SizeInBits;
    // Decode the shuffle mask and scale it so it's shuffling subvectors.
    if (getTargetShuffleInputs(InVecBC, ShuffleInputs, ShuffleMask, DAG) &&
        scaleShuffleElements(ShuffleMask, NumSubVecs, ScaledMask)) {
      unsigned SubVecIdx = IdxVal / NumSubElts;
      if (ScaledMask[SubVecIdx] == SM_SentinelUndef)
        return DAG.getUNDEF(VT);
      if (ScaledMask[SubVecIdx] == SM_SentinelZero)
        return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
      SDValue Src = ShuffleInputs[ScaledMask[SubVecIdx] / NumSubVecs];
      if (Src.getValueSizeInBits() == InSizeInBits) {
        unsigned SrcSubVecIdx = ScaledMask[SubVecIdx] % NumSubVecs;
        unsigned SrcEltIdx = SrcSubVecIdx * NumSubElts;
        return extractSubVector(DAG.getBitcast(InVecVT, Src), SrcEltIdx, DAG,
                                SDLoc(N), SizeInBits);
      }
    }
  }
  // If we're extracting the lowest subvector and we're the only user,
  // we may be able to perform this with a smaller vector width.
  unsigned InOpcode = InVec.getOpcode();
  if (InVec.hasOneUse()) {
    if (IdxVal == 0 && VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
      // v2f64 CVTDQ2PD(v4i32).
      if (InOpcode == ISD::SINT_TO_FP &&
          InVec.getOperand(0).getValueType() == MVT::v4i32) {
        return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
      }
      // v2f64 CVTUDQ2PD(v4i32).
      if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
          InVec.getOperand(0).getValueType() == MVT::v4i32) {
        return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
      }
      // v2f64 CVTPS2PD(v4f32).
      if (InOpcode == ISD::FP_EXTEND &&
          InVec.getOperand(0).getValueType() == MVT::v4f32) {
        return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
      }
    }
    if (IdxVal == 0 &&
        (ISD::isExtOpcode(InOpcode) || ISD::isExtVecInRegOpcode(InOpcode)) &&
        (SizeInBits == 128 || SizeInBits == 256) &&
        InVec.getOperand(0).getValueSizeInBits() >= SizeInBits) {
      SDLoc DL(N);
      SDValue Ext = InVec.getOperand(0);
      if (Ext.getValueSizeInBits() > SizeInBits)
        Ext = extractSubVector(Ext, 0, DAG, DL, SizeInBits);
      unsigned ExtOp = DAG.getOpcode_EXTEND_VECTOR_INREG(InOpcode);
      return DAG.getNode(ExtOp, DL, VT, Ext);
    }
    if (IdxVal == 0 && InOpcode == ISD::VSELECT &&
        InVec.getOperand(0).getValueType().is256BitVector() &&
        InVec.getOperand(1).getValueType().is256BitVector() &&
        InVec.getOperand(2).getValueType().is256BitVector()) {
      SDLoc DL(N);
      SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
      SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
      SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
      return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
    }
    if (IdxVal == 0 && InOpcode == ISD::TRUNCATE && Subtarget.hasVLX() &&
        (VT.is128BitVector() || VT.is256BitVector())) {
      SDLoc DL(N);
      SDValue InVecSrc = InVec.getOperand(0);
      unsigned Scale = InVecSrc.getValueSizeInBits() / InSizeInBits;
      SDValue Ext = extractSubVector(InVecSrc, 0, DAG, DL, Scale * SizeInBits);
      return DAG.getNode(InOpcode, DL, VT, Ext);
    }
    if (InOpcode == X86ISD::MOVDDUP &&
        (VT.is128BitVector() || VT.is256BitVector())) {
      SDLoc DL(N);
      SDValue Ext0 =
          extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
      return DAG.getNode(InOpcode, DL, VT, Ext0);
    }
  }

  // Always split vXi64 logical shifts where we're extracting the upper 32-bits
  // as this is very likely to fold into a shuffle/truncation.
  if ((InOpcode == X86ISD::VSHLI || InOpcode == X86ISD::VSRLI) &&
      InVecVT.getScalarSizeInBits() == 64 &&
      InVec.getConstantOperandAPInt(1) == 32) {
    SDLoc DL(N);
    SDValue Ext =
        extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
    return DAG.getNode(InOpcode, DL, VT, Ext, InVec.getOperand(1));
  }

  return SDValue();
}
static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(0);
  SDLoc DL(N);

  // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
  // This occurs frequently in our masked scalar intrinsic code and our
  // floating point select lowering with AVX512.
  // TODO: SimplifyDemandedBits instead?
  if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse() &&
      isOneConstant(Src.getOperand(1)))
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Src.getOperand(0));

  // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
  if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
      Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
    if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
      if (C->isZero())
        return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
                           Src.getOperand(1));

  // Reduce v2i64 to v4i32 if we don't need the upper bits, or they are known
  // zero.
  // TODO: Move to DAGCombine/SimplifyDemandedBits?
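  // Inserting a 32-bit scalar (MOVD) can be cheaper than materializing the
  // full 64-bit value, and VZEXT_MOVL re-establishes the zeroed upper
  // elements for the zero-extend case.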
  if ((VT == MVT::v2i64 || VT == MVT::v2f64) && Src.hasOneUse()) {
    auto IsExt64 = [&DAG](SDValue Op, bool IsZeroExt) {
      if (Op.getValueType() != MVT::i64)
        return SDValue();
      unsigned Opc = IsZeroExt ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND;
      if (Op.getOpcode() == Opc &&
          Op.getOperand(0).getScalarValueSizeInBits() <= 32)
        return Op.getOperand(0);
      unsigned Ext = IsZeroExt ? ISD::ZEXTLOAD : ISD::EXTLOAD;
      if (auto *Ld = dyn_cast<LoadSDNode>(Op))
        if (Ld->getExtensionType() == Ext &&
            Ld->getMemoryVT().getScalarSizeInBits() <= 32)
          return Op;
      if (IsZeroExt) {
        KnownBits Known = DAG.computeKnownBits(Op);
        if (!Known.isConstant() && Known.countMinLeadingZeros() >= 32)
          return Op;
      }
      return SDValue();
    };

    if (SDValue AnyExt = IsExt64(peekThroughOneUseBitcasts(Src), false))
      return DAG.getBitcast(
          VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
                          DAG.getAnyExtOrTrunc(AnyExt, DL, MVT::i32)));

    if (SDValue ZeroExt = IsExt64(peekThroughOneUseBitcasts(Src), true))
      return DAG.getBitcast(
          VT,
          DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v4i32,
                      DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
                                  DAG.getZExtOrTrunc(ZeroExt, DL, MVT::i32))));
  }

  // Combine (v2i64 (scalar_to_vector (i64 (bitconvert (mmx))))) to MOVQ2DQ.
  if (VT == MVT::v2i64 && Src.getOpcode() == ISD::BITCAST &&
      Src.getOperand(0).getValueType() == MVT::x86mmx)
    return DAG.getNode(X86ISD::MOVQ2DQ, DL, VT, Src.getOperand(0));

  // See if we're broadcasting the scalar value, in which case just reuse that.
  // Ensure the same SDValue from the SDNode use is being used.
  if (VT.getScalarType() == Src.getValueType())
    for (SDNode *User : Src->uses())
      if (User->getOpcode() == X86ISD::VBROADCAST &&
          Src == User->getOperand(0)) {
        unsigned SizeInBits = VT.getFixedSizeInBits();
        unsigned BroadcastSizeInBits =
            User->getValueSizeInBits(0).getFixedValue();
        if (BroadcastSizeInBits == SizeInBits)
          return SDValue(User, 0);
        if (BroadcastSizeInBits > SizeInBits)
          return extractSubVector(SDValue(User, 0), 0, DAG, DL, SizeInBits);
        // TODO: Handle BroadcastSizeInBits < SizeInBits when we have test
        // coverage.
      }

  return SDValue();
}
// Simplify PMULDQ and PMULUDQ operations.
static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI,
                             const X86Subtarget &Subtarget) {
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Canonicalize constant to RHS.
  if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
      !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
    return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);

  // Multiply by zero.
  // Don't return RHS as it may contain UNDEFs.
  if (ISD::isBuildVectorAllZeros(RHS.getNode()))
    return DAG.getConstant(0, SDLoc(N), N->getValueType(0));

  // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
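  // With all 64 result bits demanded, the demanded-bits machinery reports
  // that only the low 32 bits of each operand are used, so extensions feeding
  // the multiply can often be simplified away.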
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(64), DCI))
    return SDValue(N, 0);

  // If the input is an extend_invec and the SimplifyDemandedBits call didn't
  // convert it to any_extend_invec, due to the LegalOperations check, do the
  // conversion directly to a vector shuffle manually. This exposes combine
  // opportunities missed by combineEXTEND_VECTOR_INREG not calling
  // combineX86ShufflesRecursively on SSE4.1 targets.
  // FIXME: This is basically a hack around several other issues related to
  // ANY_EXTEND_VECTOR_INREG.
  if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
      (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
       LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
      LHS.getOperand(0).getValueType() == MVT::v4i32) {
    SDLoc dl(N);
    LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
                               LHS.getOperand(0), { 0, -1, 1, -1 });
    LHS = DAG.getBitcast(MVT::v2i64, LHS);
    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
  }
  if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
      (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
       RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
      RHS.getOperand(0).getValueType() == MVT::v4i32) {
    SDLoc dl(N);
    RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
                               RHS.getOperand(0), { 0, -1, 1, -1 });
    RHS = DAG.getBitcast(MVT::v2i64, RHS);
    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
  }

  return SDValue();
}
// Simplify VPMADDUBSW/VPMADDWD operations.
static SDValue combineVPMADD(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Multiply by zero.
  // Don't return LHS/RHS as it may contain UNDEFs.
  if (ISD::isBuildVectorAllZeros(LHS.getNode()) ||
      ISD::isBuildVectorAllZeros(RHS.getNode()))
    return DAG.getConstant(0, SDLoc(N), VT);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
    return SDValue(N, 0);

  return SDValue();
}
static SDValue combineEXTEND_VECTOR_INREG(SDNode *N, SelectionDAG &DAG,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue In = N->getOperand(0);
  unsigned Opcode = N->getOpcode();
  unsigned InOpcode = In.getOpcode();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc DL(N);

  // Try to merge vector loads and extend_inreg to an extload.
  if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
      In.hasOneUse()) {
    auto *Ld = cast<LoadSDNode>(In);
    if (Ld->isSimple()) {
      MVT SVT = In.getSimpleValueType().getVectorElementType();
      ISD::LoadExtType Ext = Opcode == ISD::SIGN_EXTEND_VECTOR_INREG
                                 ? ISD::SEXTLOAD
                                 : ISD::ZEXTLOAD;
      EVT MemVT = VT.changeVectorElementType(SVT);
      if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
        SDValue Load = DAG.getExtLoad(
            Ext, DL, VT, Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
            MemVT, Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags());
        DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
        return Load;
      }
    }
  }

  // Fold EXTEND_VECTOR_INREG(EXTEND_VECTOR_INREG(X)) -> EXTEND_VECTOR_INREG(X).
  if (Opcode == InOpcode)
    return DAG.getNode(Opcode, DL, VT, In.getOperand(0));

  // Fold EXTEND_VECTOR_INREG(EXTRACT_SUBVECTOR(EXTEND(X),0))
  // -> EXTEND_VECTOR_INREG(X).
  // TODO: Handle non-zero subvector indices.
  if (InOpcode == ISD::EXTRACT_SUBVECTOR && In.getConstantOperandVal(1) == 0 &&
      In.getOperand(0).getOpcode() == DAG.getOpcode_EXTEND(Opcode) &&
      In.getOperand(0).getOperand(0).getValueSizeInBits() ==
          In.getValueSizeInBits())
    return DAG.getNode(Opcode, DL, VT, In.getOperand(0).getOperand(0));

  // Fold EXTEND_VECTOR_INREG(BUILD_VECTOR(X,Y,?,?)) -> BUILD_VECTOR(X,0,Y,0).
  // TODO: Move to DAGCombine?
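  // e.g. a v2i64 zero_extend_vector_inreg of v4i32 build_vector(a,b,c,d)
  // becomes v4i32 build_vector(a,0,b,0) bitcast to v2i64.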
  if (!DCI.isBeforeLegalizeOps() && Opcode == ISD::ZERO_EXTEND_VECTOR_INREG &&
      In.getOpcode() == ISD::BUILD_VECTOR && In.hasOneUse() &&
      In.getValueSizeInBits() == VT.getSizeInBits()) {
    unsigned NumElts = VT.getVectorNumElements();
    unsigned Scale = VT.getScalarSizeInBits() / In.getScalarValueSizeInBits();
    EVT EltVT = In.getOperand(0).getValueType();
    SmallVector<SDValue> Elts(Scale * NumElts, DAG.getConstant(0, DL, EltVT));
    for (unsigned I = 0; I != NumElts; ++I)
      Elts[I * Scale] = In.getOperand(I);
    return DAG.getBitcast(VT, DAG.getBuildVector(In.getValueType(), DL, Elts));
  }

  // Attempt to combine as a shuffle on SSE41+ targets.
  if (Subtarget.hasSSE41()) {
    SDValue Op(N, 0);
    if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
      if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
        return Res;
  }

  return SDValue();
}
static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);

  if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
    return DAG.getConstant(0, SDLoc(N), VT);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
    return SDValue(N, 0);

  return SDValue();
}
// Optimize (fp16_to_fp (fp_to_fp16 X)) to VCVTPS2PH followed by VCVTPH2PS.
// Done as a combine because the lowering for fp16_to_fp and fp_to_fp16
// produces extra instructions between the conversions due to going to scalar
// and back.
static SDValue combineFP16_TO_FP(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {
  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C())
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::FP_TO_FP16)
    return SDValue();

  if (N->getValueType(0) != MVT::f32 ||
      N->getOperand(0).getOperand(0).getValueType() != MVT::f32)
    return SDValue();

  SDLoc dl(N);
  SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32,
                            N->getOperand(0).getOperand(0));
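  // Rounding-control immediate 4 sets imm8 bit 2, i.e. use the current MXCSR
  // rounding mode for the CVTPS2PH.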
  Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
                    DAG.getTargetConstant(4, dl, MVT::i32));
  Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
                     DAG.getIntPtrConstant(0, dl));
}
static SDValue combineFP_EXTEND(SDNode *N, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
  if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
    return SDValue();

  if (Subtarget.hasFP16())
    return SDValue();

  bool IsStrict = N->isStrictFPOpcode();
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(IsStrict ? 1 : 0);
  EVT SrcVT = Src.getValueType();

  if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::f16)
    return SDValue();

  if (VT.getVectorElementType() != MVT::f32 &&
      VT.getVectorElementType() != MVT::f64)
    return SDValue();

  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 1 || !isPowerOf2_32(NumElts))
    return SDValue();

  SDLoc dl(N);

  // Convert the input to vXi16.
  EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
  Src = DAG.getBitcast(IntVT, Src);

  // Widen to at least 8 input elements.
  if (NumElts < 8) {
    unsigned NumConcats = 8 / NumElts;
    SDValue Fill = NumElts == 4 ? DAG.getUNDEF(IntVT)
                                : DAG.getConstant(0, dl, IntVT);
    SmallVector<SDValue, 4> Ops(NumConcats, Fill);
    Ops[0] = Src;
    Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, Ops);
  }

  // Destination is vXf32 with at least 4 elements.
  EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32,
                               std::max(4U, NumElts));
  SDValue Cvt, Chain;
  if (IsStrict) {
    Cvt = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {CvtVT, MVT::Other},
                      {N->getOperand(0), Src});
    Chain = Cvt.getValue(1);
  } else {
    Cvt = DAG.getNode(X86ISD::CVTPH2PS, dl, CvtVT, Src);
  }

  if (NumElts < 4) {
    assert(NumElts == 2 && "Unexpected size");
    Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Cvt,
                      DAG.getIntPtrConstant(0, dl));
  }

  if (IsStrict) {
    // Extend to the original VT if necessary.
    if (Cvt.getValueType() != VT) {
      Cvt = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {VT, MVT::Other},
                        {Chain, Cvt});
      Chain = Cvt.getValue(1);
    }
    return DAG.getMergeValues({Cvt, Chain}, dl);
  }

  // Extend to the original VT if necessary.
  return DAG.getNode(ISD::FP_EXTEND, dl, VT, Cvt);
}
// Try to find a larger VBROADCAST_LOAD/SUBV_BROADCAST_LOAD that we can extract
// from. Limit this to cases where the loads have the same input chain and the
// output chains are unused. This avoids any memory ordering issues.
static SDValue combineBROADCAST_LOAD(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  assert((N->getOpcode() == X86ISD::VBROADCAST_LOAD ||
          N->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) &&
         "Unknown broadcast load type");

  // Only do this if the chain result is unused.
  if (N->hasAnyUseOfValue(1))
    return SDValue();

  auto *MemIntrin = cast<MemIntrinsicSDNode>(N);

  SDValue Ptr = MemIntrin->getBasePtr();
  SDValue Chain = MemIntrin->getChain();
  EVT VT = N->getSimpleValueType(0);
  EVT MemVT = MemIntrin->getMemoryVT();

  // Look at other users of our base pointer and try to find a wider broadcast.
  // The input chain and the size of the memory VT must match.
  for (SDNode *User : Ptr->uses())
    if (User != N && User->getOpcode() == N->getOpcode() &&
        cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
        cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
        cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
            MemVT.getSizeInBits() &&
        !User->hasAnyUseOfValue(1) &&
        User->getValueSizeInBits(0).getFixedValue() > VT.getFixedSizeInBits()) {
      SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
                                         VT.getSizeInBits());
      Extract = DAG.getBitcast(VT, Extract);
      return DCI.CombineTo(N, Extract, SDValue(User, 1));
    }

  return SDValue();
}
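// Sketch of the DAG shape this catches (informal):
//
//   t1: v4f32 = X86ISD::VBROADCAST_LOAD t0, %ptr
//   t2: v8f32 = X86ISD::VBROADCAST_LOAD t0, %ptr
//
// Both broadcast the same 4-byte element on the same chain, so t1 is
// replaced by extract_subvector(t2, 0) and only the wider load survives.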
static SDValue combineFP_ROUND(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
    return SDValue();

  bool IsStrict = N->isStrictFPOpcode();
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(IsStrict ? 1 : 0);
  EVT SrcVT = Src.getValueType();

  if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
      SrcVT.getVectorElementType() != MVT::f32)
    return SDValue();

  SDLoc dl(N);

  SDValue Cvt, Chain;
  unsigned NumElts = VT.getVectorNumElements();
  if (Subtarget.hasFP16()) {
    // Combine (v8f16 fp_round(concat_vectors(v4f32 (xint_to_fp v4i64), ..)))
    // into (v8f16 vector_shuffle(v8f16 (CVTXI2P v4i64), ..))
    if (NumElts == 8 && Src.getOpcode() == ISD::CONCAT_VECTORS) {
      SDValue Cvt0, Cvt1;
      SDValue Op0 = Src.getOperand(0);
      SDValue Op1 = Src.getOperand(1);
      bool IsOp0Strict = Op0->isStrictFPOpcode();
      if (Op0.getOpcode() != Op1.getOpcode() ||
          Op0.getOperand(IsOp0Strict ? 1 : 0).getValueType() != MVT::v4i64 ||
          Op1.getOperand(IsOp0Strict ? 1 : 0).getValueType() != MVT::v4i64)
        return SDValue();

      int Mask[8] = {0, 1, 2, 3, 8, 9, 10, 11};
      if (IsStrict) {
        assert(IsOp0Strict && "Op0 must be strict node");
        unsigned Opc = Op0.getOpcode() == ISD::STRICT_SINT_TO_FP
                           ? X86ISD::STRICT_CVTSI2P
                           : X86ISD::STRICT_CVTUI2P;
        Cvt0 = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
                           {Op0.getOperand(0), Op0.getOperand(1)});
        Cvt1 = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
                           {Op1.getOperand(0), Op1.getOperand(1)});
        Cvt = DAG.getVectorShuffle(MVT::v8f16, dl, Cvt0, Cvt1, Mask);
        return DAG.getMergeValues({Cvt, Cvt0.getValue(1)}, dl);
      }
      unsigned Opc = Op0.getOpcode() == ISD::SINT_TO_FP ? X86ISD::CVTSI2P
                                                        : X86ISD::CVTUI2P;
      Cvt0 = DAG.getNode(Opc, dl, MVT::v8f16, Op0.getOperand(0));
      Cvt1 = DAG.getNode(Opc, dl, MVT::v8f16, Op1.getOperand(0));
      return DAG.getVectorShuffle(MVT::v8f16, dl, Cvt0, Cvt1, Mask);
    }
    return SDValue();
  }

  if (NumElts == 1 || !isPowerOf2_32(NumElts))
    return SDValue();

  // Widen to at least 4 input elements.
  if (NumElts < 4)
    Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
                      DAG.getConstantFP(0.0, dl, SrcVT));

  // Destination is v8i16 with at least 8 elements.
  EVT CvtVT =
      EVT::getVectorVT(*DAG.getContext(), MVT::i16, std::max(8U, NumElts));
  SDValue Rnd = DAG.getTargetConstant(4, dl, MVT::i32);
  if (IsStrict) {
    Cvt = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {CvtVT, MVT::Other},
                      {N->getOperand(0), Src, Rnd});
    Chain = Cvt.getValue(1);
  } else {
    Cvt = DAG.getNode(X86ISD::CVTPS2PH, dl, CvtVT, Src, Rnd);
  }

  // Extract down to real number of elements.
  if (NumElts < 8) {
    EVT IntVT = VT.changeVectorElementTypeToInteger();
    Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, IntVT, Cvt,
                      DAG.getIntPtrConstant(0, dl));
  }

  Cvt = DAG.getBitcast(VT, Cvt);

  if (IsStrict)
    return DAG.getMergeValues({Cvt, Chain}, dl);

  return Cvt;
}
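// Informal example: rounding v4f32 -> v4f16 with F16C becomes a single
// vcvtps2ph (immediate 4, i.e. MXCSR rounding) producing a v8i16 result;
// the low v4i16 half is then extracted and bitcast back to v4f16.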
static SDValue combineMOVDQ2Q(SDNode *N, SelectionDAG &DAG) {
  SDValue Src = N->getOperand(0);

  // Turn MOVDQ2Q+simple_load into an mmx load.
  if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
    LoadSDNode *LN = cast<LoadSDNode>(Src.getNode());

    if (LN->isSimple()) {
      SDValue NewLd = DAG.getLoad(MVT::x86mmx, SDLoc(N), LN->getChain(),
                                  LN->getBasePtr(), LN->getPointerInfo(),
                                  LN->getOriginalAlign(),
                                  LN->getMemOperand()->getFlags());
      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), NewLd.getValue(1));
      return NewLd;
    }
  }

  return SDValue();
}
static SDValue combinePDEP(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI) {
  unsigned NumBits = N->getSimpleValueType(0).getSizeInBits();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBits), DCI))
    return SDValue(N, 0);

  return SDValue();
}
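// Informal example of why demanded bits help for PDEP: in
//
//   %r = call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 6)  ; mask 0b110
//   %b = and i32 %r, 2
//
// only the lowest deposited bit is used, so only bit 0 of %x is demanded
// and the computation feeding %x can be simplified accordingly.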
58537 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
58538 DAGCombinerInfo &DCI) const {
58539 SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
58542 case ISD::SCALAR_TO_VECTOR:
58543 return combineScalarToVector(N, DAG);
58544 case ISD::EXTRACT_VECTOR_ELT:
58545 case X86ISD::PEXTRW:
58546 case X86ISD::PEXTRB:
58547 return combineExtractVectorElt(N, DAG, DCI, Subtarget);
58548 case ISD::CONCAT_VECTORS:
58549 return combineCONCAT_VECTORS(N, DAG, DCI, Subtarget);
58550 case ISD::INSERT_SUBVECTOR:
58551 return combineINSERT_SUBVECTOR(N, DAG, DCI, Subtarget);
58552 case ISD::EXTRACT_SUBVECTOR:
58553 return combineEXTRACT_SUBVECTOR(N, DAG, DCI, Subtarget);
  case ISD::VSELECT:
  case ISD::SELECT:
  case X86ISD::BLENDV:      return combineSelect(N, DAG, DCI, Subtarget);
58557 case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
58558 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
58559 case X86ISD::CMP: return combineCMP(N, DAG);
58560 case ISD::ADD: return combineAdd(N, DAG, DCI, Subtarget);
58561 case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget);
  case X86ISD::ADD:
  case X86ISD::SUB:         return combineX86AddSub(N, DAG, DCI);
58564 case X86ISD::SBB: return combineSBB(N, DAG);
58565 case X86ISD::ADC: return combineADC(N, DAG, DCI);
58566 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
58567 case ISD::SHL: return combineShiftLeft(N, DAG);
58568 case ISD::SRA: return combineShiftRightArithmetic(N, DAG, Subtarget);
58569 case ISD::SRL: return combineShiftRightLogical(N, DAG, DCI, Subtarget);
58570 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
58571 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
58572 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
58573 case X86ISD::BEXTR:
58574 case X86ISD::BEXTRI: return combineBEXTR(N, DAG, DCI, Subtarget);
58575 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
58576 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
58577 case ISD::STORE: return combineStore(N, DAG, DCI, Subtarget);
58578 case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
58579 case X86ISD::VEXTRACT_STORE:
58580 return combineVEXTRACT_STORE(N, DAG, DCI, Subtarget);
58581 case ISD::SINT_TO_FP:
58582 case ISD::STRICT_SINT_TO_FP:
58583 return combineSIntToFP(N, DAG, DCI, Subtarget);
58584 case ISD::UINT_TO_FP:
58585 case ISD::STRICT_UINT_TO_FP:
58586 return combineUIntToFP(N, DAG, Subtarget);
  case ISD::FADD:
  case ISD::FSUB:           return combineFaddFsub(N, DAG, Subtarget);
58589 case X86ISD::VFCMULC:
58590 case X86ISD::VFMULC: return combineFMulcFCMulc(N, DAG, Subtarget);
58591 case ISD::FNEG: return combineFneg(N, DAG, DCI, Subtarget);
58592 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
58593 case X86ISD::VTRUNC: return combineVTRUNC(N, DAG, DCI);
58594 case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
58595 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
58596 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return combineFOr(N, DAG, DCI, Subtarget);
  case X86ISD::FMIN:
  case X86ISD::FMAX:        return combineFMinFMax(N, DAG);
  case ISD::FMINNUM:
  case ISD::FMAXNUM:        return combineFMinNumFMaxNum(N, DAG, Subtarget);
58603 case X86ISD::CVTSI2P:
58604 case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI);
58605 case X86ISD::CVTP2SI:
58606 case X86ISD::CVTP2UI:
58607 case X86ISD::STRICT_CVTTP2SI:
58608 case X86ISD::CVTTP2SI:
58609 case X86ISD::STRICT_CVTTP2UI:
58610 case X86ISD::CVTTP2UI:
58611 return combineCVTP2I_CVTTP2I(N, DAG, DCI);
58612 case X86ISD::STRICT_CVTPH2PS:
58613 case X86ISD::CVTPH2PS: return combineCVTPH2PS(N, DAG, DCI);
58614 case X86ISD::BT: return combineBT(N, DAG, DCI);
58615 case ISD::ANY_EXTEND:
58616 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
58617 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
58618 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
58619 case ISD::ANY_EXTEND_VECTOR_INREG:
58620 case ISD::SIGN_EXTEND_VECTOR_INREG:
58621 case ISD::ZERO_EXTEND_VECTOR_INREG:
58622 return combineEXTEND_VECTOR_INREG(N, DAG, DCI, Subtarget);
58623 case ISD::SETCC: return combineSetCC(N, DAG, DCI, Subtarget);
58624 case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
58625 case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
58626 case X86ISD::PACKSS:
58627 case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
  case X86ISD::HADD:
  case X86ISD::HSUB:
  case X86ISD::FHADD:
  case X86ISD::FHSUB:       return combineVectorHADDSUB(N, DAG, DCI, Subtarget);
  case X86ISD::VSHL:
  case X86ISD::VSRA:
  case X86ISD::VSRL:
    return combineVectorShiftVar(N, DAG, DCI, Subtarget);
58636 case X86ISD::VSHLI:
58637 case X86ISD::VSRAI:
58638 case X86ISD::VSRLI:
58639 return combineVectorShiftImm(N, DAG, DCI, Subtarget);
58640 case ISD::INSERT_VECTOR_ELT:
58641 case X86ISD::PINSRB:
58642 case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
58643 case X86ISD::SHUFP: // Handle all target specific shuffles
58644 case X86ISD::INSERTPS:
58645 case X86ISD::EXTRQI:
58646 case X86ISD::INSERTQI:
58647 case X86ISD::VALIGN:
58648 case X86ISD::PALIGNR:
58649 case X86ISD::VSHLDQ:
58650 case X86ISD::VSRLDQ:
58651 case X86ISD::BLENDI:
58652 case X86ISD::UNPCKH:
58653 case X86ISD::UNPCKL:
58654 case X86ISD::MOVHLPS:
58655 case X86ISD::MOVLHPS:
58656 case X86ISD::PSHUFB:
58657 case X86ISD::PSHUFD:
58658 case X86ISD::PSHUFHW:
58659 case X86ISD::PSHUFLW:
58660 case X86ISD::MOVSHDUP:
58661 case X86ISD::MOVSLDUP:
58662 case X86ISD::MOVDDUP:
58663 case X86ISD::MOVSS:
58664 case X86ISD::MOVSD:
58665 case X86ISD::MOVSH:
58666 case X86ISD::VBROADCAST:
58667 case X86ISD::VPPERM:
58668 case X86ISD::VPERMI:
58669 case X86ISD::VPERMV:
58670 case X86ISD::VPERMV3:
58671 case X86ISD::VPERMIL2:
58672 case X86ISD::VPERMILPI:
58673 case X86ISD::VPERMILPV:
58674 case X86ISD::VPERM2X128:
58675 case X86ISD::SHUF128:
58676 case X86ISD::VZEXT_MOVL:
  case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
58678 case X86ISD::FMADD_RND:
58679 case X86ISD::FMSUB:
58680 case X86ISD::STRICT_FMSUB:
58681 case X86ISD::FMSUB_RND:
58682 case X86ISD::FNMADD:
58683 case X86ISD::STRICT_FNMADD:
58684 case X86ISD::FNMADD_RND:
58685 case X86ISD::FNMSUB:
58686 case X86ISD::STRICT_FNMSUB:
58687 case X86ISD::FNMSUB_RND:
  case ISD::FMA:
  case ISD::STRICT_FMA:     return combineFMA(N, DAG, DCI, Subtarget);
58690 case X86ISD::FMADDSUB_RND:
58691 case X86ISD::FMSUBADD_RND:
58692 case X86ISD::FMADDSUB:
58693 case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, DCI);
58694 case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI, Subtarget);
58695 case X86ISD::TESTP: return combineTESTP(N, DAG, DCI, Subtarget);
58696 case X86ISD::MGATHER:
58697 case X86ISD::MSCATTER:
58698 return combineX86GatherScatter(N, DAG, DCI, Subtarget);
  case ISD::MGATHER:
  case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI);
58701 case X86ISD::PCMPEQ:
58702 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
58703 case X86ISD::PMULDQ:
58704 case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
58705 case X86ISD::VPMADDUBSW:
58706 case X86ISD::VPMADDWD: return combineVPMADD(N, DAG, DCI);
58707 case X86ISD::KSHIFTL:
58708 case X86ISD::KSHIFTR: return combineKSHIFT(N, DAG, DCI);
58709 case ISD::FP16_TO_FP: return combineFP16_TO_FP(N, DAG, Subtarget);
58710 case ISD::STRICT_FP_EXTEND:
58711 case ISD::FP_EXTEND: return combineFP_EXTEND(N, DAG, Subtarget);
58712 case ISD::STRICT_FP_ROUND:
58713 case ISD::FP_ROUND: return combineFP_ROUND(N, DAG, Subtarget);
58714 case X86ISD::VBROADCAST_LOAD:
58715 case X86ISD::SUBV_BROADCAST_LOAD: return combineBROADCAST_LOAD(N, DAG, DCI);
58716 case X86ISD::MOVDQ2Q: return combineMOVDQ2Q(N, DAG);
  case X86ISD::PDEP:        return combinePDEP(N, DAG, DCI);
  }

  return SDValue();
}
bool X86TargetLowering::preferABDSToABSWithNSW(EVT VT) const {
  return false;
}
58727 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
58731 // There are no vXi8 shifts.
  if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
    return false;
58735 // TODO: Almost no 8-bit ops are desirable because they have no actual
58736 // size/speed advantages vs. 32-bit ops, but they do have a major
58737 // potential disadvantage by causing partial register stalls.
58739 // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
58740 // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
58741 // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
58742 // check for a constant operand to the multiply.
  if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
    return false;
58746 // i16 instruction encodings are longer and some i16 instructions are slow,
58747 // so those are not desirable.
  if (VT == MVT::i16) {
    switch (Opc) {
    default:
      return true;
    case ISD::LOAD:
    case ISD::SIGN_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::ANY_EXTEND:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::SUB:
    case ISD::ADD:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
      return false;
    }
  }

  // Any legal type not explicitly accounted for above here is desirable.
  return true;
}
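// Informal example: with the hook above, (add i16 %a, %b) is typically
// promoted and selected as a 32-bit addl, avoiding the 66H operand-size
// prefix and the partial-register update that addw would incur.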
SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc &dl,
58774 SDValue Value, SDValue Addr,
58775 SelectionDAG &DAG) const {
58776 const Module *M = DAG.getMachineFunction().getMMI().getModule();
58777 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
58778 if (IsCFProtectionSupported) {
58779 // In case control-flow branch protection is enabled, we need to add
58780 // notrack prefix to the indirect branch.
58781 // In order to do that we create NT_BRIND SDNode.
58782 // Upon ISEL, the pattern will convert it to jmp with NoTrack prefix.
    return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
  }

  return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
}
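// Usage sketch (informal): when a module is built with -fcf-protection=branch
// (i.e. it carries the "cf-protection-branch" module flag), the jump-table
// dispatch above is emitted as a "notrack jmp" so the CET hardware does not
// require an ENDBR instruction at the jump-table targets.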
58789 TargetLowering::AndOrSETCCFoldKind
58790 X86TargetLowering::isDesirableToCombineLogicOpOfSETCC(
58791 const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
58792 using AndOrSETCCFoldKind = TargetLowering::AndOrSETCCFoldKind;
58793 EVT VT = LogicOp->getValueType(0);
58794 EVT OpVT = SETCC0->getOperand(0).getValueType();
58795 if (!VT.isInteger())
    return AndOrSETCCFoldKind::None;

  if (VT.isVector())
    return AndOrSETCCFoldKind(AndOrSETCCFoldKind::NotAnd |
58800 (isOperationLegal(ISD::ABS, OpVT)
58801 ? AndOrSETCCFoldKind::ABS
58802 : AndOrSETCCFoldKind::None));
58804 // Don't use `NotAnd` as even though `not` is generally shorter code size than
58805 // `add`, `add` can lower to LEA which can save moves / spills. Any case where
58806 // `NotAnd` applies, `AddAnd` does as well.
  // TODO: Currently we lower (icmp eq/ne (and ~X, Y), 0) -> `test (not X), Y`,
  // if we change that to `andn Y, X` it may be worth preferring `NotAnd`
  // here.
  return AndOrSETCCFoldKind::AddAnd;
}
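// Informal example of the AddAnd fold enabled above: for scalar tests such as
//
//   (x == 8) || (x == 24)
//
// the two constants differ in a single bit, so the check can be rewritten as
// ((x - 8) & ~16) == 0, replacing two compares with an LEA/AND feeding one
// compare.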
58812 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
58813 EVT VT = Op.getValueType();
58814 bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
58815 isa<ConstantSDNode>(Op.getOperand(1));
58817 // i16 is legal, but undesirable since i16 instruction encodings are longer
58818 // and some i16 instructions are slow.
58819 // 8-bit multiply-by-constant can usually be expanded to something cheaper
58820 // using LEA and/or other ALU ops.
  if (VT != MVT::i16 && !Is8BitMulByConstant)
    return false;
58824 auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
    if (!Op.hasOneUse())
      return false;
58827 SDNode *User = *Op->use_begin();
    if (!ISD::isNormalStore(User))
      return false;
58830 auto *Ld = cast<LoadSDNode>(Load);
58831 auto *St = cast<StoreSDNode>(User);
    return Ld->getBasePtr() == St->getBasePtr();
  };
58835 auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
    if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
      return false;
    if (!Op.hasOneUse())
      return false;
58840 SDNode *User = *Op->use_begin();
    if (User->getOpcode() != ISD::ATOMIC_STORE)
      return false;
58843 auto *Ld = cast<AtomicSDNode>(Load);
58844 auto *St = cast<AtomicSDNode>(User);
    return Ld->getBasePtr() == St->getBasePtr();
  };
58848 bool Commute = false;
58849 switch (Op.getOpcode()) {
58850 default: return false;
58851 case ISD::SIGN_EXTEND:
58852 case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)).
    if (X86::mayFoldLoad(N0, Subtarget) && IsFoldableRMW(N0, Op))
      return false;
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    Commute = true;
    [[fallthrough]];
  case ISD::SUB: {
    SDValue N0 = Op.getOperand(0);
58873 SDValue N1 = Op.getOperand(1);
58874 // Avoid disabling potential load folding opportunities.
58875 if (X86::mayFoldLoad(N1, Subtarget) &&
58876 (!Commute || !isa<ConstantSDNode>(N0) ||
        (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
      return false;
    if (X86::mayFoldLoad(N0, Subtarget) &&
        ((Commute && !isa<ConstantSDNode>(N1)) ||
         (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
      return false;
    if (IsFoldableAtomicRMW(N0, Op) ||
        (Commute && IsFoldableAtomicRMW(N1, Op)))
      return false;
  }
  }

  PVT = MVT::i32;
  return true;
}
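// Informal example of the RMW carve-out above: a load/add/store chain through
// the same pointer can fold into a single memory-destination instruction,
//
//   addw %si, (%rdi)
//
// and promoting the i16 add to i32 would break that folding, so promotion is
// declined in that case.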
58893 //===----------------------------------------------------------------------===//
58894 // X86 Inline Assembly Support
58895 //===----------------------------------------------------------------------===//
// Helper to match a string separated by whitespace.
static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
  S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.

  for (StringRef Piece : Pieces) {
    if (!S.startswith(Piece)) // Check if the piece matches.
      return false;

    S = S.substr(Piece.size());
    StringRef::size_type Pos = S.find_first_not_of(" \t");
    if (Pos == 0) // We matched a prefix.
      return false;

    S = S.substr(Pos);
  }

  return S.empty();
}
static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {

  if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
    if (llvm::is_contained(AsmPieces, "~{cc}") &&
        llvm::is_contained(AsmPieces, "~{flags}") &&
        llvm::is_contained(AsmPieces, "~{fpsr}")) {

      if (AsmPieces.size() == 3)
        return true;
      else if (llvm::is_contained(AsmPieces, "~{dirflag}"))
        return true;
    }
  }
  return false;
}
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());

  const std::string &AsmStr = IA->getAsmString();

  IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
  if (!Ty || Ty->getBitWidth() % 16 != 0)
    return false;

  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    // FIXME: this should verify that we are targeting a 486 or better. If not,
    // we will turn this bswap into something that will be lowered to logical
    // ops instead of emitting the bswap asm. For now, we don't support 486 or
    // lower so don't worry about this.
    if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
        matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
        matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
        matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
      // No need to check constraints, nothing other than the equivalent of
      // "=r,0" would be valid here.
      return IntrinsicLowering::LowerToByteSwap(CI);
    }

    // rorw $$8, ${0:w} --> llvm.bswap.i16
    if (CI->getType()->isIntegerTy(16) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
         matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
      AsmPieces.clear();
      StringRef ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  case 3:
    if (CI->getType()->isIntegerTy(32) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
        matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
        matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
      AsmPieces.clear();
      StringRef ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }

    if (CI->getType()->isIntegerTy(64)) {
      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
      if (Constraints.size() >= 2 &&
          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
        // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
        if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
            matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
            matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
          return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  }
  return false;
}
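// Example input (informal) that the case-3 path above recognizes:
//
//   %r = call i32 asm "rorw $$8, ${0:w}; rorl $$16, $0; rorw $$8, ${0:w}",
//            "=r,0,~{cc},~{flags},~{fpsr},~{dirflag}"(i32 %x)
//
// All the flag registers are clobbered, so the whole sequence is replaced
// with @llvm.bswap.i32 and later selects to a single bswapl.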
59008 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
59009 X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
59010 .Case("{@cca}", X86::COND_A)
59011 .Case("{@ccae}", X86::COND_AE)
59012 .Case("{@ccb}", X86::COND_B)
59013 .Case("{@ccbe}", X86::COND_BE)
59014 .Case("{@ccc}", X86::COND_B)
59015 .Case("{@cce}", X86::COND_E)
59016 .Case("{@ccz}", X86::COND_E)
59017 .Case("{@ccg}", X86::COND_G)
59018 .Case("{@ccge}", X86::COND_GE)
59019 .Case("{@ccl}", X86::COND_L)
59020 .Case("{@ccle}", X86::COND_LE)
59021 .Case("{@ccna}", X86::COND_BE)
59022 .Case("{@ccnae}", X86::COND_B)
59023 .Case("{@ccnb}", X86::COND_AE)
59024 .Case("{@ccnbe}", X86::COND_A)
59025 .Case("{@ccnc}", X86::COND_AE)
59026 .Case("{@ccne}", X86::COND_NE)
59027 .Case("{@ccnz}", X86::COND_NE)
59028 .Case("{@ccng}", X86::COND_LE)
59029 .Case("{@ccnge}", X86::COND_L)
59030 .Case("{@ccnl}", X86::COND_GE)
59031 .Case("{@ccnle}", X86::COND_G)
59032 .Case("{@ccno}", X86::COND_NO)
59033 .Case("{@ccnp}", X86::COND_NP)
59034 .Case("{@ccns}", X86::COND_NS)
59035 .Case("{@cco}", X86::COND_O)
59036 .Case("{@ccp}", X86::COND_P)
59037 .Case("{@ccs}", X86::COND_S)
                           .Default(X86::COND_INVALID);
  return Cond;
}
59042 /// Given a constraint letter, return the type of constraint for this target.
59043 X86TargetLowering::ConstraintType
59044 X86TargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R': case 'q': case 'Q': case 'f': case 't': case 'u':
    case 'y': case 'x': case 'v': case 'l':
    case 'k': // AVX512 masking registers.
      return C_RegisterClass;
    case 'a': case 'b': case 'c': case 'd': case 'S': case 'D': case 'A':
      return C_Register;
    case 'I': case 'J': case 'K': case 'N':
    case 'G': case 'L': case 'M':
      return C_Immediate;
    case 'C': case 'e': case 'Z':
      return C_Other;
    default:
      break;
    }
  } else if (Constraint.size() == 2) {
    switch (Constraint[0]) {
    default:
      break;
    case 'Y':
      switch (Constraint[1]) {
      default:
        break;
      case 'z':
        return C_Register;
      case 'i': case 'm': case 'k': case 't': case '2':
        return C_RegisterClass;
      }
    }
  } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
    return C_Other;
  return TargetLowering::getConstraintType(Constraint);
}
59106 /// Examine constraint type and operand type and determine a weight value.
59107 /// This object must already have been set up with the operand type
59108 /// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    [[fallthrough]];
  case 'R': case 'q': case 'Q': case 'a': case 'b':
  case 'c': case 'd': case 'S': case 'D': case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f': case 't': case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget.hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'Y':
    if (StringRef(constraint).size() != 2)
      break;
    switch (constraint[1]) {
    default:
      return CW_Invalid;
    // XMM0
    case 'z':
      if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
          ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()) ||
          ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512()))
        return CW_SpecificReg;
      return CW_Invalid;
    // Conditional OpMask regs (AVX512)
    case 'k':
      if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
        return CW_Register;
      return CW_Invalid;
    // Any MMX reg
    case 'm':
      if (type->isX86_MMXTy() && Subtarget.hasMMX())
        return weight;
      return CW_Invalid;
    // Any SSE reg when ISA >= SSE2, same as 'x'
    case 'i': case 't': case '2':
      if (!Subtarget.hasSSE2())
        return CW_Invalid;
      break;
    }
    break;
  case 'v':
    if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
      weight = CW_Register;
    [[fallthrough]];
  case 'x':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
      weight = CW_Register;
    break;
  case 'k':
    // Enable conditional vector operations using %k<#> registers.
    if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
      weight = CW_Register;
    break;
  case 'I':
    if (auto *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (isa<ConstantFP>(CallOperandVal))
      weight = CW_Constant;
    break;
  case 'e':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}
/// Try to replace an X constraint, which matches anything, with another that
/// has more specific requirements based on the type of the corresponding
/// operand.
59255 const char *X86TargetLowering::
59256 LowerXConstraint(EVT ConstraintVT) const {
59257 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
59258 // 'f' like normal targets.
59259 if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget.hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}
59267 // Lower @cc targets via setcc.
59268 SDValue X86TargetLowering::LowerAsmOutputForConstraint(
59269 SDValue &Chain, SDValue &Glue, const SDLoc &DL,
59270 const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
59271 X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
  if (Cond == X86::COND_INVALID)
    return SDValue();
59274 // Check that return type is valid.
59275 if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
59276 OpInfo.ConstraintVT.getSizeInBits() < 8)
59277 report_fatal_error("Glue output operand is of invalid type");
59279 // Get EFLAGS register. Only update chain when copyfrom is glued.
59280 if (Glue.getNode()) {
59281 Glue = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Glue);
    Chain = Glue.getValue(1);
  } else
    Glue = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
59285 // Extract CC code.
59286 SDValue CC = getSETCC(Cond, Glue, DL, DAG);
59287 // Extend to 32-bits
  SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
  return Result;
}
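// Informal example of the @cc output constraints handled here: for
//
//   asm("cmp %2, %1" : "=@ccbe"(ok) : "r"(a), "r"(b));
//
// parseConstraintCode maps "{@ccbe}" to X86::COND_BE, EFLAGS is copied out,
// and the getSETCC above materializes the condition as a zero-extended
// integer result.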
59293 /// Lower the specified operand into the Ops vector.
59294 /// If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (auto *CST = dyn_cast<ConstantSDNode>(Op)) {
      bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
      BooleanContent BCont = getBooleanContents(MVT::i64);
      ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
                                    : ISD::SIGN_EXTEND;
      int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
                                                  : CST->getSExtValue();
      Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates. BlockAddresses and BasicBlocks are fine though.
    if ((Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC()) &&
        !(isa<BlockAddressSDNode>(Op) || isa<BasicBlockSDNode>(Op)))
      return;

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
      // If we require an extra load to get this address, as in PIC mode, we
      // can't accept it.
      if (isGlobalStubReference(
              Subtarget.classifyGlobalReference(GA->getGlobal())))
        return;
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
59438 /// Check if \p RC is a general purpose register class.
59439 /// I.e., GR* or one of their variant.
59440 static bool isGRClass(const TargetRegisterClass &RC) {
59441 return RC.hasSuperClassEq(&X86::GR8RegClass) ||
59442 RC.hasSuperClassEq(&X86::GR16RegClass) ||
59443 RC.hasSuperClassEq(&X86::GR32RegClass) ||
59444 RC.hasSuperClassEq(&X86::GR64RegClass) ||
         RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
}
59448 /// Check if \p RC is a vector register class.
59449 /// I.e., FR* / VR* or one of their variant.
59450 static bool isFRClass(const TargetRegisterClass &RC) {
59451 return RC.hasSuperClassEq(&X86::FR16XRegClass) ||
59452 RC.hasSuperClassEq(&X86::FR32XRegClass) ||
59453 RC.hasSuperClassEq(&X86::FR64XRegClass) ||
59454 RC.hasSuperClassEq(&X86::VR128XRegClass) ||
59455 RC.hasSuperClassEq(&X86::VR256XRegClass) ||
         RC.hasSuperClassEq(&X86::VR512RegClass);
}
59459 /// Check if \p RC is a mask register class.
59460 /// I.e., VK* or one of their variant.
59461 static bool isVKClass(const TargetRegisterClass &RC) {
59462 return RC.hasSuperClassEq(&X86::VK1RegClass) ||
59463 RC.hasSuperClassEq(&X86::VK2RegClass) ||
59464 RC.hasSuperClassEq(&X86::VK4RegClass) ||
59465 RC.hasSuperClassEq(&X86::VK8RegClass) ||
59466 RC.hasSuperClassEq(&X86::VK16RegClass) ||
59467 RC.hasSuperClassEq(&X86::VK32RegClass) ||
         RC.hasSuperClassEq(&X86::VK64RegClass);
}
59471 std::pair<unsigned, const TargetRegisterClass *>
59472 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
59478 // GCC Constraint Letters
59479 switch (Constraint[0]) {
    default: break;
    // 'A' means [ER]AX + [ER]DX.
    case 'A':
59483 if (Subtarget.is64Bit())
59484 return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
59485 assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
59486 "Expecting 64, 32 or 16 bit subtarget");
59487 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
59489 // TODO: Slight differences here in allocation order and leaving
59490 // RIP in the class. Do they matter any more here than they do
59491 // in the normal allocation?
    case 'k':
      if (Subtarget.hasAVX512()) {
        if (VT == MVT::i1)
          return std::make_pair(0U, &X86::VK1RegClass);
        if (VT == MVT::i8)
          return std::make_pair(0U, &X86::VK8RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::VK16RegClass);
      }
      if (Subtarget.hasBWI()) {
59502 if (VT == MVT::i32)
59503 return std::make_pair(0U, &X86::VK32RegClass);
59504 if (VT == MVT::i64)
          return std::make_pair(0U, &X86::VK64RegClass);
      }
      break;
59508 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
59509 if (Subtarget.is64Bit()) {
59510 if (VT == MVT::i8 || VT == MVT::i1)
59511 return std::make_pair(0U, &X86::GR8RegClass);
59512 if (VT == MVT::i16)
59513 return std::make_pair(0U, &X86::GR16RegClass);
59514 if (VT == MVT::i32 || VT == MVT::f32)
59515 return std::make_pair(0U, &X86::GR32RegClass);
59516 if (VT != MVT::f80 && !VT.isVector())
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      [[fallthrough]];
      // 32-bit fallthrough
59522 case 'Q': // Q_REGS
59523 if (VT == MVT::i8 || VT == MVT::i1)
59524 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
59525 if (VT == MVT::i16)
59526 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
59527 if (VT == MVT::i32 || VT == MVT::f32 ||
59528 (!VT.isVector() && !Subtarget.is64Bit()))
59529 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
59530 if (VT != MVT::f80 && !VT.isVector())
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
59533 case 'r': // GENERAL_REGS
59534 case 'l': // INDEX_REGS
59535 if (VT == MVT::i8 || VT == MVT::i1)
59536 return std::make_pair(0U, &X86::GR8RegClass);
59537 if (VT == MVT::i16)
59538 return std::make_pair(0U, &X86::GR16RegClass);
59539 if (VT == MVT::i32 || VT == MVT::f32 ||
59540 (!VT.isVector() && !Subtarget.is64Bit()))
59541 return std::make_pair(0U, &X86::GR32RegClass);
59542 if (VT != MVT::f80 && !VT.isVector())
        return std::make_pair(0U, &X86::GR64RegClass);
      break;
59545 case 'R': // LEGACY_REGS
59546 if (VT == MVT::i8 || VT == MVT::i1)
59547 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
59548 if (VT == MVT::i16)
59549 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
59550 if (VT == MVT::i32 || VT == MVT::f32 ||
59551 (!VT.isVector() && !Subtarget.is64Bit()))
59552 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
59553 if (VT != MVT::f80 && !VT.isVector())
        return std::make_pair(0U, &X86::GR64_NOREXRegClass);
      break;
59556 case 'f': // FP Stack registers.
59557 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
59558 // value to the correct fpstack register class.
59559 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
59560 return std::make_pair(0U, &X86::RFP32RegClass);
59561 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
59562 return std::make_pair(0U, &X86::RFP64RegClass);
59563 if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80)
        return std::make_pair(0U, &X86::RFP80RegClass);
      break;
59566 case 'y': // MMX_REGS if MMX allowed.
59567 if (!Subtarget.hasMMX()) break;
59568 return std::make_pair(0U, &X86::VR64RegClass);
    case 'v':
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
59571 if (!Subtarget.hasSSE1()) break;
59572 bool VConstraint = (Constraint[0] == 'v');
      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f16:
        if (VConstraint && Subtarget.hasFP16())
          return std::make_pair(0U, &X86::FR16XRegClass);
        break;
      case MVT::f32:
      case MVT::i32:
        if (VConstraint && Subtarget.hasVLX())
59584 return std::make_pair(0U, &X86::FR32XRegClass);
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        if (VConstraint && Subtarget.hasVLX())
59589 return std::make_pair(0U, &X86::FR64XRegClass);
        return std::make_pair(0U, &X86::FR64RegClass);
      case MVT::i128:
        if (Subtarget.is64Bit()) {
59593 if (VConstraint && Subtarget.hasVLX())
59594 return std::make_pair(0U, &X86::VR128XRegClass);
          return std::make_pair(0U, &X86::VR128RegClass);
        }
        break;
      // Vector types and fp128.
      case MVT::v8f16:
        if (!Subtarget.hasFP16())
          break;
        [[fallthrough]];
      case MVT::f128:
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        if (VConstraint && Subtarget.hasVLX())
59611 return std::make_pair(0U, &X86::VR128XRegClass);
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v16f16:
        if (!Subtarget.hasFP16())
          break;
        [[fallthrough]];
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        if (VConstraint && Subtarget.hasVLX())
59625 return std::make_pair(0U, &X86::VR256XRegClass);
59626 if (Subtarget.hasAVX())
          return std::make_pair(0U, &X86::VR256RegClass);
        break;
      case MVT::v32f16:
        if (!Subtarget.hasFP16())
          break;
        [[fallthrough]];
      case MVT::v64i8:
      case MVT::v32i16:
      case MVT::v16i32:
      case MVT::v8i64:
      case MVT::v16f32:
      case MVT::v8f64:
        if (!Subtarget.hasAVX512()) break;
        if (VConstraint)
          return std::make_pair(0U, &X86::VR512RegClass);
        return std::make_pair(0U, &X86::VR512_0_15RegClass);
      }
      break;
    }
59646 } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
    switch (Constraint[1]) {
    default:
      break;
    case 'i':
    case 't':
    case '2':
      return getRegForInlineAsmConstraint(TRI, "x", VT);
    case 'm':
59655 if (!Subtarget.hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'z':
      if (!Subtarget.hasSSE1()) break;
      switch (VT.SimpleTy) {
      default: break;
59661 // Scalar SSE types.
      case MVT::f16:
        if (!Subtarget.hasFP16())
          break;
        return std::make_pair(X86::XMM0, &X86::FR16XRegClass);
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(X86::XMM0, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(X86::XMM0, &X86::FR64RegClass);
      case MVT::v8f16:
        if (!Subtarget.hasFP16())
          break;
        [[fallthrough]];
      case MVT::f128:
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(X86::XMM0, &X86::VR128RegClass);
      // AVX types.
      case MVT::v16f16:
        if (!Subtarget.hasFP16())
          break;
        [[fallthrough]];
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        if (Subtarget.hasAVX())
          return std::make_pair(X86::YMM0, &X86::VR256RegClass);
        break;
      case MVT::v32f16:
        if (!Subtarget.hasFP16())
          break;
        [[fallthrough]];
      case MVT::v64i8:
      case MVT::v32i16:
      case MVT::v16i32:
      case MVT::v8i64:
      case MVT::v16f32:
      case MVT::v8f64:
        if (Subtarget.hasAVX512())
          return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
        break;
      }
      break;
    case 'k':
59714 // This register class doesn't allocate k0 for masked vector operation.
      if (Subtarget.hasAVX512()) {
        if (VT == MVT::i1)
          return std::make_pair(0U, &X86::VK1WMRegClass);
        if (VT == MVT::i8)
          return std::make_pair(0U, &X86::VK8WMRegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::VK16WMRegClass);
      }
59723 if (Subtarget.hasBWI()) {
59724 if (VT == MVT::i32)
59725 return std::make_pair(0U, &X86::VK32WMRegClass);
59726 if (VT == MVT::i64)
          return std::make_pair(0U, &X86::VK64WMRegClass);
      }
      break;
    }
  }

59733 if (parseConstraintCode(Constraint) != X86::COND_INVALID)
59734 return std::make_pair(0U, &X86::GR32RegClass);
59736 // Use the default implementation in TargetLowering to convert the register
59737 // constraint into a member of a register class.
59738 std::pair<Register, const TargetRegisterClass*> Res;
59739 Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
  // Not found as a standard register?
  if (!Res.second) {
    // Only match x87 registers if the VT is one SelectionDAGBuilder can convert
    // to/from.
59745 if (VT == MVT::Other || VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80) {
59746 // Map st(0) -> st(7) -> ST0
59747 if (Constraint.size() == 7 && Constraint[0] == '{' &&
59748 tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
59749 Constraint[3] == '(' &&
59750 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
59751 Constraint[5] == ')' && Constraint[6] == '}') {
59752 // st(7) is not allocatable and thus not a member of RFP80. Return
59753 // singleton class in cases where we have a reference to it.
59754 if (Constraint[4] == '7')
59755 return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
        return std::make_pair(X86::FP0 + Constraint[4] - '0',
                              &X86::RFP80RegClass);
      }

59760 // GCC allows "st(0)" to be called just plain "st".
59761 if (StringRef("{st}").equals_insensitive(Constraint))
        return std::make_pair(X86::FP0, &X86::RFP80RegClass);
    }

    // flags -> EFLAGS
59766 if (StringRef("{flags}").equals_insensitive(Constraint))
59767 return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
    // dirflag -> DF
    // Only allow for clobber.
    if (StringRef("{dirflag}").equals_insensitive(Constraint) &&
        VT == MVT::Other)
59773 return std::make_pair(X86::DF, &X86::DFCCRRegClass);
    // fpsr -> FPSW
    if (StringRef("{fpsr}").equals_insensitive(Constraint))
      return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);

    return Res;
  }

59782 // Make sure it isn't a register that requires 64-bit mode.
59783 if (!Subtarget.is64Bit() &&
59784 (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
59785 TRI->getEncodingValue(Res.first) >= 8) {
59786 // Register requires REX prefix, but we're in 32-bit mode.
    return std::make_pair(0, nullptr);
  }

59790 // Make sure it isn't a register that requires AVX512.
59791 if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
59792 TRI->getEncodingValue(Res.first) & 0x10) {
59793 // Register requires EVEX prefix.
    return std::make_pair(0, nullptr);
  }

59797 // Otherwise, check to see if this is a register class of the wrong value
59798 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
59799 // turn into {ax},{dx}.
59800 // MVT::Other is used to specify clobber names.
59801 if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
59802 return Res; // Correct type already, nothing to do.
59804 // Get a matching integer of the correct size. i.e. "ax" with MVT::32 should
59805 // return "eax". This should even work for things like getting 64bit integer
59806 // registers when given an f64 type.
59807 const TargetRegisterClass *Class = Res.second;
59808 // The generic code will match the first register class that contains the
59809 // given register. Thus, based on the ordering of the tablegened file,
59810 // the "plain" GR classes might not come first.
59811 // Therefore, use a helper method.
59812 if (isGRClass(*Class)) {
59813 unsigned Size = VT.getSizeInBits();
59814 if (Size == 1) Size = 8;
59815 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
59816 return std::make_pair(0, nullptr);
59817 Register DestReg = getX86SubSuperRegister(Res.first, Size);
59818 if (DestReg.isValid()) {
59819 bool is64Bit = Subtarget.is64Bit();
59820 const TargetRegisterClass *RC =
59821 Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
59822 : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
59823 : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
59824 : /*Size == 64*/ (is64Bit ? &X86::GR64RegClass : nullptr);
59825 if (Size == 64 && !is64Bit) {
        // Model GCC's behavior here and select a fixed pair of 32-bit
        // registers.
        switch (DestReg) {
        case X86::RAX:
          return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
        case X86::RDX:
          return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
        case X86::RCX:
          return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
        case X86::RBX:
          return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
        case X86::RSI:
          return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
        case X86::RDI:
          return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
        case X86::RBP:
          return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
        default:
          return std::make_pair(0, nullptr);
        }
      }
59847 if (RC && RC->contains(DestReg))
        return std::make_pair(DestReg, RC);
    }

59851 // No register found/type mismatch.
59852 return std::make_pair(0, nullptr);
59853 } else if (isFRClass(*Class)) {
59854 // Handle references to XMM physical registers that got mapped into the
59855 // wrong class. This can happen with constraints like {xmm0} where the
59856 // target independent register mapper will just pick the first match it can
59857 // find, ignoring the required type.
59859 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
59860 if (VT == MVT::f16)
59861 Res.second = &X86::FR16XRegClass;
59862 else if (VT == MVT::f32 || VT == MVT::i32)
59863 Res.second = &X86::FR32XRegClass;
59864 else if (VT == MVT::f64 || VT == MVT::i64)
59865 Res.second = &X86::FR64XRegClass;
59866 else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
59867 Res.second = &X86::VR128XRegClass;
59868 else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
59869 Res.second = &X86::VR256XRegClass;
59870 else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
59871 Res.second = &X86::VR512RegClass;
    else {
      // Type mismatch and not a clobber: Return an error;
      Res.first = 0;
      Res.second = nullptr;
    }
  } else if (isVKClass(*Class)) {
    if (VT == MVT::i1)
      Res.second = &X86::VK1RegClass;
59880 else if (VT == MVT::i8)
59881 Res.second = &X86::VK8RegClass;
59882 else if (VT == MVT::i16)
59883 Res.second = &X86::VK16RegClass;
59884 else if (VT == MVT::i32)
59885 Res.second = &X86::VK32RegClass;
59886 else if (VT == MVT::i64)
59887 Res.second = &X86::VK64RegClass;
    else {
      // Type mismatch and not a clobber: Return an error;
      Res.first = 0;
      Res.second = nullptr;
    }
  }

  return Res;
}
59898 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
59899 // Integer division on x86 is expensive. However, when aggressively optimizing
59900 // for code size, we prefer to use a div instruction, as it is usually smaller
59901 // than the alternative sequence.
59902 // The exception to this is vector division. Since x86 doesn't have vector
59903 // integer division, leaving the division as-is is a loss even in terms of
59904 // size, because it will have to be scalarized, while the alternative code
59905 // sequence can be performed in vector form.
59906 bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
  return OptSize && !VT.isVector();
}
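// Informal example: with minsize, "x / 7" keeps the short idivl encoding,
// while at -O2 the same division is expanded into the longer (but faster)
// multiply-by-magic-constant sequence.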
59910 void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (!Subtarget.is64Bit())
    return;
59914 // Update IsSplitCSR in X86MachineFunctionInfo.
59915 X86MachineFunctionInfo *AFI =
59916 Entry->getParent()->getInfo<X86MachineFunctionInfo>();
  AFI->setIsSplitCSR(true);
}
59920 void X86TargetLowering::insertCopiesSplitCSR(
59921 MachineBasicBlock *Entry,
59922 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
59923 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;
59928 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
59929 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
59930 MachineBasicBlock::iterator MBBI = Entry->begin();
59931 for (const MCPhysReg *I = IStart; *I; ++I) {
59932 const TargetRegisterClass *RC = nullptr;
59933 if (X86::GR64RegClass.contains(*I))
      RC = &X86::GR64RegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");
59938 Register NewVR = MRI->createVirtualRegister(RC);
59939 // Create copy from CSR to a virtual register.
59940 // FIXME: this currently does not emit CFI pseudo-instructions, it works
59941 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
59942 // nounwind. If we want to generalize this later, we may need to emit
59943 // CFI pseudo-instructions.
    assert(
        Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
        "Function should be nounwind in insertCopiesSplitCSR!");
59947 Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, MIMetadata(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);
59951 // Insert the copy-back instructions right before the terminator.
59952 for (auto *Exit : Exits)
59953 BuildMI(*Exit, Exit->getFirstTerminator(), MIMetadata(),
            TII->get(TargetOpcode::COPY), *I)
        .addReg(NewVR);
  }
}
59959 bool X86TargetLowering::supportSwiftError() const {
  return Subtarget.is64Bit();
}

MachineInstr *
59964 X86TargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
59965 MachineBasicBlock::instr_iterator &MBBI,
59966 const TargetInstrInfo *TII) const {
59967 assert(MBBI->isCall() && MBBI->getCFIType() &&
59968 "Invalid call instruction for a KCFI check");
59970 MachineFunction &MF = *MBB.getParent();
59971 // If the call target is a memory operand, unfold it and use R11 for the
59972 // call, so KCFI_CHECK won't have to recompute the address.
  switch (MBBI->getOpcode()) {
  case X86::CALL64m:
59975 case X86::CALL64m_NT:
59976 case X86::TAILJMPm64:
59977 case X86::TAILJMPm64_REX: {
59978 MachineBasicBlock::instr_iterator OrigCall = MBBI;
59979 SmallVector<MachineInstr *, 2> NewMIs;
59980 if (!TII->unfoldMemoryOperand(MF, *OrigCall, X86::R11, /*UnfoldLoad=*/true,
59981 /*UnfoldStore=*/false, NewMIs))
59982 report_fatal_error("Failed to unfold memory operand for a KCFI check");
59983 for (auto *NewMI : NewMIs)
59984 MBBI = MBB.insert(OrigCall, NewMI);
59985 assert(MBBI->isCall() &&
59986 "Unexpected instruction after memory operand unfolding");
59987 if (OrigCall->shouldUpdateCallSiteInfo())
59988 MF.moveCallSiteInfo(&*OrigCall, &*MBBI);
59989 MBBI->setCFIType(MF, OrigCall->getCFIType());
    OrigCall->eraseFromParent();
    break;
  }
  default:
    break;
  }

59997 MachineOperand &Target = MBBI->getOperand(0);
59998 Register TargetReg;
  switch (MBBI->getOpcode()) {
  case X86::CALL64r:
60001 case X86::CALL64r_NT:
60002 case X86::TAILJMPr64:
60003 case X86::TAILJMPr64_REX:
60004 assert(Target.isReg() && "Unexpected target operand for an indirect call");
60005 Target.setIsRenamable(false);
    TargetReg = Target.getReg();
    break;
60008 case X86::CALL64pcrel32:
60009 case X86::TAILJMPd64:
60010 assert(Target.isSymbol() && "Unexpected target operand for a direct call");
60011 // X86TargetLowering::EmitLoweredIndirectThunk always uses r11 for
60012 // 64-bit indirect thunk calls.
60013 assert(StringRef(Target.getSymbolName()).endswith("_r11") &&
60014 "Unexpected register for an indirect thunk call");
    TargetReg = X86::R11;
    break;
  default:
    llvm_unreachable("Unexpected CFI call opcode");
  }

  return BuildMI(MBB, MBBI, MIMetadata(*MBBI), TII->get(X86::KCFI_CHECK))
      .addReg(TargetReg)
      .addImm(MBBI->getCFIType())
      .getInstr();
}
60028 /// Returns true if stack probing through a function call is requested.
60029 bool X86TargetLowering::hasStackProbeSymbol(const MachineFunction &MF) const {
  return !getStackProbeSymbolName(MF).empty();
}
60033 /// Returns true if stack probing through inline assembly is requested.
60034 bool X86TargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
60036 // No inline stack probe for Windows, they have their own mechanism.
60037 if (Subtarget.isOSWindows() ||
      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
    return false;
60041 // If the function specifically requests inline stack probes, emit them.
60042 if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
           "inline-asm";

  return false;
}
60049 /// Returns the name of the symbol used to emit stack probes or the empty
60050 /// string if not applicable.
StringRef
X86TargetLowering::getStackProbeSymbolName(const MachineFunction &MF) const {
60053 // Inline Stack probes disable stack probe call
  if (hasInlineStackProbe(MF))
    return "";
60057 // If the function specifically requests stack probes, emit them.
60058 if (MF.getFunction().hasFnAttribute("probe-stack"))
60059 return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
60061 // Generally, if we aren't on Windows, the platform ABI does not include
60062 // support for stack probes, so don't emit them.
60063 if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
    return "";
  // We need a stack probe to conform to the Windows ABI. Choose the right
  // symbol.
60069 if (Subtarget.is64Bit())
60070 return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
  return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
}

unsigned
60075 X86TargetLowering::getStackProbeSize(const MachineFunction &MF) const {
  // The default stack probe size is 4096 if the function has no stackprobesize
  // attribute.
  return MF.getFunction().getFnAttributeAsParsedInteger("stack-probe-size",
                                                        4096);
}
60082 Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  if (ML && ML->isInnermost() &&
      ExperimentalPrefInnermostLoopAlignment.getNumOccurrences())
    return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
  return TargetLowering::getPrefLoopAlignment();
}
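// Usage sketch (informal): passing
//   -x86-experimental-pref-innermost-loop-alignment=6
// to llc aligns innermost loop headers to 1 << 6 = 64 bytes, which can be
// useful when experimenting with uop-cache-sensitive loops.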