1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file defines the interfaces that X86 uses to lower LLVM code into a
10 // selection DAG.
12 //===----------------------------------------------------------------------===//
14 #include "X86ISelLowering.h"
15 #include "Utils/X86ShuffleDecode.h"
16 #include "X86CallingConv.h"
17 #include "X86FrameLowering.h"
18 #include "X86InstrBuilder.h"
19 #include "X86IntrinsicsInfo.h"
20 #include "X86MachineFunctionInfo.h"
21 #include "X86TargetMachine.h"
22 #include "X86TargetObjectFile.h"
23 #include "llvm/ADT/SmallBitVector.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringExtras.h"
27 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/Analysis/BlockFrequencyInfo.h"
29 #include "llvm/Analysis/EHPersonalities.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/CodeGen/IntrinsicLowering.h"
32 #include "llvm/CodeGen/MachineFrameInfo.h"
33 #include "llvm/CodeGen/MachineFunction.h"
34 #include "llvm/CodeGen/MachineInstrBuilder.h"
35 #include "llvm/CodeGen/MachineJumpTableInfo.h"
36 #include "llvm/CodeGen/MachineModuleInfo.h"
37 #include "llvm/CodeGen/MachineRegisterInfo.h"
38 #include "llvm/CodeGen/TargetLowering.h"
39 #include "llvm/CodeGen/WinEHFuncInfo.h"
40 #include "llvm/IR/CallSite.h"
41 #include "llvm/IR/CallingConv.h"
42 #include "llvm/IR/Constants.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/DiagnosticInfo.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/GlobalAlias.h"
47 #include "llvm/IR/GlobalVariable.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/Intrinsics.h"
50 #include "llvm/MC/MCAsmInfo.h"
51 #include "llvm/MC/MCContext.h"
52 #include "llvm/MC/MCExpr.h"
53 #include "llvm/MC/MCSymbol.h"
54 #include "llvm/Support/CommandLine.h"
55 #include "llvm/Support/Debug.h"
56 #include "llvm/Support/ErrorHandling.h"
57 #include "llvm/Support/KnownBits.h"
58 #include "llvm/Support/MathExtras.h"
59 #include "llvm/Target/TargetOptions.h"
60 #include <algorithm>
61 #include <bitset>
62 #include <cctype>
63 #include <numeric>
64 using namespace llvm;
66 #define DEBUG_TYPE "x86-isel"
68 STATISTIC(NumTailCalls, "Number of tail calls");
70 static cl::opt<int> ExperimentalPrefLoopAlignment(
71 "x86-experimental-pref-loop-alignment", cl::init(4),
72 cl::desc(
73 "Sets the preferable loop alignment for experiments (as log2 bytes)"
74 "(the last x86-experimental-pref-loop-alignment bits"
75 " of the loop header PC will be 0)."),
76 cl::Hidden);
79 static cl::opt<bool> EnableOldKNLABI(
80 "x86-enable-old-knl-abi", cl::init(false),
81 cl::desc("Enables passing v32i16 and v64i8 in 2 YMM registers instead of "
82 "one ZMM register on AVX512F, but not AVX512BW targets."),
83 cl::Hidden);
85 static cl::opt<bool> MulConstantOptimization(
86 "mul-constant-optimization", cl::init(true),
87 cl::desc("Replace 'mul x, Const' with more effective instructions like "
88 "SHIFT, LEA, etc."),
89 cl::Hidden);
91 static cl::opt<bool> ExperimentalUnorderedISEL(
92 "x86-experimental-unordered-atomic-isel", cl::init(false),
93 cl::desc("Use LoadSDNode and StoreSDNode instead of "
94 "AtomicSDNode for unordered atomic loads and "
95 "stores respectively."),
96 cl::Hidden);
98 /// Call this when the user attempts to do something unsupported, like
99 /// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
100 /// report_fatal_error, so calling code should attempt to recover without
101 /// crashing.
102 static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
103 const char *Msg) {
104 MachineFunction &MF = DAG.getMachineFunction();
105 DAG.getContext()->diagnose(
106 DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
107 }
109 X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
110 const X86Subtarget &STI)
111 : TargetLowering(TM), Subtarget(STI) {
112 bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
113 X86ScalarSSEf64 = Subtarget.hasSSE2();
114 X86ScalarSSEf32 = Subtarget.hasSSE1();
115 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
117 // Set up the TargetLowering object.
119 // X86 is weird. It always uses i8 for shift amounts and setcc results.
120 setBooleanContents(ZeroOrOneBooleanContent);
121 // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
122 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
124 // For 64-bit, since we have so many registers, use the ILP scheduler.
125 // For 32-bit, use the register pressure specific scheduling.
126 // For Atom, always use ILP scheduling.
127 if (Subtarget.isAtom())
128 setSchedulingPreference(Sched::ILP);
129 else if (Subtarget.is64Bit())
130 setSchedulingPreference(Sched::ILP);
131 else
132 setSchedulingPreference(Sched::RegPressure);
133 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
134 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
136 // Bypass expensive divides and use cheaper ones.
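// For example, addBypassSlowDiv(32, 8) lets the IR-level BypassSlowDivision
// transform guard a 32-bit divide with a run-time check and use a much
// cheaper 8-bit divide when both operands actually fit in 8 bits.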
137 if (TM.getOptLevel() >= CodeGenOpt::Default) {
138 if (Subtarget.hasSlowDivide32())
139 addBypassSlowDiv(32, 8);
140 if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
141 addBypassSlowDiv(64, 32);
144 if (Subtarget.isTargetWindowsMSVC() ||
145 Subtarget.isTargetWindowsItanium()) {
146 // Setup Windows compiler runtime calls.
147 setLibcallName(RTLIB::SDIV_I64, "_alldiv");
148 setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
149 setLibcallName(RTLIB::SREM_I64, "_allrem");
150 setLibcallName(RTLIB::UREM_I64, "_aullrem");
151 setLibcallName(RTLIB::MUL_I64, "_allmul");
152 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
153 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
154 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
155 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
156 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
159 if (Subtarget.getTargetTriple().isOSMSVCRT()) {
160 // MSVCRT doesn't have powi; fall back to pow
161 setLibcallName(RTLIB::POWI_F32, nullptr);
162 setLibcallName(RTLIB::POWI_F64, nullptr);
165 // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size to
166 // 32 bits so the AtomicExpandPass will expand it so we don't need cmpxchg8b.
167 // FIXME: Should we be limiting the atomic size on other configs? Default is
168 // 1024.
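// (Atomics wider than the supported size are rewritten by AtomicExpandPass
// into __atomic_* library calls.)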
169 if (!Subtarget.hasCmpxchg8b())
170 setMaxAtomicSizeInBitsSupported(32);
172 // Set up the register classes.
173 addRegisterClass(MVT::i8, &X86::GR8RegClass);
174 addRegisterClass(MVT::i16, &X86::GR16RegClass);
175 addRegisterClass(MVT::i32, &X86::GR32RegClass);
176 if (Subtarget.is64Bit())
177 addRegisterClass(MVT::i64, &X86::GR64RegClass);
179 for (MVT VT : MVT::integer_valuetypes())
180 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
182 // We don't accept any truncstore of integer registers.
183 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
184 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
185 setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
186 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
187 setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
188 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
190 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
192 // SETOEQ and SETUNE require checking two conditions.
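// For example, UCOMISS only sets ZF, PF and CF; "ordered and equal" is
// ZF==1 && PF==0, so a single flag test cannot express SETOEQ and it has to
// be expanded into two checks.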
193 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
194 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
195 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
196 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
197 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
198 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
201 if (Subtarget.hasCMov()) {
202 setOperationAction(ISD::ABS , MVT::i16 , Custom);
203 setOperationAction(ISD::ABS , MVT::i32 , Custom);
204 if (Subtarget.is64Bit())
205 setOperationAction(ISD::ABS , MVT::i64 , Custom);
208 for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
209 setOperationAction(ShiftOp , MVT::i16 , Custom);
210 setOperationAction(ShiftOp , MVT::i32 , Custom);
211 if (Subtarget.is64Bit())
212 setOperationAction(ShiftOp , MVT::i64 , Custom);
215 if (!Subtarget.useSoftFloat()) {
216 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
217 // operation.
218 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
219 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
220 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
221 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
222 // We have an algorithm for SSE2, and we turn this into a 64-bit
223 // FILD or VCVTUSI2SS/SD for other targets.
224 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
225 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
226 // We have an algorithm for SSE2->double, and we turn this into a
227 // 64-bit FILD followed by conditional FADD for other targets.
228 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
229 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
231 // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
232 // this operation.
233 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
234 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
235 // SSE has no i16 to fp conversion, only i32. We promote in the handler
236 // to allow f80 to use i16 and f64 to use i16 with sse1 only
237 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
238 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
239 // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not
240 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
241 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
242 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
243 // are Legal, f80 is custom lowered.
244 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
245 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
247 // Promote i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
248 // this operation.
249 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
250 // FIXME: This doesn't generate invalid exception when it should. PR44019.
251 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8, Promote);
252 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
253 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
254 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
255 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
256 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
257 // are Legal, f80 is custom lowered.
258 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
259 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
261 // Handle FP_TO_UINT by promoting the destination to a larger signed
262 // conversion.
263 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
264 // FIXME: This doesn't generate invalid exception when it should. PR44019.
265 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8, Promote);
266 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
267 // FIXME: This doesn't generate invalid exception when it should. PR44019.
268 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
269 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
270 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
271 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
272 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
275 // Handle address space casts between mixed sized pointers.
276 setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
277 setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
279 // TODO: when we have SSE, these could be more efficient, by using movd/movq.
280 if (!X86ScalarSSEf64) {
281 setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
282 setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
283 if (Subtarget.is64Bit()) {
284 setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
285 // Without SSE, i64->f64 goes through memory.
286 setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
288 } else if (!Subtarget.is64Bit())
289 setOperationAction(ISD::BITCAST , MVT::i64 , Custom);
291 // Scalar integer divide and remainder are lowered to use operations that
292 // produce two results, to match the available instructions. This exposes
293 // the two-result form to trivial CSE, which is able to combine x/y and x%y
294 // into a single instruction.
296 // Scalar integer multiply-high is also lowered to use two-result
297 // operations, to match the available instructions. However, plain multiply
298 // (low) operations are left as Legal, as there are single-result
299 // instructions for this in x86. Using the two-result multiply instructions
300 // when both high and low results are needed must be arranged by dagcombine.
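// For example, a single x86 DIV/IDIV produces both the quotient and the
// remainder (EAX and EDX for the 32-bit form), so expanding SDIV and SREM
// lets "x / y" and "x % y" share one ISD::SDIVREM node.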
301 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
302 setOperationAction(ISD::MULHS, VT, Expand);
303 setOperationAction(ISD::MULHU, VT, Expand);
304 setOperationAction(ISD::SDIV, VT, Expand);
305 setOperationAction(ISD::UDIV, VT, Expand);
306 setOperationAction(ISD::SREM, VT, Expand);
307 setOperationAction(ISD::UREM, VT, Expand);
310 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
311 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
312 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
313 MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
314 setOperationAction(ISD::BR_CC, VT, Expand);
315 setOperationAction(ISD::SELECT_CC, VT, Expand);
317 if (Subtarget.is64Bit())
318 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
319 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
320 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
321 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
323 setOperationAction(ISD::FREM , MVT::f32 , Expand);
324 setOperationAction(ISD::FREM , MVT::f64 , Expand);
325 setOperationAction(ISD::FREM , MVT::f80 , Expand);
326 setOperationAction(ISD::FREM , MVT::f128 , Expand);
327 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
329 // Promote the i8 variants and force them on up to i32 which has a shorter
330 // encoding.
331 setOperationPromotedToType(ISD::CTTZ , MVT::i8 , MVT::i32);
332 setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
333 if (!Subtarget.hasBMI()) {
334 setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
335 setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
336 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal);
337 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal);
338 if (Subtarget.is64Bit()) {
339 setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
340 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
344 if (Subtarget.hasLZCNT()) {
345 // When promoting the i8 variants, force them to i32 for a shorter
346 // encoding.
347 setOperationPromotedToType(ISD::CTLZ , MVT::i8 , MVT::i32);
348 setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
349 } else {
350 setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
351 setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
352 setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
353 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
354 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
355 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
356 if (Subtarget.is64Bit()) {
357 setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
358 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
362 // Special handling for half-precision floating point conversions.
363 // If we don't have F16C support, then lower half float conversions
364 // into library calls.
365 if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
366 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
367 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
370 // There's never any support for operations beyond MVT::f32.
371 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
372 setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
373 setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
374 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
375 setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
376 setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
378 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
379 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
380 setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
381 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Expand);
382 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
383 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
384 setTruncStoreAction(MVT::f80, MVT::f16, Expand);
385 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
387 if (Subtarget.hasPOPCNT()) {
388 setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
389 } else {
390 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
391 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
392 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
393 if (Subtarget.is64Bit())
394 setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
395 else
396 setOperationAction(ISD::CTPOP , MVT::i64 , Custom);
399 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
401 if (!Subtarget.hasMOVBE())
402 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
404 // X86 wants to expand cmov itself.
405 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
406 setOperationAction(ISD::SELECT, VT, Custom);
407 setOperationAction(ISD::SETCC, VT, Custom);
408 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
409 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
411 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
412 if (VT == MVT::i64 && !Subtarget.is64Bit())
413 continue;
414 setOperationAction(ISD::SELECT, VT, Custom);
415 setOperationAction(ISD::SETCC, VT, Custom);
418 // Custom action for SELECT MMX and expand action for SELECT_CC MMX
419 setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
420 setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
422 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
423 // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
424 // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
425 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
426 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
427 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
428 if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
429 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
432 for (auto VT : { MVT::i32, MVT::i64 }) {
433 if (VT == MVT::i64 && !Subtarget.is64Bit())
434 continue;
435 setOperationAction(ISD::ConstantPool , VT, Custom);
436 setOperationAction(ISD::JumpTable , VT, Custom);
437 setOperationAction(ISD::GlobalAddress , VT, Custom);
438 setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
439 setOperationAction(ISD::ExternalSymbol , VT, Custom);
440 setOperationAction(ISD::BlockAddress , VT, Custom);
443 // 64-bit shl, sra, srl (iff 32-bit x86)
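// On 32-bit targets a 64-bit shift is legalized to these *_PARTS nodes, which
// are lowered with SHLD/SHRD plus a test on the shift amount for counts of 32
// or more.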
444 for (auto VT : { MVT::i32, MVT::i64 }) {
445 if (VT == MVT::i64 && !Subtarget.is64Bit())
446 continue;
447 setOperationAction(ISD::SHL_PARTS, VT, Custom);
448 setOperationAction(ISD::SRA_PARTS, VT, Custom);
449 setOperationAction(ISD::SRL_PARTS, VT, Custom);
452 if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
453 setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
455 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
457 // Expand certain atomics
458 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
459 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
460 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
461 setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
462 setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
463 setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
464 setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
465 setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
468 if (!Subtarget.is64Bit())
469 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
471 if (Subtarget.hasCmpxchg16b()) {
472 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
475 // FIXME - use subtarget debug flags
476 if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
477 !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
478 TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
479 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
482 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
483 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
485 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
486 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
488 setOperationAction(ISD::TRAP, MVT::Other, Legal);
489 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
491 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
492 setOperationAction(ISD::VASTART , MVT::Other, Custom);
493 setOperationAction(ISD::VAEND , MVT::Other, Expand);
494 bool Is64Bit = Subtarget.is64Bit();
495 setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
496 setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
498 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
499 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
501 setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
503 // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
504 setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
505 setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
507 if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
508 // f32 and f64 use SSE.
509 // Set up the FP register classes.
510 addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
511 : &X86::FR32RegClass);
512 addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
513 : &X86::FR64RegClass);
515 // Disable f32->f64 extload as we can only generate this in one instruction
516 // under optsize. So it's easier to pattern match (fpext (load)) for that
517 // case instead of needing to emit 2 instructions for extload in the
518 // non-optsize case.
519 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
521 for (auto VT : { MVT::f32, MVT::f64 }) {
522 // Use ANDPD to simulate FABS.
523 setOperationAction(ISD::FABS, VT, Custom);
525 // Use XORP to simulate FNEG.
526 setOperationAction(ISD::FNEG, VT, Custom);
528 // Use ANDPD and ORPD to simulate FCOPYSIGN.
529 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
531 // These might be better off as horizontal vector ops.
532 setOperationAction(ISD::FADD, VT, Custom);
533 setOperationAction(ISD::FSUB, VT, Custom);
535 // We don't support sin/cos/fmod
536 setOperationAction(ISD::FSIN , VT, Expand);
537 setOperationAction(ISD::FCOS , VT, Expand);
538 setOperationAction(ISD::FSINCOS, VT, Expand);
541 // Lower this to MOVMSK plus an AND.
542 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
543 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
545 } else if (!Subtarget.useSoftFloat() && X86ScalarSSEf32 && (UseX87 || Is64Bit)) {
546 // Use SSE for f32, x87 for f64.
547 // Set up the FP register classes.
548 addRegisterClass(MVT::f32, &X86::FR32RegClass);
550 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
552 // Use ANDPS to simulate FABS.
553 setOperationAction(ISD::FABS , MVT::f32, Custom);
555 // Use XORP to simulate FNEG.
556 setOperationAction(ISD::FNEG , MVT::f32, Custom);
559 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
561 // Use ANDPS and ORPS to simulate FCOPYSIGN.
563 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
564 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
566 // We don't support sin/cos/fmod
567 setOperationAction(ISD::FSIN , MVT::f32, Expand);
568 setOperationAction(ISD::FCOS , MVT::f32, Expand);
569 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
572 // Always expand sin/cos functions even though x87 has an instruction.
573 setOperationAction(ISD::FSIN, MVT::f64, Expand);
574 setOperationAction(ISD::FCOS, MVT::f64, Expand);
575 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
578 // f32 and f64 in x87.
579 // Set up the FP register classes.
580 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
581 addRegisterClass(MVT::f32, &X86::RFP32RegClass);
583 for (auto VT : { MVT::f32, MVT::f64 }) {
584 setOperationAction(ISD::UNDEF, VT, Expand);
585 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
587 // Always expand sin/cos functions even though x87 has an instruction.
588 setOperationAction(ISD::FSIN , VT, Expand);
589 setOperationAction(ISD::FCOS , VT, Expand);
590 setOperationAction(ISD::FSINCOS, VT, Expand);
594 // Expand FP32 immediates into loads from the stack, save special cases.
595 if (isTypeLegal(MVT::f32)) {
596 if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
597 addLegalFPImmediate(APFloat(+0.0f)); // FLD0
598 addLegalFPImmediate(APFloat(+1.0f)); // FLD1
599 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
600 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
601 } else // SSE immediates.
602 addLegalFPImmediate(APFloat(+0.0f)); // xorps
604 // Expand FP64 immediates into loads from the stack, save special cases.
605 if (isTypeLegal(MVT::f64)) {
606 if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
607 addLegalFPImmediate(APFloat(+0.0)); // FLD0
608 addLegalFPImmediate(APFloat(+1.0)); // FLD1
609 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
610 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
611 } else // SSE immediates.
612 addLegalFPImmediate(APFloat(+0.0)); // xorpd
614 // Handle constrained floating-point operations on scalars.
615 setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
616 setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
617 setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
618 setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
619 setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
620 setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
621 setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
622 setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
623 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
624 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
625 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
626 setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
627 setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
629 // We don't support FMA.
630 setOperationAction(ISD::FMA, MVT::f64, Expand);
631 setOperationAction(ISD::FMA, MVT::f32, Expand);
633 // f80 always uses X87.
634 if (UseX87) {
635 addRegisterClass(MVT::f80, &X86::RFP80RegClass);
636 setOperationAction(ISD::UNDEF, MVT::f80, Expand);
637 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
639 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
640 addLegalFPImmediate(TmpFlt); // FLD0
641 TmpFlt.changeSign();
642 addLegalFPImmediate(TmpFlt); // FLD0/FCHS
644 bool ignored;
645 APFloat TmpFlt2(+1.0);
646 TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
647 &ignored);
648 addLegalFPImmediate(TmpFlt2); // FLD1
649 TmpFlt2.changeSign();
650 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
653 // Always expand sin/cos functions even though x87 has an instruction.
654 setOperationAction(ISD::FSIN , MVT::f80, Expand);
655 setOperationAction(ISD::FCOS , MVT::f80, Expand);
656 setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
658 setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
659 setOperationAction(ISD::FCEIL, MVT::f80, Expand);
660 setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
661 setOperationAction(ISD::FRINT, MVT::f80, Expand);
662 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
663 setOperationAction(ISD::FMA, MVT::f80, Expand);
664 setOperationAction(ISD::LROUND, MVT::f80, Expand);
665 setOperationAction(ISD::LLROUND, MVT::f80, Expand);
666 setOperationAction(ISD::LRINT, MVT::f80, Expand);
667 setOperationAction(ISD::LLRINT, MVT::f80, Expand);
669 // Handle constrained floating-point operations on scalars.
670 setOperationAction(ISD::STRICT_FADD , MVT::f80, Legal);
671 setOperationAction(ISD::STRICT_FSUB , MVT::f80, Legal);
672 setOperationAction(ISD::STRICT_FMUL , MVT::f80, Legal);
673 setOperationAction(ISD::STRICT_FDIV , MVT::f80, Legal);
674 setOperationAction(ISD::STRICT_FSQRT , MVT::f80, Legal);
675 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);
676 // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
678 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
681 // f128 uses xmm registers, but most operations require libcalls.
682 if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
683 addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
684 : &X86::VR128RegClass);
686 addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps
688 setOperationAction(ISD::FADD, MVT::f128, LibCall);
689 setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
690 setOperationAction(ISD::FSUB, MVT::f128, LibCall);
691 setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
692 setOperationAction(ISD::FDIV, MVT::f128, LibCall);
693 setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
694 setOperationAction(ISD::FMUL, MVT::f128, LibCall);
695 setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
696 setOperationAction(ISD::FMA, MVT::f128, LibCall);
697 setOperationAction(ISD::STRICT_FMA, MVT::f128, LibCall);
699 setOperationAction(ISD::FABS, MVT::f128, Custom);
700 setOperationAction(ISD::FNEG, MVT::f128, Custom);
701 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
703 setOperationAction(ISD::FSIN, MVT::f128, LibCall);
704 setOperationAction(ISD::STRICT_FSIN, MVT::f128, LibCall);
705 setOperationAction(ISD::FCOS, MVT::f128, LibCall);
706 setOperationAction(ISD::STRICT_FCOS, MVT::f128, LibCall);
707 setOperationAction(ISD::FSINCOS, MVT::f128, LibCall);
709 setOperationAction(ISD::FSQRT, MVT::f128, LibCall);
710 setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);
712 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
713 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
714 // We need to custom handle any FP_ROUND with an f128 input, but
715 // LegalizeDAG uses the result type to know when to run a custom handler.
716 // So we have to list all legal floating point result types here.
717 if (isTypeLegal(MVT::f32)) {
718 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
719 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
721 if (isTypeLegal(MVT::f64)) {
722 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
723 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
725 if (isTypeLegal(MVT::f80)) {
726 setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
727 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
730 setOperationAction(ISD::SETCC, MVT::f128, Custom);
732 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
733 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
734 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
735 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
736 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
737 setTruncStoreAction(MVT::f128, MVT::f80, Expand);
740 // Always use a library call for pow.
741 setOperationAction(ISD::FPOW , MVT::f32 , Expand);
742 setOperationAction(ISD::FPOW , MVT::f64 , Expand);
743 setOperationAction(ISD::FPOW , MVT::f80 , Expand);
744 setOperationAction(ISD::FPOW , MVT::f128 , Expand);
746 setOperationAction(ISD::FLOG, MVT::f80, Expand);
747 setOperationAction(ISD::FLOG2, MVT::f80, Expand);
748 setOperationAction(ISD::FLOG10, MVT::f80, Expand);
749 setOperationAction(ISD::FEXP, MVT::f80, Expand);
750 setOperationAction(ISD::FEXP2, MVT::f80, Expand);
751 setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
752 setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
754 // Some FP actions are always expanded for vector types.
755 for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
756 MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
757 setOperationAction(ISD::FSIN, VT, Expand);
758 setOperationAction(ISD::FSINCOS, VT, Expand);
759 setOperationAction(ISD::FCOS, VT, Expand);
760 setOperationAction(ISD::FREM, VT, Expand);
761 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
762 setOperationAction(ISD::FPOW, VT, Expand);
763 setOperationAction(ISD::FLOG, VT, Expand);
764 setOperationAction(ISD::FLOG2, VT, Expand);
765 setOperationAction(ISD::FLOG10, VT, Expand);
766 setOperationAction(ISD::FEXP, VT, Expand);
767 setOperationAction(ISD::FEXP2, VT, Expand);
770 // First set operation action for all vector types to either promote
771 // (for widening) or expand (for scalarization). Then we will selectively
772 // turn on ones that can be effectively codegen'd.
773 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
774 setOperationAction(ISD::SDIV, VT, Expand);
775 setOperationAction(ISD::UDIV, VT, Expand);
776 setOperationAction(ISD::SREM, VT, Expand);
777 setOperationAction(ISD::UREM, VT, Expand);
778 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
779 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
780 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
781 setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
782 setOperationAction(ISD::FMA, VT, Expand);
783 setOperationAction(ISD::FFLOOR, VT, Expand);
784 setOperationAction(ISD::FCEIL, VT, Expand);
785 setOperationAction(ISD::FTRUNC, VT, Expand);
786 setOperationAction(ISD::FRINT, VT, Expand);
787 setOperationAction(ISD::FNEARBYINT, VT, Expand);
788 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
789 setOperationAction(ISD::MULHS, VT, Expand);
790 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
791 setOperationAction(ISD::MULHU, VT, Expand);
792 setOperationAction(ISD::SDIVREM, VT, Expand);
793 setOperationAction(ISD::UDIVREM, VT, Expand);
794 setOperationAction(ISD::CTPOP, VT, Expand);
795 setOperationAction(ISD::CTTZ, VT, Expand);
796 setOperationAction(ISD::CTLZ, VT, Expand);
797 setOperationAction(ISD::ROTL, VT, Expand);
798 setOperationAction(ISD::ROTR, VT, Expand);
799 setOperationAction(ISD::BSWAP, VT, Expand);
800 setOperationAction(ISD::SETCC, VT, Expand);
801 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
802 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
803 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
804 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
805 setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
806 setOperationAction(ISD::TRUNCATE, VT, Expand);
807 setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
808 setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
809 setOperationAction(ISD::ANY_EXTEND, VT, Expand);
810 setOperationAction(ISD::SELECT_CC, VT, Expand);
811 for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
812 setTruncStoreAction(InnerVT, VT, Expand);
814 setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
815 setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
817 // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
818 // types, we have to deal with them whether we ask for Expansion or not.
819 // Setting Expand causes its own optimisation problems though, so leave
820 // them legal.
821 if (VT.getVectorElementType() == MVT::i1)
822 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
824 // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
825 // split/scalarized right now.
826 if (VT.getVectorElementType() == MVT::f16)
827 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
831 // FIXME: In order to prevent SSE instructions being expanded to MMX ones
832 // with -msoft-float, disable use of MMX as well.
833 if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
834 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
835 // No operations on x86mmx supported, everything uses intrinsics.
838 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
839 addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
840 : &X86::VR128RegClass);
842 setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
843 setOperationAction(ISD::FABS, MVT::v4f32, Custom);
844 setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
845 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
846 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
847 setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
848 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
849 setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
851 setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
852 setOperationAction(ISD::STORE, MVT::v2f32, Custom);
854 setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
855 setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
856 setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
857 setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
858 setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
861 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
862 addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
863 : &X86::VR128RegClass);
865 // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
866 // registers cannot be used even for integer operations.
867 addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
868 : &X86::VR128RegClass);
869 addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
870 : &X86::VR128RegClass);
871 addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
872 : &X86::VR128RegClass);
873 addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
874 : &X86::VR128RegClass);
876 for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
877 MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
878 setOperationAction(ISD::SDIV, VT, Custom);
879 setOperationAction(ISD::SREM, VT, Custom);
880 setOperationAction(ISD::UDIV, VT, Custom);
881 setOperationAction(ISD::UREM, VT, Custom);
884 setOperationAction(ISD::MUL, MVT::v2i8, Custom);
885 setOperationAction(ISD::MUL, MVT::v4i8, Custom);
886 setOperationAction(ISD::MUL, MVT::v8i8, Custom);
888 setOperationAction(ISD::MUL, MVT::v16i8, Custom);
889 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
890 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
891 setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
892 setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
893 setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
894 setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
895 setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
896 setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
897 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
898 setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
899 setOperationAction(ISD::FABS, MVT::v2f64, Custom);
900 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);
902 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
903 setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
904 setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
905 setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
906 setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
909 setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
910 setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
911 setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
912 setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
913 setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
914 setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
915 setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
916 setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);
917 setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
918 setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom);
919 setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom);
920 setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom);
922 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
923 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
924 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
926 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
927 setOperationAction(ISD::SETCC, VT, Custom);
928 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
929 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
930 setOperationAction(ISD::CTPOP, VT, Custom);
931 setOperationAction(ISD::ABS, VT, Custom);
933 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
934 // setcc all the way to isel and prefer SETGT in some isel patterns.
935 setCondCodeAction(ISD::SETLT, VT, Custom);
936 setCondCodeAction(ISD::SETLE, VT, Custom);
939 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
940 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
941 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
942 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
943 setOperationAction(ISD::VSELECT, VT, Custom);
944 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
947 for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
948 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
949 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
950 setOperationAction(ISD::VSELECT, VT, Custom);
952 if (VT == MVT::v2i64 && !Subtarget.is64Bit())
953 continue;
955 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
956 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
959 // Custom lower v2i64 and v2f64 selects.
960 setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
961 setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
962 setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
963 setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
964 setOperationAction(ISD::SELECT, MVT::v16i8, Custom);
966 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
967 setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
968 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
969 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i32, Custom);
971 // Custom legalize these to avoid over promotion or custom promotion.
972 for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
973 setOperationAction(ISD::FP_TO_SINT, VT, Custom);
974 setOperationAction(ISD::FP_TO_UINT, VT, Custom);
975 setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
976 setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
979 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
980 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
981 setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
982 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i32, Custom);
984 setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
985 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i32, Custom);
987 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
988 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Custom);
990 // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
991 setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
992 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f32, Custom);
993 setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
994 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f32, Custom);
996 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
997 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f32, Custom);
998 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
999 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f32, Custom);
1001 // We want to legalize this to an f64 load rather than an i64 load on
1002 // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
1004 setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
1005 setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
1006 setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
1007 setOperationAction(ISD::STORE, MVT::v2i32, Custom);
1008 setOperationAction(ISD::STORE, MVT::v4i16, Custom);
1009 setOperationAction(ISD::STORE, MVT::v8i8, Custom);
1011 setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
1012 setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
1013 setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
1014 if (!Subtarget.hasAVX512())
1015 setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
1017 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
1018 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
1019 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
1021 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
1023 setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
1024 setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
1025 setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
1026 setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
1027 setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
1028 setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
1030 // In the customized shift lowering, the legal v4i32/v2i64 cases
1031 // in AVX2 will be recognized.
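// (AVX2 provides the per-element variable shifts VPSLLVD/VPSRLVD/VPSRAVD and
// VPSLLVQ/VPSRLVQ; arithmetic shifts of i64 elements still need VPSRAVQ from
// AVX-512.)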
1032 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1033 setOperationAction(ISD::SRL, VT, Custom);
1034 setOperationAction(ISD::SHL, VT, Custom);
1035 setOperationAction(ISD::SRA, VT, Custom);
1038 setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
1039 setOperationAction(ISD::ROTL, MVT::v8i16, Custom);
1041 // With AVX512, expanding (and promoting the shifts) is better.
1042 if (!Subtarget.hasAVX512())
1043 setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
1045 setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
1046 setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
1047 setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
1048 setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
1049 setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
1052 if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
1053 setOperationAction(ISD::ABS, MVT::v16i8, Legal);
1054 setOperationAction(ISD::ABS, MVT::v8i16, Legal);
1055 setOperationAction(ISD::ABS, MVT::v4i32, Legal);
1056 setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
1057 setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
1058 setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
1059 setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
1060 setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
1062 // These might be better off as horizontal vector ops.
1063 setOperationAction(ISD::ADD, MVT::i16, Custom);
1064 setOperationAction(ISD::ADD, MVT::i32, Custom);
1065 setOperationAction(ISD::SUB, MVT::i16, Custom);
1066 setOperationAction(ISD::SUB, MVT::i32, Custom);
1069 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
1070 for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1071 setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
1072 setOperationAction(ISD::STRICT_FFLOOR, RoundedTy, Legal);
1073 setOperationAction(ISD::FCEIL, RoundedTy, Legal);
1074 setOperationAction(ISD::STRICT_FCEIL, RoundedTy, Legal);
1075 setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
1076 setOperationAction(ISD::STRICT_FTRUNC, RoundedTy, Legal);
1077 setOperationAction(ISD::FRINT, RoundedTy, Legal);
1078 setOperationAction(ISD::STRICT_FRINT, RoundedTy, Legal);
1079 setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
1080 setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy, Legal);
1083 setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
1084 setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
1085 setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
1086 setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
1087 setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
1088 setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
1089 setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
1090 setOperationAction(ISD::UMIN, MVT::v4i32, Legal);
1092 // FIXME: Do we need to handle scalar-to-vector here?
1093 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
1095 // We directly match byte blends in the backend as they match the VSELECT
1096 // condition form.
1097 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
1099 // SSE41 brings specific instructions for doing vector sign extend even in
1100 // cases where we don't have SRA.
1101 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1102 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1103 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1106 // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
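// For example, a sign-extending load of v8i8 into v8i16 selects PMOVSXBW with
// a memory operand instead of a separate load plus extend.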
1107 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1108 setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
1109 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
1110 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
1111 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1112 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1113 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
1116 // i8 vectors are custom because the source register and source
1117 // memory operand types are not the same width.
1118 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1120 if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
1121 // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we can
1122 // do the pre and post work in the vector domain.
1123 setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Custom);
1124 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
1125 // We need to mark SINT_TO_FP as Custom even though we want to expand it
1126 // so that DAG combine doesn't try to turn it into uint_to_fp.
1127 setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Custom);
1128 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
1132 if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1133 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1134 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1135 setOperationAction(ISD::ROTL, VT, Custom);
1137 // XOP can efficiently perform BITREVERSE with VPPERM.
1138 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1139 setOperationAction(ISD::BITREVERSE, VT, Custom);
1141 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1142 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1143 setOperationAction(ISD::BITREVERSE, VT, Custom);
1146 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1147 bool HasInt256 = Subtarget.hasInt256();
1149 addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
1150 : &X86::VR256RegClass);
1151 addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1152 : &X86::VR256RegClass);
1153 addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1154 : &X86::VR256RegClass);
1155 addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1156 : &X86::VR256RegClass);
1157 addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1158 : &X86::VR256RegClass);
1159 addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1160 : &X86::VR256RegClass);
1162 for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1163 setOperationAction(ISD::FFLOOR, VT, Legal);
1164 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
1165 setOperationAction(ISD::FCEIL, VT, Legal);
1166 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
1167 setOperationAction(ISD::FTRUNC, VT, Legal);
1168 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
1169 setOperationAction(ISD::FRINT, VT, Legal);
1170 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
1171 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1172 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1173 setOperationAction(ISD::FNEG, VT, Custom);
1174 setOperationAction(ISD::FABS, VT, Custom);
1175 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1178 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1179 // even though v8i16 is a legal type.
1180 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1181 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1182 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1183 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1184 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
1185 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i32, Legal);
1187 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
1188 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i32, Legal);
1190 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f32, Legal);
1191 setOperationAction(ISD::STRICT_FADD, MVT::v8f32, Legal);
1192 setOperationAction(ISD::STRICT_FADD, MVT::v4f64, Legal);
1193 setOperationAction(ISD::STRICT_FSUB, MVT::v8f32, Legal);
1194 setOperationAction(ISD::STRICT_FSUB, MVT::v4f64, Legal);
1195 setOperationAction(ISD::STRICT_FMUL, MVT::v8f32, Legal);
1196 setOperationAction(ISD::STRICT_FMUL, MVT::v4f64, Legal);
1197 setOperationAction(ISD::STRICT_FDIV, MVT::v8f32, Legal);
1198 setOperationAction(ISD::STRICT_FDIV, MVT::v4f64, Legal);
1199 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal);
1200 setOperationAction(ISD::STRICT_FSQRT, MVT::v8f32, Legal);
1201 setOperationAction(ISD::STRICT_FSQRT, MVT::v4f64, Legal);
1203 if (!Subtarget.hasAVX512())
1204 setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1206 // In the customized shift lowering, the legal v8i32/v4i64 cases
1207 // in AVX2 will be recognized.
1208 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1209 setOperationAction(ISD::SRL, VT, Custom);
1210 setOperationAction(ISD::SHL, VT, Custom);
1211 setOperationAction(ISD::SRA, VT, Custom);
1214 // These types need custom splitting if their input is a 128-bit vector.
1215 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1216 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1217 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1218 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1220 setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
1221 setOperationAction(ISD::ROTL, MVT::v16i16, Custom);
1223 // With BWI, expanding (and promoting the shifts) is better.
1224 if (!Subtarget.hasBWI())
1225 setOperationAction(ISD::ROTL, MVT::v32i8, Custom);
1227 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
1228 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
1229 setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
1230 setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
1231 setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
1232 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
1234 for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1235 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1236 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1237 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1240 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1241 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
1242 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1243 setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
1245 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1246 setOperationAction(ISD::SETCC, VT, Custom);
1247 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
1248 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
1249 setOperationAction(ISD::CTPOP, VT, Custom);
1250 setOperationAction(ISD::CTLZ, VT, Custom);
1252 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1253 // setcc all the way to isel and prefer SETGT in some isel patterns.
1254 setCondCodeAction(ISD::SETLT, VT, Custom);
1255 setCondCodeAction(ISD::SETLE, VT, Custom);
1258 if (Subtarget.hasAnyFMA()) {
1259 for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1260 MVT::v2f64, MVT::v4f64 }) {
1261 setOperationAction(ISD::FMA, VT, Legal);
1262 setOperationAction(ISD::STRICT_FMA, VT, Legal);
1266 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1267 setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1268 setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1271 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1272 setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
1273 setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
1274 setOperationAction(ISD::MUL, MVT::v32i8, Custom);
1276 setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
1277 setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
1278 setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
1279 setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
1280 setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
1281 setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
1283 setOperationAction(ISD::ABS, MVT::v4i64, Custom);
1284 setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
1285 setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
1286 setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
1287 setOperationAction(ISD::UMIN, MVT::v4i64, Custom);
1289 setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1290 setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1291 setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1292 setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1293 setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1294 setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1295 setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1296 setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1298 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1299 setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
1300 setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1301 setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1302 setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1303 setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1306 for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1307 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1308 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1312 // The custom lowering of UINT_TO_FP for v8i32 becomes interesting
1313 // when we have a 256-bit-wide blend with immediate.
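// Roughly, that lowering uses the classic exponent-bias trick (the exact
// constants and instruction choices are made in the UINT_TO_FP lowering):
//   lo  = (v & 0xffff) | 0x4b000000   // float bits of 2^23 + low half
//   hi  = (v >> 16)    | 0x53000000   // float bits of 2^39 + high half * 2^16
//   res = (asfloat(hi) - (0x1.0p39f + 0x1.0p23f)) + asfloat(lo)
// where asfloat is a bitcast; the OR-with-constant steps are what the
// 256-bit blend with immediate makes cheap.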
1314 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1315 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1317 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
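// e.g. a sign-extending load of <8 x i8> to <8 x i32> can be selected
// directly to VPMOVSXBD with a memory operand instead of a load followed by
// a separate extend.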
1318 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1319 setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1320 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
1321 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
1322 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
1323 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
1324 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
1328 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1329 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1330 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1331 setOperationAction(ISD::MSTORE, VT, Legal);
1334 // Extract subvector is special because the value type
1335 // (result) is 128-bit but the source is 256-bit wide.
1336 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1337 MVT::v4f32, MVT::v2f64 }) {
1338 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1341 // Custom lower several nodes for 256-bit types.
1342 for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1343 MVT::v8f32, MVT::v4f64 }) {
1344 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1345 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1346 setOperationAction(ISD::VSELECT, VT, Custom);
1347 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1348 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1349 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1350 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1351 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1352 setOperationAction(ISD::STORE, VT, Custom);
1356 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1358 // Custom legalize 2x32 to get a little better code.
1359 setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1360 setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1362 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1363 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1364 setOperationAction(ISD::MGATHER, VT, Custom);
1368 // This block controls legalization of the mask vector sizes that are
1369 // available with AVX512. 512-bit vectors are in a separate block controlled
1370 // by useAVX512Regs.
1371 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1372 addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
1373 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1374 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1375 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1376 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1378 setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
1379 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1380 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
1382 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1383 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1384 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1385 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1386 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1387 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1388 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1389 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1390 setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
1391 setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
1392 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i1, Custom);
1393 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i1, Custom);
1395 // There is no byte sized k-register load or store without AVX512DQ.
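// Without DQI there is no KMOVB, so the custom lowering below typically
// routes a v8i1 (or narrower) mask through a wider mask type or a GPR so
// that KMOVW or byte-sized integer moves can be used instead.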
1396 if (!Subtarget.hasDQI()) {
1397 setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1398 setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1399 setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1400 setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1402 setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1403 setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1404 setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1405 setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1408 // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1409 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1410 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1411 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1412 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1415 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1416 setOperationAction(ISD::ADD, VT, Custom);
1417 setOperationAction(ISD::SUB, VT, Custom);
1418 setOperationAction(ISD::MUL, VT, Custom);
1419 setOperationAction(ISD::SETCC, VT, Custom);
1420 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
1421 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
1422 setOperationAction(ISD::SELECT, VT, Custom);
1423 setOperationAction(ISD::TRUNCATE, VT, Custom);
1424 setOperationAction(ISD::UADDSAT, VT, Custom);
1425 setOperationAction(ISD::SADDSAT, VT, Custom);
1426 setOperationAction(ISD::USUBSAT, VT, Custom);
1427 setOperationAction(ISD::SSUBSAT, VT, Custom);
1429 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1430 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1431 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1432 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1433 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1434 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1435 setOperationAction(ISD::VSELECT, VT, Expand);
1438 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1439 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1442 // This block controls legalization for 512-bit operations with 32/64 bit
1443 // elements. 512-bits can be disabled based on prefer-vector-width and
1444 // required-vector-width function attributes.
1445 if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1446 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1447 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1448 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1449 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1451 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1452 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
1453 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1454 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
1455 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
1456 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
1459 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1460 setOperationAction(ISD::FNEG, VT, Custom);
1461 setOperationAction(ISD::FABS, VT, Custom);
1462 setOperationAction(ISD::FMA, VT, Legal);
1463 setOperationAction(ISD::STRICT_FMA, VT, Legal);
1464 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1467 for (MVT VT : { MVT::v16i1, MVT::v16i8, MVT::v16i16 }) {
1468 setOperationPromotedToType(ISD::FP_TO_SINT , VT, MVT::v16i32);
1469 setOperationPromotedToType(ISD::FP_TO_UINT , VT, MVT::v16i32);
1470 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1471 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1473 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1474 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1475 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v16i32, Legal);
1476 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v16i32, Legal);
1477 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1478 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1479 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Legal);
1480 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Legal);
1482 setOperationAction(ISD::STRICT_FADD, MVT::v16f32, Legal);
1483 setOperationAction(ISD::STRICT_FADD, MVT::v8f64, Legal);
1484 setOperationAction(ISD::STRICT_FSUB, MVT::v16f32, Legal);
1485 setOperationAction(ISD::STRICT_FSUB, MVT::v8f64, Legal);
1486 setOperationAction(ISD::STRICT_FMUL, MVT::v16f32, Legal);
1487 setOperationAction(ISD::STRICT_FMUL, MVT::v8f64, Legal);
1488 setOperationAction(ISD::STRICT_FDIV, MVT::v16f32, Legal);
1489 setOperationAction(ISD::STRICT_FDIV, MVT::v8f64, Legal);
1490 setOperationAction(ISD::STRICT_FSQRT, MVT::v16f32, Legal);
1491 setOperationAction(ISD::STRICT_FSQRT, MVT::v8f64, Legal);
1492 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Legal);
1493 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f32, Legal);
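// Truncating stores map onto the AVX512 down-converting moves; e.g. a
// truncating store of v8i64 to v8i8 can be selected to VPMOVQB with a
// memory destination.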
1495 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1496 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1497 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1498 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1499 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
1501 // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1502 // to 512-bit rather than use the AVX2 instructions so that we can use
1503 // the masked (k-register) forms.
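// e.g. a masked load of v8f32 can be widened to a v16f32 operation whose
// mask is zero-extended, with the extra lanes discarded afterwards, instead
// of emitting the AVX2 VMASKMOVPS form.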
1504 if (!Subtarget.hasVLX()) {
1505 for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1506 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1507 setOperationAction(ISD::MLOAD, VT, Custom);
1508 setOperationAction(ISD::MSTORE, VT, Custom);
1512 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1513 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1514 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1515 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1516 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1517 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1518 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1519 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1521 // Need to custom widen this if we don't have AVX512BW.
1522 setOperationAction(ISD::ANY_EXTEND, MVT::v8i8, Custom);
1523 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i8, Custom);
1524 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i8, Custom);
1526 for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1527 setOperationAction(ISD::FFLOOR, VT, Legal);
1528 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
1529 setOperationAction(ISD::FCEIL, VT, Legal);
1530 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
1531 setOperationAction(ISD::FTRUNC, VT, Legal);
1532 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
1533 setOperationAction(ISD::FRINT, VT, Legal);
1534 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
1535 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1536 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1538 setOperationAction(ISD::SELECT, VT, Custom);
1541 // Without BWI we need to use custom lowering to handle MVT::v64i8 input.
1542 for (auto VT : {MVT::v16i32, MVT::v8i64, MVT::v64i8}) {
1543 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1544 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1547 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1548 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1549 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1550 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1552 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1553 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1555 setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1556 setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1558 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1559 setOperationAction(ISD::SMAX, VT, Legal);
1560 setOperationAction(ISD::UMAX, VT, Legal);
1561 setOperationAction(ISD::SMIN, VT, Legal);
1562 setOperationAction(ISD::UMIN, VT, Legal);
1563 setOperationAction(ISD::ABS, VT, Legal);
1564 setOperationAction(ISD::SRL, VT, Custom);
1565 setOperationAction(ISD::SHL, VT, Custom);
1566 setOperationAction(ISD::SRA, VT, Custom);
1567 setOperationAction(ISD::CTPOP, VT, Custom);
1568 setOperationAction(ISD::ROTL, VT, Custom);
1569 setOperationAction(ISD::ROTR, VT, Custom);
1570 setOperationAction(ISD::SETCC, VT, Custom);
1571 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
1572 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
1573 setOperationAction(ISD::SELECT, VT, Custom);
1575 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1576 // setcc all the way to isel and prefer SETGT in some isel patterns.
1577 setCondCodeAction(ISD::SETLT, VT, Custom);
1578 setCondCodeAction(ISD::SETLE, VT, Custom);
1581 if (Subtarget.hasDQI()) {
1582 setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
1583 setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
1584 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i64, Legal);
1585 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i64, Legal);
1586 setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
1587 setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
1588 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i64, Legal);
1589 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i64, Legal);
1591 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1594 if (Subtarget.hasCDI()) {
1595 // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
1596 for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1597 setOperationAction(ISD::CTLZ, VT, Legal);
1599 } // Subtarget.hasCDI()
1601 if (Subtarget.hasVPOPCNTDQ()) {
1602 for (auto VT : { MVT::v16i32, MVT::v8i64 })
1603 setOperationAction(ISD::CTPOP, VT, Legal);
1606 // Extract subvector is special because the value type
1607 // (result) is 256-bit but the source is 512-bit wide.
1608 // 128-bit was made Legal under AVX1.
1609 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1610 MVT::v8f32, MVT::v4f64 })
1611 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1613 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1614 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1615 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1616 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1617 setOperationAction(ISD::VSELECT, VT, Custom);
1618 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1619 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1620 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1621 setOperationAction(ISD::MLOAD, VT, Legal);
1622 setOperationAction(ISD::MSTORE, VT, Legal);
1623 setOperationAction(ISD::MGATHER, VT, Custom);
1624 setOperationAction(ISD::MSCATTER, VT, Custom);
1626 if (!Subtarget.hasBWI()) {
1627 // Need to custom split v32i16/v64i8 bitcasts.
1628 setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
1629 setOperationAction(ISD::BITCAST, MVT::v64i8, Custom);
1631 // Better to split these into two 256-bit ops.
1632 setOperationAction(ISD::BITREVERSE, MVT::v8i64, Custom);
1633 setOperationAction(ISD::BITREVERSE, MVT::v16i32, Custom);
1636 if (Subtarget.hasVBMI2()) {
1637 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1638 setOperationAction(ISD::FSHL, VT, Custom);
1639 setOperationAction(ISD::FSHR, VT, Custom);
1644 // This block controls legalization for operations that don't have
1645 // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1646 // smaller types.
1647 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1648 // These operations are handled on non-VLX by artificially widening in
1649 // isel patterns.
1651 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32,
1652 Subtarget.hasVLX() ? Legal : Custom);
1653 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32,
1654 Subtarget.hasVLX() ? Legal : Custom);
1655 setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
1656 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i32,
1657 Subtarget.hasVLX() ? Legal : Custom);
1658 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32,
1659 Subtarget.hasVLX() ? Legal : Custom);
1660 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i32, Custom);
1661 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32,
1662 Subtarget.hasVLX() ? Legal : Custom);
1663 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32,
1664 Subtarget.hasVLX() ? Legal : Custom);
1665 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32,
1666 Subtarget.hasVLX() ? Legal : Custom);
1667 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32,
1668 Subtarget.hasVLX() ? Legal : Custom);
1670 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1671 setOperationAction(ISD::SMAX, VT, Legal);
1672 setOperationAction(ISD::UMAX, VT, Legal);
1673 setOperationAction(ISD::SMIN, VT, Legal);
1674 setOperationAction(ISD::UMIN, VT, Legal);
1675 setOperationAction(ISD::ABS, VT, Legal);
1678 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1679 setOperationAction(ISD::ROTL, VT, Custom);
1680 setOperationAction(ISD::ROTR, VT, Custom);
1683 // Custom legalize 2x32 to get a little better code.
1684 setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1685 setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1687 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1688 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1689 setOperationAction(ISD::MSCATTER, VT, Custom);
1691 if (Subtarget.hasDQI()) {
1692 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1693 setOperationAction(ISD::SINT_TO_FP, VT,
1694 Subtarget.hasVLX() ? Legal : Custom);
1695 setOperationAction(ISD::UINT_TO_FP, VT,
1696 Subtarget.hasVLX() ? Legal : Custom);
1697 setOperationAction(ISD::STRICT_SINT_TO_FP, VT,
1698 Subtarget.hasVLX() ? Legal : Custom);
1699 setOperationAction(ISD::STRICT_UINT_TO_FP, VT,
1700 Subtarget.hasVLX() ? Legal : Custom);
1701 setOperationAction(ISD::FP_TO_SINT, VT,
1702 Subtarget.hasVLX() ? Legal : Custom);
1703 setOperationAction(ISD::FP_TO_UINT, VT,
1704 Subtarget.hasVLX() ? Legal : Custom);
1705 setOperationAction(ISD::STRICT_FP_TO_SINT, VT,
1706 Subtarget.hasVLX() ? Legal : Custom);
1707 setOperationAction(ISD::STRICT_FP_TO_UINT, VT,
1708 Subtarget.hasVLX() ? Legal : Custom);
1709 setOperationAction(ISD::MUL, VT, Legal);
1713 if (Subtarget.hasCDI()) {
1714 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1715 setOperationAction(ISD::CTLZ, VT, Legal);
1717 } // Subtarget.hasCDI()
1719 if (Subtarget.hasVPOPCNTDQ()) {
1720 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1721 setOperationAction(ISD::CTPOP, VT, Legal);
1725 // This block controls legalization of v32i1/v64i1 which are available with
1726 // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1727 // useBWIRegs.
1728 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1729 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1730 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1732 for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1733 setOperationAction(ISD::ADD, VT, Custom);
1734 setOperationAction(ISD::SUB, VT, Custom);
1735 setOperationAction(ISD::MUL, VT, Custom);
1736 setOperationAction(ISD::VSELECT, VT, Expand);
1737 setOperationAction(ISD::UADDSAT, VT, Custom);
1738 setOperationAction(ISD::SADDSAT, VT, Custom);
1739 setOperationAction(ISD::USUBSAT, VT, Custom);
1740 setOperationAction(ISD::SSUBSAT, VT, Custom);
1742 setOperationAction(ISD::TRUNCATE, VT, Custom);
1743 setOperationAction(ISD::SETCC, VT, Custom);
1744 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1745 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1746 setOperationAction(ISD::SELECT, VT, Custom);
1747 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1748 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1751 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
1752 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
1753 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
1754 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
1755 for (auto VT : { MVT::v16i1, MVT::v32i1 })
1756 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1758 // Extends from v32i1 masks to 256-bit vectors.
1759 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
1760 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
1761 setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
1764 // This block controls legalization for v32i16 and v64i8. 512-bits can be
1765 // disabled based on prefer-vector-width and required-vector-width function
1766 // attributes.
1767 if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) {
1768 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1769 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1771 // Extends from v64i1 masks to 512-bit vectors.
1772 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1773 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1774 setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
1776 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1777 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1778 setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
1779 setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
1780 setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
1781 setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
1782 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
1783 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
1784 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Legal);
1785 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Legal);
1786 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
1787 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
1788 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom);
1789 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom);
1790 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1791 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1792 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1793 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
1794 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
1795 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom);
1796 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom);
1797 setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
1798 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1800 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1801 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1803 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
1805 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1806 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1807 setOperationAction(ISD::VSELECT, VT, Custom);
1808 setOperationAction(ISD::ABS, VT, Legal);
1809 setOperationAction(ISD::SRL, VT, Custom);
1810 setOperationAction(ISD::SHL, VT, Custom);
1811 setOperationAction(ISD::SRA, VT, Custom);
1812 setOperationAction(ISD::MLOAD, VT, Legal);
1813 setOperationAction(ISD::MSTORE, VT, Legal);
1814 setOperationAction(ISD::CTPOP, VT, Custom);
1815 setOperationAction(ISD::CTLZ, VT, Custom);
1816 setOperationAction(ISD::SMAX, VT, Legal);
1817 setOperationAction(ISD::UMAX, VT, Legal);
1818 setOperationAction(ISD::SMIN, VT, Legal);
1819 setOperationAction(ISD::UMIN, VT, Legal);
1820 setOperationAction(ISD::SETCC, VT, Custom);
1821 setOperationAction(ISD::UADDSAT, VT, Legal);
1822 setOperationAction(ISD::SADDSAT, VT, Legal);
1823 setOperationAction(ISD::USUBSAT, VT, Legal);
1824 setOperationAction(ISD::SSUBSAT, VT, Legal);
1825 setOperationAction(ISD::SELECT, VT, Custom);
1827 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1828 // setcc all the way to isel and prefer SETGT in some isel patterns.
1829 setCondCodeAction(ISD::SETLT, VT, Custom);
1830 setCondCodeAction(ISD::SETLE, VT, Custom);
1833 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1834 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1837 if (Subtarget.hasBITALG()) {
1838 for (auto VT : { MVT::v64i8, MVT::v32i16 })
1839 setOperationAction(ISD::CTPOP, VT, Legal);
1842 if (Subtarget.hasVBMI2()) {
1843 setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
1844 setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
1848 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1849 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
1850 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1851 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
1854 // These operations are handled on non-VLX by artificially widening in
1855 // isel patterns.
1856 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1858 if (Subtarget.hasBITALG()) {
1859 for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
1860 setOperationAction(ISD::CTPOP, VT, Legal);
1864 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
1865 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
1866 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
1867 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
1868 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
1869 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
1871 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
1872 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
1873 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
1874 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
1875 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
1877 if (Subtarget.hasDQI()) {
1878 // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1879 // v2f32 UINT_TO_FP is already custom under SSE2.
1880 assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1881 isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
1882 "Unexpected operation action!");
1883 // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1884 setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
1885 setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
1886 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
1887 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
1890 if (Subtarget.hasBWI()) {
1891 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
1892 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
1895 if (Subtarget.hasVBMI2()) {
1896 // TODO: Make these legal even without VLX?
1897 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
1898 MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1899 setOperationAction(ISD::FSHL, VT, Custom);
1900 setOperationAction(ISD::FSHR, VT, Custom);
1904 setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
1905 setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
1906 setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
1909 // We want to custom lower some of our intrinsics.
1910 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1911 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1912 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1913 if (!Subtarget.is64Bit()) {
1914 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1917 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1918 // handle type legalization for these operations here.
1920 // FIXME: We really should do custom legalization for addition and
1921 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1922 // than generic legalization for 64-bit multiplication-with-overflow, though.
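// For reference, a plain i32 llvm.sadd.with.overflow is lowered to a single
// ADD whose overflow result is read back from EFLAGS (SETO or a conditional
// branch), so both results come from one instruction.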
1923 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1924 if (VT == MVT::i64 && !Subtarget.is64Bit())
1925 continue;
1926 // Add/Sub/Mul with overflow operations are custom lowered.
1927 setOperationAction(ISD::SADDO, VT, Custom);
1928 setOperationAction(ISD::UADDO, VT, Custom);
1929 setOperationAction(ISD::SSUBO, VT, Custom);
1930 setOperationAction(ISD::USUBO, VT, Custom);
1931 setOperationAction(ISD::SMULO, VT, Custom);
1932 setOperationAction(ISD::UMULO, VT, Custom);
1934 // Support carry-in as a value rather than glue.
1935 setOperationAction(ISD::ADDCARRY, VT, Custom);
1936 setOperationAction(ISD::SUBCARRY, VT, Custom);
1937 setOperationAction(ISD::SETCCCARRY, VT, Custom);
1940 if (!Subtarget.is64Bit()) {
1941 // These libcalls are not available in 32-bit.
1942 setLibcallName(RTLIB::SHL_I128, nullptr);
1943 setLibcallName(RTLIB::SRL_I128, nullptr);
1944 setLibcallName(RTLIB::SRA_I128, nullptr);
1945 setLibcallName(RTLIB::MUL_I128, nullptr);
1948 // Combine sin / cos into _sincos_stret if it is available.
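// e.g. separate calls to sinf(x) and cosf(x) on the same value can be merged
// into one __sincosf_stret call that returns both results, on targets such
// as Darwin that provide these libcalls.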
1949 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1950 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1951 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1952 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
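// Win64 has no native support for 128-bit division, so these are custom
// lowered to library calls (the __divti3 / __umodti3 family) with the i128
// operands passed indirectly, as the Win64 ABI cannot pass i128 in registers.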
1955 if (Subtarget.isTargetWin64()) {
1956 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1957 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1958 setOperationAction(ISD::SREM, MVT::i128, Custom);
1959 setOperationAction(ISD::UREM, MVT::i128, Custom);
1960 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1961 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1964 // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
1965 // is. We should promote the value to 64 bits to solve this.
1966 // This is what the CRT headers do - `fmodf` is an inline header
1967 // function casting to f64 and calling `fmod`.
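// e.g. fmodf(x, y) ends up as (float)fmod((double)x, (double)y), mirroring
// the CRT's inline definition.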
1968 if (Subtarget.is32Bit() &&
1969 (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
1970 for (ISD::NodeType Op :
1971 {ISD::FCEIL, ISD::STRICT_FCEIL,
1972 ISD::FCOS, ISD::STRICT_FCOS,
1973 ISD::FEXP, ISD::STRICT_FEXP,
1974 ISD::FFLOOR, ISD::STRICT_FFLOOR,
1975 ISD::FREM, ISD::STRICT_FREM,
1976 ISD::FLOG, ISD::STRICT_FLOG,
1977 ISD::FLOG10, ISD::STRICT_FLOG10,
1978 ISD::FPOW, ISD::STRICT_FPOW,
1979 ISD::FSIN, ISD::STRICT_FSIN})
1980 if (isOperationExpand(Op, MVT::f32))
1981 setOperationAction(Op, MVT::f32, Promote);
1983 // We have target-specific dag combine patterns for the following nodes:
1984 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1985 setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
1986 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1987 setTargetDAGCombine(ISD::CONCAT_VECTORS);
1988 setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
1989 setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
1990 setTargetDAGCombine(ISD::BITCAST);
1991 setTargetDAGCombine(ISD::VSELECT);
1992 setTargetDAGCombine(ISD::SELECT);
1993 setTargetDAGCombine(ISD::SHL);
1994 setTargetDAGCombine(ISD::SRA);
1995 setTargetDAGCombine(ISD::SRL);
1996 setTargetDAGCombine(ISD::OR);
1997 setTargetDAGCombine(ISD::AND);
1998 setTargetDAGCombine(ISD::ADD);
1999 setTargetDAGCombine(ISD::FADD);
2000 setTargetDAGCombine(ISD::FSUB);
2001 setTargetDAGCombine(ISD::FNEG);
2002 setTargetDAGCombine(ISD::FMA);
2003 setTargetDAGCombine(ISD::FMINNUM);
2004 setTargetDAGCombine(ISD::FMAXNUM);
2005 setTargetDAGCombine(ISD::SUB);
2006 setTargetDAGCombine(ISD::LOAD);
2007 setTargetDAGCombine(ISD::MLOAD);
2008 setTargetDAGCombine(ISD::STORE);
2009 setTargetDAGCombine(ISD::MSTORE);
2010 setTargetDAGCombine(ISD::TRUNCATE);
2011 setTargetDAGCombine(ISD::ZERO_EXTEND);
2012 setTargetDAGCombine(ISD::ANY_EXTEND);
2013 setTargetDAGCombine(ISD::SIGN_EXTEND);
2014 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
2015 setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG);
2016 setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
2017 setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
2018 setTargetDAGCombine(ISD::SINT_TO_FP);
2019 setTargetDAGCombine(ISD::UINT_TO_FP);
2020 setTargetDAGCombine(ISD::STRICT_SINT_TO_FP);
2021 setTargetDAGCombine(ISD::STRICT_UINT_TO_FP);
2022 setTargetDAGCombine(ISD::SETCC);
2023 setTargetDAGCombine(ISD::MUL);
2024 setTargetDAGCombine(ISD::XOR);
2025 setTargetDAGCombine(ISD::MSCATTER);
2026 setTargetDAGCombine(ISD::MGATHER);
2028 computeRegisterProperties(Subtarget.getRegisterInfo());
2030 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2031 MaxStoresPerMemsetOptSize = 8;
2032 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2033 MaxStoresPerMemcpyOptSize = 4;
2034 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2035 MaxStoresPerMemmoveOptSize = 4;
2037 // TODO: These control memcmp expansion in CGP and could be raised higher, but
2038 // that needs to be benchmarked and balanced with the potential use of vector
2039 // load/store types (PR33329, PR33914).
2040 MaxLoadsPerMemcmp = 2;
2041 MaxLoadsPerMemcmpOptSize = 2;
2043 // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
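// e.g. the default value of 4 aligns loop headers to 16-byte boundaries;
// passing -x86-experimental-pref-loop-alignment=5 would request 32 bytes.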
2044 setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));
2046 // An out-of-order CPU can speculatively execute past a predictable branch,
2047 // but a conditional move could be stalled by an expensive earlier operation.
2048 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2049 EnableExtLdPromotion = true;
2050 setPrefFunctionAlignment(Align(16));
2052 verifyIntrinsicTables();
2054 // Default to having -disable-strictnode-mutation on
2055 IsStrictFPEnabled = true;
2058 // This has so far only been implemented for 64-bit MachO.
2059 bool X86TargetLowering::useLoadStackGuardNode() const {
2060 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2063 bool X86TargetLowering::useStackGuardXorFP() const {
2064 // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2065 return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2068 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2069 const SDLoc &DL) const {
2070 EVT PtrTy = getPointerTy(DAG.getDataLayout());
2071 unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2072 MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2073 return SDValue(Node, 0);
2076 TargetLoweringBase::LegalizeTypeAction
2077 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
2078 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
2079 return TypeSplitVector;
2081 if (VT.getVectorNumElements() != 1 &&
2082 VT.getVectorElementType() != MVT::i1)
2083 return TypeWidenVector;
2085 return TargetLoweringBase::getPreferredVectorAction(VT);
2088 MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
2091 // v32i1 vectors should be promoted to v32i8 to match avx2.
2092 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
2093 return MVT::v32i8;
2094 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2095 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2096 Subtarget.hasAVX512() &&
2097 (!isPowerOf2_32(VT.getVectorNumElements()) ||
2098 (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
2099 (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
2100 return MVT::i8;
2101 // Split v64i1 vectors if we don't have v64i8 available.
2102 if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2103 CC != CallingConv::X86_RegCall)
2104 return MVT::v32i1;
2105 // FIXME: Should we just make these types legal and custom split operations?
2106 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !EnableOldKNLABI &&
2107 Subtarget.useAVX512Regs() && !Subtarget.hasBWI())
2108 return MVT::v16i32;
2109 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
2112 unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
2115 // v32i1 vectors should be promoted to v32i8 to match avx2.
2116 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
2117 return 1;
2118 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2119 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2120 Subtarget.hasAVX512() &&
2121 (!isPowerOf2_32(VT.getVectorNumElements()) ||
2122 (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
2123 (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
2124 return VT.getVectorNumElements();
2125 // Split v64i1 vectors if we don't have v64i8 available.
2126 if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2127 CC != CallingConv::X86_RegCall)
2128 return 2;
2129 // FIXME: Should we just make these types legal and custom split operations?
2130 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !EnableOldKNLABI &&
2131 Subtarget.useAVX512Regs() && !Subtarget.hasBWI())
2132 return 1;
2133 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
2136 unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
2137 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
2138 unsigned &NumIntermediates, MVT &RegisterVT) const {
2139 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2140 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2141 Subtarget.hasAVX512() &&
2142 (!isPowerOf2_32(VT.getVectorNumElements()) ||
2143 (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
2144 (VT.getVectorNumElements() > 64 && Subtarget.hasBWI()))) {
2145 RegisterVT = MVT::i8;
2146 IntermediateVT = MVT::i1;
2147 NumIntermediates = VT.getVectorNumElements();
2148 return NumIntermediates;
2151 // Split v64i1 vectors if we don't have v64i8 available.
2152 if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2153 CC != CallingConv::X86_RegCall) {
2154 RegisterVT = MVT::v32i1;
2155 IntermediateVT = MVT::v32i1;
2156 NumIntermediates = 2;
2157 return 2;
2160 return TargetLowering::getVectorTypeBreakdownForCallingConv(Context, CC, VT, IntermediateVT,
2161 NumIntermediates, RegisterVT);
2164 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
2165 LLVMContext& Context,
2166 EVT VT) const {
2168 if (!VT.isVector())
2169 return MVT::i8;
2170 if (Subtarget.hasAVX512()) {
2171 const unsigned NumElts = VT.getVectorNumElements();
2173 // Figure out what this type will be legalized to.
2174 EVT LegalVT = VT;
2175 while (getTypeAction(Context, LegalVT) != TypeLegal)
2176 LegalVT = getTypeToTransformTo(Context, LegalVT);
2178 // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
2179 if (LegalVT.getSimpleVT().is512BitVector())
2180 return EVT::getVectorVT(Context, MVT::i1, NumElts);
2182 if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
2183 // If we legalized to less than a 512-bit vector, then we will use a vXi1
2184 // compare for vXi32/vXi64 for sure. If we have BWI we will also support
2185 // vXi16/vXi8.
2186 MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
2187 if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
2188 return EVT::getVectorVT(Context, MVT::i1, NumElts);
2192 return VT.changeVectorElementTypeToInteger();
2195 /// Helper for getByValTypeAlignment to determine
2196 /// the desired ByVal argument alignment.
2197 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
2200 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2201 if (VTy->getBitWidth() == 128)
2202 MaxAlign = 16;
2203 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2204 unsigned EltAlign = 0;
2205 getMaxByValAlign(ATy->getElementType(), EltAlign);
2206 if (EltAlign > MaxAlign)
2207 MaxAlign = EltAlign;
2208 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2209 for (auto *EltTy : STy->elements()) {
2210 unsigned EltAlign = 0;
2211 getMaxByValAlign(EltTy, EltAlign);
2212 if (EltAlign > MaxAlign)
2213 MaxAlign = EltAlign;
2220 /// Return the desired alignment for ByVal aggregate
2221 /// function arguments in the caller parameter area. For X86, aggregates
2222 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
2223 /// are at 4-byte boundaries.
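/// For example, a byval struct that contains a __m128 field is placed at a
/// 16-byte boundary on 32-bit x86, while a struct of plain ints stays at 4
/// bytes; on x86-64 the minimum is 8 bytes.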
2224 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
2225 const DataLayout &DL) const {
2226 if (Subtarget.is64Bit()) {
2227 // Max of 8 and alignment of type.
2228 unsigned TyAlign = DL.getABITypeAlignment(Ty);
2235 if (Subtarget.hasSSE1())
2236 getMaxByValAlign(Ty, Align);
2240 /// Returns the target specific optimal type for load
2241 /// and store operations as a result of memset, memcpy, and memmove
2242 /// lowering. If DstAlign is zero, the destination alignment can satisfy any
2243 /// constraint. Similarly, if SrcAlign is zero it means there is no need to
2244 /// check it against the alignment requirement,
2245 /// probably because the source does not need to be loaded. If 'IsMemset' is
2246 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
2247 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
2248 /// source is constant so it does not need to be loaded.
2249 /// It returns EVT::Other if the type should be determined using generic
2250 /// target-independent logic.
2251 /// For vector ops we check that the overall size isn't larger than our
2252 /// preferred vector width.
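/// For example, a large copy on an AVX-512 target that prefers 512-bit
/// vectors will typically come back as v64i8 (v16i32 without BWI), while
/// small copies, or functions marked noimplicitfloat, fall back to scalar
/// integer types.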
2253 EVT X86TargetLowering::getOptimalMemOpType(
2254 uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
2255 bool ZeroMemset, bool MemcpyStrSrc,
2256 const AttributeList &FuncAttributes) const {
2257 if (!FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
2258 if (Size >= 16 && (!Subtarget.isUnalignedMem16Slow() ||
2259 ((DstAlign == 0 || DstAlign >= 16) &&
2260 (SrcAlign == 0 || SrcAlign >= 16)))) {
2261 // FIXME: Check if unaligned 64-byte accesses are slow.
2262 if (Size >= 64 && Subtarget.hasAVX512() &&
2263 (Subtarget.getPreferVectorWidth() >= 512)) {
2264 return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
2266 // FIXME: Check if unaligned 32-byte accesses are slow.
2267 if (Size >= 32 && Subtarget.hasAVX() &&
2268 (Subtarget.getPreferVectorWidth() >= 256)) {
2269 // Although this isn't a well-supported type for AVX1, we'll let
2270 // legalization and shuffle lowering produce the optimal codegen. If we
2271 // choose an optimal type with a vector element larger than a byte,
2272 // getMemsetStores() may create an intermediate splat (using an integer
2273 // multiply) before we splat as a vector.
2276 if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2278 // TODO: Can SSE1 handle a byte vector?
2279 // If we have SSE1 registers we should be able to use them.
2280 if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2281 (Subtarget.getPreferVectorWidth() >= 128))
2283 } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
2284 !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2285 // Do not use f64 to lower memcpy if source is string constant. It's
2286 // better to use i32 to avoid the loads.
2287 // Also, do not use f64 to lower memset unless this is a memset of zeros.
2288 // The gymnastics of splatting a byte value into an XMM register and then
2289 // only using 8-byte stores (because this is a CPU with slow unaligned
2290 // 16-byte accesses) makes that a loser.
2294 // This is a compromise. If we reach here, unaligned accesses may be slow on
2295 // this target. However, creating smaller, aligned accesses could be even
2296 // slower and would certainly be a lot more code.
2297 if (Subtarget.is64Bit() && Size >= 8)
2302 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
2303 if (VT == MVT::f32)
2304 return X86ScalarSSEf32;
2305 else if (VT == MVT::f64)
2306 return X86ScalarSSEf64;
2310 bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2311 EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
2314 switch (VT.getSizeInBits()) {
2316 // 8-byte and under are always assumed to be fast.
2320 *Fast = !Subtarget.isUnalignedMem16Slow();
2323 *Fast = !Subtarget.isUnalignedMem32Slow();
2325 // TODO: What about AVX-512 (512-bit) accesses?
2328 // NonTemporal vector memory ops must be aligned.
2329 if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2330 // NT loads can only be vector aligned, so if it's less aligned than the
2331 // minimum vector size (which we can split the vector down to), we might as
2332 // well use a regular unaligned vector load.
2333 // We don't have any NT loads pre-SSE41.
2334 if (!!(Flags & MachineMemOperand::MOLoad))
2335 return (Align < 16 || !Subtarget.hasSSE41());
2338 // Misaligned accesses of any size are always allowed.
2342 /// Return the entry encoding for a jump table in the
2343 /// current function. The returned value is a member of the
2344 /// MachineJumpTableInfo::JTEntryKind enum.
2345 unsigned X86TargetLowering::getJumpTableEncoding() const {
2346 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2347 // symbol.
2348 if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2349 return MachineJumpTableInfo::EK_Custom32;
2351 // Otherwise, use the normal jump table encoding heuristics.
2352 return TargetLowering::getJumpTableEncoding();
2355 bool X86TargetLowering::useSoftFloat() const {
2356 return Subtarget.useSoftFloat();
2359 void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2360 ArgListTy &Args) const {
2362 // Only relabel X86-32 for C / Stdcall CCs.
2363 if (Subtarget.is64Bit())
2365 if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2367 unsigned ParamRegs = 0;
2368 if (auto *M = MF->getFunction().getParent())
2369 ParamRegs = M->getNumberRegisterParameters();
2371 // Mark the first N integer arguments as being passed in registers.
2372 for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
2373 Type *T = Args[Idx].Ty;
2374 if (T->isIntOrPtrTy())
2375 if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
2376 unsigned numRegs = 1;
2377 if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2379 if (ParamRegs < numRegs)
2381 ParamRegs -= numRegs;
2382 Args[Idx].IsInReg = true;
2388 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2389 const MachineBasicBlock *MBB,
2390 unsigned uid,MCContext &Ctx) const{
2391 assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2392 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2393 // entries.
2394 return MCSymbolRefExpr::create(MBB->getSymbol(),
2395 MCSymbolRefExpr::VK_GOTOFF, Ctx);
2398 /// Returns relocation base for the given PIC jumptable.
2399 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2400 SelectionDAG &DAG) const {
2401 if (!Subtarget.is64Bit())
2402 // This doesn't have SDLoc associated with it, but is not really the
2403 // same as a Register.
2404 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2405 getPointerTy(DAG.getDataLayout()));
2409 /// This returns the relocation base for the given PIC jumptable,
2410 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
2411 const MCExpr *X86TargetLowering::
2412 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2413 MCContext &Ctx) const {
2414 // X86-64 uses RIP relative addressing based on the jump table label.
2415 if (Subtarget.isPICStyleRIPRel())
2416 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2418 // Otherwise, the reference is relative to the PIC base.
2419 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2422 std::pair<const TargetRegisterClass *, uint8_t>
2423 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2425 const TargetRegisterClass *RRC = nullptr;
2427 switch (VT.SimpleTy) {
2429 return TargetLowering::findRepresentativeClass(TRI, VT);
2430 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2431 RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2434 RRC = &X86::VR64RegClass;
2436 case MVT::f32: case MVT::f64:
2437 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2438 case MVT::v4f32: case MVT::v2f64:
2439 case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2440 case MVT::v8f32: case MVT::v4f64:
2441 case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2442 case MVT::v16f32: case MVT::v8f64:
2443 RRC = &X86::VR128XRegClass;
2446 return std::make_pair(RRC, Cost);
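// Address spaces 256 and 257 are how the x86 backend models %gs- and
// %fs-relative addressing respectively; the stack protector and safe-stack
// hooks below use them to address the TLS-based guard slots.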
2449 unsigned X86TargetLowering::getAddressSpace() const {
2450 if (Subtarget.is64Bit())
2451 return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2452 return 256;
2455 static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2456 return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2457 (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2460 static Constant* SegmentOffset(IRBuilder<> &IRB,
2461 unsigned Offset, unsigned AddressSpace) {
2462 return ConstantExpr::getIntToPtr(
2463 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2464 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
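// For example, SegmentOffset(IRB, 0x28, 257) builds a constant pointer in
// address space 257 (%fs) that later folds into a single %fs:0x28 memory
// operand.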
2467 Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
2468 // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2469 // tcbhead_t; use it instead of the usual global variable (see
2470 // sysdeps/{i386,x86_64}/nptl/tls.h)
2471 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2472 if (Subtarget.isTargetFuchsia()) {
2473 // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2474 return SegmentOffset(IRB, 0x10, getAddressSpace());
2476 // %fs:0x28, unless we're using a Kernel code model, in which case
2477 // it's %gs:0x28. gs:0x14 on i386.
2478 unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2479 return SegmentOffset(IRB, Offset, getAddressSpace());
2483 return TargetLowering::getIRStackGuard(IRB);
2486 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
2487 // MSVC CRT provides functionalities for stack protection.
2488 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2489 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2490 // MSVC CRT has a global variable holding security cookie.
2491 M.getOrInsertGlobal("__security_cookie",
2492 Type::getInt8PtrTy(M.getContext()));
2494 // MSVC CRT has a function to validate security cookie.
2495 FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
2496 "__security_check_cookie", Type::getVoidTy(M.getContext()),
2497 Type::getInt8PtrTy(M.getContext()));
2498 if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
2499 F->setCallingConv(CallingConv::X86_FastCall);
2500 F->addAttribute(1, Attribute::AttrKind::InReg);
2504 // glibc, bionic, and Fuchsia have a special slot for the stack guard.
2505 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
2507 TargetLowering::insertSSPDeclarations(M);
2510 Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2511 // MSVC CRT has a global variable holding security cookie.
2512 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2513 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2514 return M.getGlobalVariable("__security_cookie");
2516 return TargetLowering::getSDagStackGuard(M);
2519 Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2520 // MSVC CRT has a function to validate security cookie.
2521 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2522 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2523 return M.getFunction("__security_check_cookie");
2525 return TargetLowering::getSSPStackGuardCheck(M);
2528 Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2529 if (Subtarget.getTargetTriple().isOSContiki())
2530 return getDefaultSafeStackPointerLocation(IRB, false);
2532 // Android provides a fixed TLS slot for the SafeStack pointer. See the
2533 // definition of TLS_SLOT_SAFESTACK in
2534 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2535 if (Subtarget.isTargetAndroid()) {
2536 // %fs:0x48, unless we're using a Kernel code model, in which case it's %gs:
2537 // 0x48.
2538 unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2539 return SegmentOffset(IRB, Offset, getAddressSpace());
2542 // Fuchsia is similar.
2543 if (Subtarget.isTargetFuchsia()) {
2544 // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
2545 return SegmentOffset(IRB, 0x18, getAddressSpace());
2548 return TargetLowering::getSafeStackPointerLocation(IRB);
2551 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2552 unsigned DestAS) const {
2553 assert(SrcAS != DestAS && "Expected different address spaces!");
2555 const TargetMachine &TM = getTargetMachine();
2556 if (TM.getPointerSize(SrcAS) != TM.getPointerSize(DestAS))
2559 return SrcAS < 256 && DestAS < 256;
2562 //===----------------------------------------------------------------------===//
2563 // Return Value Calling Convention Implementation
2564 //===----------------------------------------------------------------------===//
2566 bool X86TargetLowering::CanLowerReturn(
2567 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2568 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2569 SmallVector<CCValAssign, 16> RVLocs;
2570 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2571 return CCInfo.CheckReturn(Outs, RetCC_X86);
2574 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2575 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2576 return ScratchRegs;
2579 /// Lowers mask values (v*i1) to the corresponding local register values.
2580 /// \returns DAG node after lowering to register type
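/// For example, a v16i1 value that must live in an i32 register is bitcast
/// to i16 and then any-extended to i32, while v32i1/v64i1 are handled with a
/// single bitcast to i32/i64.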
2581 static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
2582 const SDLoc &Dl, SelectionDAG &DAG) {
2583 EVT ValVT = ValArg.getValueType();
2585 if (ValVT == MVT::v1i1)
2586 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
2587 DAG.getIntPtrConstant(0, Dl));
2589 if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
2590 (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
2591 // Two stage lowering might be required
2592 // bitcast: v8i1 -> i8 / v16i1 -> i16
2593 // anyextend: i8 -> i32 / i16 -> i32
2594 EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
2595 SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
2596 if (ValLoc == MVT::i32)
2597 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
2601 if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
2602 (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
2603 // One stage lowering is required
2604 // bitcast: v32i1 -> i32 / v64i1 -> i64
2605 return DAG.getBitcast(ValLoc, ValArg);
2608 return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
2611 /// Breaks v64i1 value into two registers and adds the new node to the DAG
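/// For example, on a 32-bit AVX512BW target a v64i1 argument is bitcast to
/// i64 and passed as two i32 halves: the low half in VA's register and the
/// high half in NextVA's register.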
2612 static void Passv64i1ArgInRegs(
2613 const SDLoc &Dl, SelectionDAG &DAG, SDValue &Arg,
2614 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, CCValAssign &VA,
2615 CCValAssign &NextVA, const X86Subtarget &Subtarget) {
2616 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
2617 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2618 assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
2619 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2620 "The value should reside in two registers");
2622 // Before splitting the value we cast it to i64
2623 Arg = DAG.getBitcast(MVT::i64, Arg);
2625 // Splitting the value into two i32 types
2626 SDValue Lo, Hi;
2627 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2628 DAG.getConstant(0, Dl, MVT::i32));
2629 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2630 DAG.getConstant(1, Dl, MVT::i32));
2632 // Attach the two i32 types into corresponding registers
2633 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
2634 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
2638 X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2640 const SmallVectorImpl<ISD::OutputArg> &Outs,
2641 const SmallVectorImpl<SDValue> &OutVals,
2642 const SDLoc &dl, SelectionDAG &DAG) const {
2643 MachineFunction &MF = DAG.getMachineFunction();
2644 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2646 // In some cases we need to disable registers from the default CSR list.
2647 // For example, when they are used for argument passing.
2648 bool ShouldDisableCalleeSavedRegister =
2649 CallConv == CallingConv::X86_RegCall ||
2650 MF.getFunction().hasFnAttribute("no_caller_saved_registers");
2652 if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2653 report_fatal_error("X86 interrupts may not return any value");
2655 SmallVector<CCValAssign, 16> RVLocs;
2656 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2657 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2660 SmallVector<SDValue, 6> RetOps;
2661 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2662 // Operand #1 = Bytes To Pop
2663 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2666 // Copy the result values into the output registers.
2667 for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
2669 CCValAssign &VA = RVLocs[I];
2670 assert(VA.isRegLoc() && "Can only return in registers!");
2672 // Add the register to the CalleeSaveDisableRegs list.
2673 if (ShouldDisableCalleeSavedRegister)
2674 MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
2676 SDValue ValToCopy = OutVals[OutsIndex];
2677 EVT ValVT = ValToCopy.getValueType();
2679 // Promote values to the appropriate types.
2680 if (VA.getLocInfo() == CCValAssign::SExt)
2681 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2682 else if (VA.getLocInfo() == CCValAssign::ZExt)
2683 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2684 else if (VA.getLocInfo() == CCValAssign::AExt) {
2685 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2686 ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
2688 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2690 else if (VA.getLocInfo() == CCValAssign::BCvt)
2691 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2693 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2694 "Unexpected FP-extend for return value.");
2696 // Report an error if we have attempted to return a value via an XMM
2697 // register and SSE was disabled.
2698 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
2699 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2700 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2701 } else if (!Subtarget.hasSSE2() &&
2702 X86::FR64XRegClass.contains(VA.getLocReg()) &&
2703 ValVT == MVT::f64) {
2704 // When returning a double via an XMM register, report an error if SSE2 is not enabled.
2706 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
2707 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2710 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2711 // the RET instruction and handled by the FP Stackifier.
2712 if (VA.getLocReg() == X86::FP0 ||
2713 VA.getLocReg() == X86::FP1) {
2714 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2715 // change the value to the FP stack register class.
2716 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2717 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2718 RetOps.push_back(ValToCopy);
2719 // Don't emit a copytoreg.
2723 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2724 // which is returned in RAX / RDX.
2725 if (Subtarget.is64Bit()) {
2726 if (ValVT == MVT::x86mmx) {
2727 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2728 ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2729 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2731 // If we don't have SSE2 available, convert to v4f32 so the generated
2732 // register is legal.
2733 if (!Subtarget.hasSSE2())
2734 ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2739 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2741 if (VA.needsCustom()) {
2742 assert(VA.getValVT() == MVT::v64i1 &&
2743 "Currently the only custom case is when we split v64i1 to 2 regs");
2745 Passv64i1ArgInRegs(dl, DAG, ValToCopy, RegsToPass, VA, RVLocs[++I],
2748 assert(2 == RegsToPass.size() &&
2749 "Expecting two registers after Pass64BitArgInRegs");
2751 // Add the second register to the CalleeSaveDisableRegs list.
2752 if (ShouldDisableCalleeSavedRegister)
2753 MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
2755 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2758 // Add nodes to the DAG and add the values into the RetOps list
2759 for (auto &Reg : RegsToPass) {
2760 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag);
2761 Flag = Chain.getValue(1);
2762 RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2766 // Swift calling convention does not require us to copy the sret argument
2767 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
2769 // All x86 ABIs require that for returning structs by value we copy
2770 // the sret argument into %rax/%eax (depending on ABI) for the return.
2771 // We saved the argument into a virtual register in the entry block,
2772 // so now we copy the value out and into %rax/%eax.
2774 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2775 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2776 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2777 // either case FuncInfo->setSRetReturnReg() will have been called.
2778 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2779 // When we have both sret and another return value, we should use the
2780 // original Chain stored in RetOps[0], instead of the current Chain updated
2781 // in the above loop. If we only have sret, RetOps[0] equals Chain.
2783 // For the case of sret and another return value, we have
2784 // Chain_0 at the function entry
2785 // Chain_1 = getCopyToReg(Chain_0) in the above loop
2786 // If we use Chain_1 in getCopyFromReg, we will have
2787 // Val = getCopyFromReg(Chain_1)
2788 // Chain_2 = getCopyToReg(Chain_1, Val) from below
2790 // getCopyToReg(Chain_0) will be glued together with
2791 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
2792 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
2793 // Data dependency from Unit B to Unit A due to usage of Val in
2794 // getCopyToReg(Chain_1, Val)
2795 // Chain dependency from Unit A to Unit B
2797 // So here, we use RetOps[0] (i.e. Chain_0) for getCopyFromReg.
2798 SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
2799 getPointerTy(MF.getDataLayout()));
2802 = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
2803 X86::RAX : X86::EAX;
2804 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2805 Flag = Chain.getValue(1);
2807 // RAX/EAX now acts like a return value.
2809 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2811 // Add the returned register to the CalleeSaveDisableRegs list.
2812 if (ShouldDisableCalleeSavedRegister)
2813 MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
2816 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
2817 const MCPhysReg *I =
2818 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2821 if (X86::GR64RegClass.contains(*I))
2822 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2824 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2828 RetOps[0] = Chain; // Update chain.
2830 // Add the flag if we have it.
2832 RetOps.push_back(Flag);
2834 X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2835 if (CallConv == CallingConv::X86_INTR)
2836 opcode = X86ISD::IRET;
2837 return DAG.getNode(opcode, dl, MVT::Other, RetOps);
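// Illustrative sketch: for a function returning a single i32 in EAX, RetOps
// ends up roughly as { Chain, TargetConstant(BytesToPop), Register(EAX), Glue },
// and the node built here is the X86ISD::RET_FLAG (or X86ISD::IRET for
// interrupt handlers) that instruction selection later turns into RET/IRET.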
2840 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2841 if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
2844 SDValue TCChain = Chain;
2845 SDNode *Copy = *N->use_begin();
2846 if (Copy->getOpcode() == ISD::CopyToReg) {
2847 // If the copy has a glue operand, we conservatively assume it isn't safe to
2848 // perform a tail call.
2849 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2851 TCChain = Copy->getOperand(0);
2852 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2855 bool HasRet = false;
2856 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2858 if (UI->getOpcode() != X86ISD::RET_FLAG)
2860 // If we are returning more than one value, we can definitely
2861 // not make a tail call; see PR19530.
2862 if (UI->getNumOperands() > 4)
2864 if (UI->getNumOperands() == 4 &&
2865 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2877 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
2878 ISD::NodeType ExtendKind) const {
2879 MVT ReturnMVT = MVT::i32;
2881 bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
2882 if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
2883 // The ABI does not require i1, i8 or i16 to be extended.
2885 // On Darwin, there is code in the wild relying on Clang's old behaviour of
2886 // always extending i8/i16 return values, so keep doing that for now.
2888 ReturnMVT = MVT::i8;
2891 EVT MinVT = getRegisterType(Context, ReturnMVT);
2892 return VT.bitsLT(MinVT) ? MinVT : VT;
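// Illustrative example: for VT == MVT::i8 on a non-Darwin target, ReturnMVT
// stays i8, so the value is returned unextended; on Darwin, ReturnMVT stays
// i32 and the i8 return value is widened to i32 first.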
2895 /// Reads two 32-bit registers and creates a 64-bit mask value.
2896 /// \param VA The current 32-bit value that needs to be assigned.
2897 /// \param NextVA The next 32-bit value that needs to be assigned.
2898 /// \param Root The parent DAG node.
2899 /// \param [in,out] InFlag Represents an SDValue in the parent DAG node used
2900 /// for glue purposes. When the DAG is already using a
2901 /// physical register instead of a virtual one, we glue
2902 /// our new SDValue to the InFlag SDValue.
2903 /// \return a new 64-bit SDValue.
2904 static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
2905 SDValue &Root, SelectionDAG &DAG,
2906 const SDLoc &Dl, const X86Subtarget &Subtarget,
2907 SDValue *InFlag = nullptr) {
2908 assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
2909 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2910 assert(VA.getValVT() == MVT::v64i1 &&
2911 "Expecting first location of 64 bit width type");
2912 assert(NextVA.getValVT() == VA.getValVT() &&
2913 "The locations should have the same type");
2914 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2915 "The values should reside in two registers");
2918 SDValue ArgValueLo, ArgValueHi;
2920 MachineFunction &MF = DAG.getMachineFunction();
2921 const TargetRegisterClass *RC = &X86::GR32RegClass;
2923 // Read a 32 bit value from the registers.
2924 if (nullptr == InFlag) {
2925 // When no physical register is present,
2926 // create an intermediate virtual register.
2927 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2928 ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2929 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2930 ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2932 // When a physical register is available, read the value from it and glue
2933 // the reads together.
2935 DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
2936 *InFlag = ArgValueLo.getValue(2);
2938 DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
2939 *InFlag = ArgValueHi.getValue(2);
2942 // Convert the i32 type into v32i1 type.
2943 Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
2945 // Convert the i32 type into v32i1 type.
2946 Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
2948 // Concatenate the two values together.
2949 return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
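// Illustrative sketch: the two incoming GR32 halves are reassembled roughly as
//   Lo = bitcast i32 -> v32i1,  Hi = bitcast i32 -> v32i1
//   Result = CONCAT_VECTORS(Lo, Hi) : v64i1
// i.e. the inverse of the split done by Passv64i1ArgInRegs above.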
2952 /// The function will lower a register of various sizes (8/16/32/64)
2953 /// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1)
2954 /// \returns a DAG node containing the operand after lowering to mask type.
2955 static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
2956 const EVT &ValLoc, const SDLoc &Dl,
2957 SelectionDAG &DAG) {
2958 SDValue ValReturned = ValArg;
2960 if (ValVT == MVT::v1i1)
2961 return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
2963 if (ValVT == MVT::v64i1) {
2964 // On a 32-bit target, this case is handled by getv64i1Argument
2965 assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
2966 // On a 64-bit target, there is no need to truncate the value; only bitcast it
2969 switch (ValVT.getSimpleVT().SimpleTy) {
2980 llvm_unreachable("Expecting a vector of i1 types");
2983 ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
2985 return DAG.getBitcast(ValVT, ValReturned);
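// Illustrative sketch: a v8i1 mask returned in an i32 location is recovered
// roughly as
//   Tmp = DAG.getNode(ISD::TRUNCATE, Dl, MVT::i8, ValReturned); // i32 -> i8
//   Res = DAG.getBitcast(MVT::v8i1, Tmp);                       // i8  -> v8i1
// undoing the bitcast/any-extend performed by lowerMasksToReg. (Tmp and Res
// are illustrative names.)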
2988 /// Lower the result values of a call into the
2989 /// appropriate copies out of appropriate physical registers.
2991 SDValue X86TargetLowering::LowerCallResult(
2992 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2993 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2994 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
2995 uint32_t *RegMask) const {
2997 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2998 // Assign locations to each value returned by this call.
2999 SmallVector<CCValAssign, 16> RVLocs;
3000 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
3002 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3004 // Copy all of the result registers out of their specified physreg.
3005 for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
3007 CCValAssign &VA = RVLocs[I];
3008 EVT CopyVT = VA.getLocVT();
3010 // In some calling conventions we need to remove the used registers
3011 // from the register mask.
3013 for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
3014 SubRegs.isValid(); ++SubRegs)
3015 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
3018 // Report an error if there was an attempt to return FP values via XMM registers when SSE is disabled.
3020 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
3021 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
3022 if (VA.getLocReg() == X86::XMM1)
3023 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3025 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3026 } else if (!Subtarget.hasSSE2() &&
3027 X86::FR64XRegClass.contains(VA.getLocReg()) &&
3028 CopyVT == MVT::f64) {
3029 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
3030 if (VA.getLocReg() == X86::XMM1)
3031 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3033 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3036 // If we prefer to use the value in xmm registers, copy it out as f80 and
3037 // use a truncate to move it from fp stack reg to xmm reg.
3038 bool RoundAfterCopy = false;
3039 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
3040 isScalarFPTypeInSSEReg(VA.getValVT())) {
3041 if (!Subtarget.hasX87())
3042 report_fatal_error("X87 register return with X87 disabled");
3044 RoundAfterCopy = (CopyVT != VA.getLocVT());
3048 if (VA.needsCustom()) {
3049 assert(VA.getValVT() == MVT::v64i1 &&
3050 "Currently the only custom case is when we split v64i1 to 2 regs");
3052 getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
3054 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
3056 Val = Chain.getValue(0);
3057 InFlag = Chain.getValue(2);
3061 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
3062 // This truncation won't change the value.
3063 DAG.getIntPtrConstant(1, dl));
3065 if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
3066 if (VA.getValVT().isVector() &&
3067 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3068 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3069 // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3070 Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
3072 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
3075 if (VA.getLocInfo() == CCValAssign::BCvt)
3076 Val = DAG.getBitcast(VA.getValVT(), Val);
3078 InVals.push_back(Val);
3084 //===----------------------------------------------------------------------===//
3085 // C & StdCall & Fast Calling Convention implementation
3086 //===----------------------------------------------------------------------===//
3087 // The StdCall calling convention is the standard for many Windows API
3088 // routines. It differs from the C calling convention only slightly: the
3089 // callee cleans up the stack rather than the caller, and symbols are
3090 // decorated with an "@<argument bytes>" suffix. It doesn't support any vector arguments.
3091 // For info on fast calling convention see Fast Calling Convention (tail call)
3092 // implementation LowerX86_32FastCCCallTo.
3094 /// CallIsStructReturn - Determines whether a call uses struct return semantics.
3096 enum StructReturnType {
3101 static StructReturnType
3102 callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
3104 return NotStructReturn;
3106 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
3107 if (!Flags.isSRet())
3108 return NotStructReturn;
3109 if (Flags.isInReg() || IsMCU)
3110 return RegStructReturn;
3111 return StackStructReturn;
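// Illustrative example (hypothetical IR): a call such as
//   call void @f(%struct.S* sret %out, i32 %x)
// is classified as StackStructReturn here, or RegStructReturn when the sret
// pointer is marked inreg or the target is an MCU.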
3114 /// Determines whether a function uses struct return semantics.
3115 static StructReturnType
3116 argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
3118 return NotStructReturn;
3120 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
3121 if (!Flags.isSRet())
3122 return NotStructReturn;
3123 if (Flags.isInReg() || IsMCU)
3124 return RegStructReturn;
3125 return StackStructReturn;
3128 /// Make a copy of an aggregate at address specified by "Src" to address
3129 /// "Dst" with size and alignment information specified by the specific
3130 /// parameter attribute. The copy will be passed as a byval function parameter.
3131 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
3132 SDValue Chain, ISD::ArgFlagsTy Flags,
3133 SelectionDAG &DAG, const SDLoc &dl) {
3134 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
3136 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
3137 /*isVolatile*/false, /*AlwaysInline=*/true,
3138 /*isTailCall*/false,
3139 MachinePointerInfo(), MachinePointerInfo());
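// Illustrative sketch: for a byval argument of, say, 24 bytes with 8-byte
// alignment, this emits a single always-inline memcpy node copying 24 bytes
// from the caller's object (Src) into the outgoing argument slot (Dst),
// chained on Chain.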
3142 /// Return true if the calling convention is one that we can guarantee TCO for.
3143 static bool canGuaranteeTCO(CallingConv::ID CC) {
3144 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3145 CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
3146 CC == CallingConv::HHVM || CC == CallingConv::Tail);
3149 /// Return true if we might ever do TCO for calls with this calling convention.
3150 static bool mayTailCallThisCC(CallingConv::ID CC) {
3152 // C calling conventions:
3153 case CallingConv::C:
3154 case CallingConv::Win64:
3155 case CallingConv::X86_64_SysV:
3156 // Callee pop conventions:
3157 case CallingConv::X86_ThisCall:
3158 case CallingConv::X86_StdCall:
3159 case CallingConv::X86_VectorCall:
3160 case CallingConv::X86_FastCall:
3162 case CallingConv::Swift:
3165 return canGuaranteeTCO(CC);
3169 /// Return true if the function is being made into a tailcall target by
3170 /// changing its ABI.
3171 static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
3172 return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) || CC == CallingConv::Tail;
3175 bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3176 if (!CI->isTailCall())
3179 ImmutableCallSite CS(CI);
3180 CallingConv::ID CalleeCC = CS.getCallingConv();
3181 if (!mayTailCallThisCC(CalleeCC))
3188 X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
3189 const SmallVectorImpl<ISD::InputArg> &Ins,
3190 const SDLoc &dl, SelectionDAG &DAG,
3191 const CCValAssign &VA,
3192 MachineFrameInfo &MFI, unsigned i) const {
3193 // Create the nodes corresponding to a load from this parameter slot.
3194 ISD::ArgFlagsTy Flags = Ins[i].Flags;
3195 bool AlwaysUseMutable = shouldGuaranteeTCO(
3196 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
3197 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
3199 MVT PtrVT = getPointerTy(DAG.getDataLayout());
3201 // If the value is passed by pointer, we have the address passed instead of the
3202 // value itself. No need to extend if the mask value and location share the same size.
3204 bool ExtendedInMem =
3205 VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
3206 VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
3208 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
3209 ValVT = VA.getLocVT();
3211 ValVT = VA.getValVT();
3213 // FIXME: For now, all byval parameter objects are marked mutable. This can be
3214 // changed with more analysis.
3215 // In case of tail call optimization, mark all arguments mutable, since they
3216 // could be overwritten by the lowering of arguments in case of a tail call.
3217 if (Flags.isByVal()) {
3218 unsigned Bytes = Flags.getByValSize();
3219 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
3221 // FIXME: For now, all byval parameter objects are marked as aliasing. This
3222 // can be improved with deeper analysis.
3223 int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
3224 /*isAliased=*/true);
3225 return DAG.getFrameIndex(FI, PtrVT);
3228 // This is an argument in memory. We might be able to perform copy elision.
3229 // If the argument is passed directly in memory without any extension, then we
3230 // can perform copy elision. Large vector types, for example, may be passed
3231 // indirectly by pointer.
3232 if (Flags.isCopyElisionCandidate() &&
3233 VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem) {
3234 EVT ArgVT = Ins[i].ArgVT;
3236 if (Ins[i].PartOffset == 0) {
3237 // If this is a one-part value or the first part of a multi-part value,
3238 // create a stack object for the entire argument value type and return a
3239 // load from our portion of it. This assumes that if the first part of an
3240 // argument is in memory, the rest will also be in memory.
3241 int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
3242 /*IsImmutable=*/false);
3243 PartAddr = DAG.getFrameIndex(FI, PtrVT);
3245 ValVT, dl, Chain, PartAddr,
3246 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3248 // This is not the first piece of an argument in memory. See if there is
3249 // already a fixed stack object including this offset. If so, assume it
3250 // was created by the PartOffset == 0 branch above and create a load from
3251 // the appropriate offset into it.
3252 int64_t PartBegin = VA.getLocMemOffset();
3253 int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
3254 int FI = MFI.getObjectIndexBegin();
3255 for (; MFI.isFixedObjectIndex(FI); ++FI) {
3256 int64_t ObjBegin = MFI.getObjectOffset(FI);
3257 int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
3258 if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
3261 if (MFI.isFixedObjectIndex(FI)) {
3263 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
3264 DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
3266 ValVT, dl, Chain, Addr,
3267 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
3268 Ins[i].PartOffset));
3273 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
3274 VA.getLocMemOffset(), isImmutable);
3276 // Set SExt or ZExt flag.
3277 if (VA.getLocInfo() == CCValAssign::ZExt) {
3278 MFI.setObjectZExt(FI, true);
3279 } else if (VA.getLocInfo() == CCValAssign::SExt) {
3280 MFI.setObjectSExt(FI, true);
3283 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3284 SDValue Val = DAG.getLoad(
3285 ValVT, dl, Chain, FIN,
3286 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3287 return ExtendedInMem
3288 ? (VA.getValVT().isVector()
3289 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
3290 : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
3294 // FIXME: Get this from tablegen.
3295 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
3296 const X86Subtarget &Subtarget) {
3297 assert(Subtarget.is64Bit());
3299 if (Subtarget.isCallingConvWin64(CallConv)) {
3300 static const MCPhysReg GPR64ArgRegsWin64[] = {
3301 X86::RCX, X86::RDX, X86::R8, X86::R9
3303 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
3306 static const MCPhysReg GPR64ArgRegs64Bit[] = {
3307 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
3309 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
3312 // FIXME: Get this from tablegen.
3313 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
3314 CallingConv::ID CallConv,
3315 const X86Subtarget &Subtarget) {
3316 assert(Subtarget.is64Bit());
3317 if (Subtarget.isCallingConvWin64(CallConv)) {
3318 // The XMM registers which might contain var arg parameters are shadowed
3319 // in their paired GPR. So we only need to save the GPR to their home slots.
3321 // TODO: __vectorcall will change this.
3325 const Function &F = MF.getFunction();
3326 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
3327 bool isSoftFloat = Subtarget.useSoftFloat();
3328 assert(!(isSoftFloat && NoImplicitFloatOps) &&
3329 "SSE register cannot be used when SSE is disabled!");
3330 if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
3331 // Kernel mode asks for SSE to be disabled, so there are no XMM argument registers.
3335 static const MCPhysReg XMMArgRegs64Bit[] = {
3336 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3337 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3339 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
3343 static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
3344 return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
3345 [](const CCValAssign &A, const CCValAssign &B) -> bool {
3346 return A.getValNo() < B.getValNo();
3351 SDValue X86TargetLowering::LowerFormalArguments(
3352 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3353 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3354 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3355 MachineFunction &MF = DAG.getMachineFunction();
3356 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3357 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3359 const Function &F = MF.getFunction();
3360 if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
3361 F.getName() == "main")
3362 FuncInfo->setForceFramePointer(true);
3364 MachineFrameInfo &MFI = MF.getFrameInfo();
3365 bool Is64Bit = Subtarget.is64Bit();
3366 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3369 !(isVarArg && canGuaranteeTCO(CallConv)) &&
3370 "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
3372 // Assign locations to all of the incoming arguments.
3373 SmallVector<CCValAssign, 16> ArgLocs;
3374 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3376 // Allocate shadow area for Win64.
3378 CCInfo.AllocateStack(32, 8);
3380 CCInfo.AnalyzeArguments(Ins, CC_X86);
3382 // In the vectorcall calling convention a second pass is required for the HVA registers.
3384 if (CallingConv::X86_VectorCall == CallConv) {
3385 CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
3388 // The next loop assumes that the locations are in the same order as the Ins declaration.
3390 assert(isSortedByValueNo(ArgLocs) &&
3391 "Argument Location list must be sorted before lowering");
3394 for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
3396 assert(InsIndex < Ins.size() && "Invalid Ins index");
3397 CCValAssign &VA = ArgLocs[I];
3399 if (VA.isRegLoc()) {
3400 EVT RegVT = VA.getLocVT();
3401 if (VA.needsCustom()) {
3403 VA.getValVT() == MVT::v64i1 &&
3404 "Currently the only custom case is when we split v64i1 to 2 regs");
3406 // In the regcall calling convention, v64i1 values compiled for a
3407 // 32-bit target are split up into two registers.
3409 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
3411 const TargetRegisterClass *RC;
3412 if (RegVT == MVT::i8)
3413 RC = &X86::GR8RegClass;
3414 else if (RegVT == MVT::i16)
3415 RC = &X86::GR16RegClass;
3416 else if (RegVT == MVT::i32)
3417 RC = &X86::GR32RegClass;
3418 else if (Is64Bit && RegVT == MVT::i64)
3419 RC = &X86::GR64RegClass;
3420 else if (RegVT == MVT::f32)
3421 RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
3422 else if (RegVT == MVT::f64)
3423 RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
3424 else if (RegVT == MVT::f80)
3425 RC = &X86::RFP80RegClass;
3426 else if (RegVT == MVT::f128)
3427 RC = &X86::VR128RegClass;
3428 else if (RegVT.is512BitVector())
3429 RC = &X86::VR512RegClass;
3430 else if (RegVT.is256BitVector())
3431 RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
3432 else if (RegVT.is128BitVector())
3433 RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
3434 else if (RegVT == MVT::x86mmx)
3435 RC = &X86::VR64RegClass;
3436 else if (RegVT == MVT::v1i1)
3437 RC = &X86::VK1RegClass;
3438 else if (RegVT == MVT::v8i1)
3439 RC = &X86::VK8RegClass;
3440 else if (RegVT == MVT::v16i1)
3441 RC = &X86::VK16RegClass;
3442 else if (RegVT == MVT::v32i1)
3443 RC = &X86::VK32RegClass;
3444 else if (RegVT == MVT::v64i1)
3445 RC = &X86::VK64RegClass;
3447 llvm_unreachable("Unknown argument type!");
3449 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3450 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3453 // If this is an 8 or 16-bit value, it is really passed promoted to 32
3454 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
3456 if (VA.getLocInfo() == CCValAssign::SExt)
3457 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3458 DAG.getValueType(VA.getValVT()));
3459 else if (VA.getLocInfo() == CCValAssign::ZExt)
3460 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3461 DAG.getValueType(VA.getValVT()));
3462 else if (VA.getLocInfo() == CCValAssign::BCvt)
3463 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
3465 if (VA.isExtInLoc()) {
3466 // Handle MMX values passed in XMM regs.
3467 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
3468 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
3469 else if (VA.getValVT().isVector() &&
3470 VA.getValVT().getScalarType() == MVT::i1 &&
3471 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3472 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3473 // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3474 ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
3476 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3479 assert(VA.isMemLoc());
3481 LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
3484 // If value is passed via pointer - do a load.
3485 if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
3487 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
3489 InVals.push_back(ArgValue);
3492 for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
3493 // Swift calling convention does not require we copy the sret argument
3494 // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
3495 if (CallConv == CallingConv::Swift)
3498 // All x86 ABIs require that for returning structs by value we copy the
3499 // sret argument into %rax/%eax (depending on ABI) for the return. Save
3500 // the argument into a virtual register so that we can access it from the
3502 if (Ins[I].Flags.isSRet()) {
3503 unsigned Reg = FuncInfo->getSRetReturnReg();
3505 MVT PtrTy = getPointerTy(DAG.getDataLayout());
3506 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
3507 FuncInfo->setSRetReturnReg(Reg);
3509 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
3510 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
3515 unsigned StackSize = CCInfo.getNextStackOffset();
3516 // Align stack specially for tail calls.
3517 if (shouldGuaranteeTCO(CallConv,
3518 MF.getTarget().Options.GuaranteedTailCallOpt))
3519 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
3521 // If the function takes a variable number of arguments, make a frame index for
3522 // the start of the first vararg value... for expansion of llvm.va_start. We
3523 // can skip this if there are no va_start calls.
3524 if (MFI.hasVAStart() &&
3525 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
3526 CallConv != CallingConv::X86_ThisCall))) {
3527 FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
3530 // Figure out if XMM registers are in use.
3531 assert(!(Subtarget.useSoftFloat() &&
3532 F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
3533 "SSE register cannot be used when SSE is disabled!");
3535 // 64-bit calling conventions support varargs and register parameters, so we
3536 // have to do extra work to spill them in the prologue.
3537 if (Is64Bit && isVarArg && MFI.hasVAStart()) {
3538 // Find the first unallocated argument registers.
3539 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3540 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
3541 unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3542 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3543 assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3544 "SSE register cannot be used when SSE is disabled!");
3546 // Gather all the live in physical registers.
3547 SmallVector<SDValue, 6> LiveGPRs;
3548 SmallVector<SDValue, 8> LiveXMMRegs;
3550 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3551 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
3553 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
3555 if (!ArgXMMs.empty()) {
3556 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3557 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
3558 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
3559 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
3560 LiveXMMRegs.push_back(
3561 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
3566 // Get to the caller-allocated home save location. Add 8 to account
3567 // for the return address.
3568 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
3569 FuncInfo->setRegSaveFrameIndex(
3570 MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3571 // Fixup to set vararg frame on shadow area (4 x i64).
3573 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3575 // For X86-64, if there are vararg parameters that are passed via
3576 // registers, then we must store them to their spots on the stack so
3577 // they may be loaded by dereferencing the result of va_next.
3578 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3579 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3580 FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject(
3581 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
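// Illustrative note: for the SysV x86-64 ABI, when all argument registers are
// available this save area is 6 GPRs * 8 + 8 XMMs * 16 = 176 bytes; va_arg
// later indexes into it using the gp_offset/fp_offset values derived from
// VarArgsGPOffset and VarArgsFPOffset set just above.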
3584 // Store the integer parameter registers.
3585 SmallVector<SDValue, 8> MemOps;
3586 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
3587 getPointerTy(DAG.getDataLayout()));
3588 unsigned Offset = FuncInfo->getVarArgsGPOffset();
3589 for (SDValue Val : LiveGPRs) {
3590 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3591 RSFIN, DAG.getIntPtrConstant(Offset, dl));
3593 DAG.getStore(Val.getValue(1), dl, Val, FIN,
3594 MachinePointerInfo::getFixedStack(
3595 DAG.getMachineFunction(),
3596 FuncInfo->getRegSaveFrameIndex(), Offset));
3597 MemOps.push_back(Store);
3601 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
3602 // Now store the XMM (fp + vector) parameter registers.
3603 SmallVector<SDValue, 12> SaveXMMOps;
3604 SaveXMMOps.push_back(Chain);
3605 SaveXMMOps.push_back(ALVal);
3606 SaveXMMOps.push_back(DAG.getIntPtrConstant(
3607 FuncInfo->getRegSaveFrameIndex(), dl));
3608 SaveXMMOps.push_back(DAG.getIntPtrConstant(
3609 FuncInfo->getVarArgsFPOffset(), dl));
3610 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
3612 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
3613 MVT::Other, SaveXMMOps));
3616 if (!MemOps.empty())
3617 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3620 if (isVarArg && MFI.hasMustTailInVarArgFunc()) {
3621 // Find the largest legal vector type.
3622 MVT VecVT = MVT::Other;
3623 // FIXME: Only some x86_32 calling conventions support AVX512.
3624 if (Subtarget.useAVX512Regs() &&
3625 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
3626 CallConv == CallingConv::Intel_OCL_BI)))
3627 VecVT = MVT::v16f32;
3628 else if (Subtarget.hasAVX())
3630 else if (Subtarget.hasSSE2())
3633 // We forward some GPRs and some vector types.
3634 SmallVector<MVT, 2> RegParmTypes;
3635 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
3636 RegParmTypes.push_back(IntVT);
3637 if (VecVT != MVT::Other)
3638 RegParmTypes.push_back(VecVT);
3640 // Compute the set of forwarded registers. The rest are scratch.
3641 SmallVectorImpl<ForwardedRegister> &Forwards =
3642 FuncInfo->getForwardedMustTailRegParms();
3643 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
3645 // Forward AL for SysV x86_64 targets, since it is used for varargs.
3646 if (Is64Bit && !IsWin64 && !CCInfo.isAllocated(X86::AL)) {
3647 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3648 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
3651 // Copy all forwards from physical to virtual registers.
3652 for (ForwardedRegister &FR : Forwards) {
3653 // FIXME: Can we use a less constrained schedule?
3654 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, FR.VReg, FR.VT);
3655 FR.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(FR.VT));
3656 Chain = DAG.getCopyToReg(Chain, dl, FR.VReg, RegVal);
3660 // Some CCs need callee pop.
3661 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3662 MF.getTarget().Options.GuaranteedTailCallOpt)) {
3663 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
3664 } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
3665 // X86 interrupts must pop the error code (and the alignment padding) if present.
3667 FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
3669 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
3670 // If this is an sret function, the return should pop the hidden pointer.
3671 if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
3672 !Subtarget.getTargetTriple().isOSMSVCRT() &&
3673 argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
3674 FuncInfo->setBytesToPopOnReturn(4);
3678 // RegSaveFrameIndex is X86-64 only.
3679 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3680 if (CallConv == CallingConv::X86_FastCall ||
3681 CallConv == CallingConv::X86_ThisCall)
3682 // fastcc functions can't have varargs.
3683 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
3686 FuncInfo->setArgumentStackSize(StackSize);
3688 if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
3689 EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
3690 if (Personality == EHPersonality::CoreCLR) {
3692 // TODO: Add a mechanism to frame lowering that will allow us to indicate
3693 // that we'd prefer this slot be allocated towards the bottom of the frame
3694 // (i.e. near the stack pointer after allocating the frame). Every
3695 // funclet needs a copy of this slot in its (mostly empty) frame, and the
3696 // offset from the bottom of this and each funclet's frame must be the
3697 // same, so the size of funclets' (mostly empty) frames is dictated by
3698 // how far this slot is from the bottom (since they allocate just enough
3699 // space to accommodate holding this slot at the correct offset).
3700 int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false);
3701 EHInfo->PSPSymFrameIdx = PSPSymFI;
3705 if (CallConv == CallingConv::X86_RegCall ||
3706 F.hasFnAttribute("no_caller_saved_registers")) {
3707 MachineRegisterInfo &MRI = MF.getRegInfo();
3708 for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
3709 MRI.disableCalleeSavedRegister(Pair.first);
3715 SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
3716 SDValue Arg, const SDLoc &dl,
3718 const CCValAssign &VA,
3719 ISD::ArgFlagsTy Flags) const {
3720 unsigned LocMemOffset = VA.getLocMemOffset();
3721 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
3722 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3724 if (Flags.isByVal())
3725 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
3727 return DAG.getStore(
3728 Chain, dl, Arg, PtrOff,
3729 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
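// Illustrative sketch: for a plain stack argument at LocMemOffset == 8 this
// produces roughly
//   PtrOff = StackPtr + 8
//   Chain  = store Arg -> PtrOff   (MachinePointerInfo::getStack(MF, 8))
// while byval arguments take the memcpy path via CreateCopyOfByValArgument.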
3732 /// Emit a load of the return address if tail call
3733 /// optimization is performed and it is required.
3734 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
3735 SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
3736 bool Is64Bit, int FPDiff, const SDLoc &dl) const {
3737 // Adjust the Return address stack slot.
3738 EVT VT = getPointerTy(DAG.getDataLayout());
3739 OutRetAddr = getReturnAddressFrameIndex(DAG);
3741 // Load the "old" Return address.
3742 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
3743 return SDValue(OutRetAddr.getNode(), 1);
3746 /// Emit a store of the return address if tail call
3747 /// optimization is performed and it is required (FPDiff!=0).
3748 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
3749 SDValue Chain, SDValue RetAddrFrIdx,
3750 EVT PtrVT, unsigned SlotSize,
3751 int FPDiff, const SDLoc &dl) {
3752 // Store the return address to the appropriate stack slot.
3753 if (!FPDiff) return Chain;
3754 // Calculate the new stack slot for the return address.
3755 int NewReturnAddrFI =
3756 MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
3758 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
3759 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
3760 MachinePointerInfo::getFixedStack(
3761 DAG.getMachineFunction(), NewReturnAddrFI));
3765 /// Returns a vector_shuffle mask for a movs{s|d}, movd
3766 /// operation of specified width.
3767 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
3769 unsigned NumElems = VT.getVectorNumElements();
3770 SmallVector<int, 8> Mask;
3771 Mask.push_back(NumElems);
3772 for (unsigned i = 1; i != NumElems; ++i)
3774 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
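// Illustrative example: for VT == MVT::v4i32 the mask built above is
// <4, 1, 2, 3>, i.e. element 0 is taken from V2 and the remaining elements
// come from V1, which is the MOVSS/MOVSD-style "replace the low element" pattern.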
3778 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3779 SmallVectorImpl<SDValue> &InVals) const {
3780 SelectionDAG &DAG = CLI.DAG;
3782 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3783 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3784 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3785 SDValue Chain = CLI.Chain;
3786 SDValue Callee = CLI.Callee;
3787 CallingConv::ID CallConv = CLI.CallConv;
3788 bool &isTailCall = CLI.IsTailCall;
3789 bool isVarArg = CLI.IsVarArg;
3791 MachineFunction &MF = DAG.getMachineFunction();
3792 bool Is64Bit = Subtarget.is64Bit();
3793 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3794 StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
3795 bool IsSibcall = false;
3796 bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
3797 CallConv == CallingConv::Tail;
3798 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
3799 const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
3800 const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
3801 bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
3802 (Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
3803 const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction());
3805 (CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
3806 const Module *M = MF.getMMI().getModule();
3807 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
3809 MachineFunction::CallSiteInfo CSInfo;
3811 if (CallConv == CallingConv::X86_INTR)
3812 report_fatal_error("X86 interrupts may not be called directly");
3814 if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO) {
3815 // If we are using a GOT, disable tail calls to external symbols with
3816 // default visibility. Tail calling such a symbol requires using a GOT
3817 // relocation, which forces early binding of the symbol. This breaks code
3818 // that requires lazy function symbol resolution. Using musttail or
3819 // GuaranteedTailCallOpt will override this.
3820 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3821 if (!G || (!G->getGlobal()->hasLocalLinkage() &&
3822 G->getGlobal()->hasDefaultVisibility()))
3826 bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
3828 // Force this to be a tail call. The verifier rules are enough to ensure
3829 // that we can lower this successfully without moving the return address around.
3832 } else if (isTailCall) {
3833 // Check if it's really possible to do a tail call.
3834 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
3835 isVarArg, SR != NotStructReturn,
3836 MF.getFunction().hasStructRetAttr(), CLI.RetTy,
3837 Outs, OutVals, Ins, DAG);
3839 // Sibcalls are automatically detected tailcalls which do not require ABI changes.
3841 if (!IsGuaranteeTCO && isTailCall)
3848 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
3849 "Var args not supported with calling convention fastcc, ghc or hipe");
3851 // Analyze operands of the call, assigning locations to each operand.
3852 SmallVector<CCValAssign, 16> ArgLocs;
3853 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3855 // Allocate shadow area for Win64.
3857 CCInfo.AllocateStack(32, 8);
3859 CCInfo.AnalyzeArguments(Outs, CC_X86);
3861 // In the vectorcall calling convention a second pass is required for the HVA registers.
3863 if (CallingConv::X86_VectorCall == CallConv) {
3864 CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
3867 // Get a count of how many bytes are to be pushed on the stack.
3868 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3870 // This is a sibcall. The memory operands are already available in the
3871 // caller's own caller's stack.
3873 else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
3874 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
3877 if (isTailCall && !IsSibcall && !IsMustTail) {
3878 // Lower arguments at fp - stackoffset + fpdiff.
3879 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
3881 FPDiff = NumBytesCallerPushed - NumBytes;
3883 // Set the delta of movement of the return address stack slot, but only
3884 // if this call requires a larger movement than any previous delta.
3885 if (FPDiff < X86Info->getTCReturnAddrDelta())
3886 X86Info->setTCReturnAddrDelta(FPDiff);
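// Illustrative example: if the caller was entered with 8 bytes of stack
// arguments but this tail call needs 24 bytes, FPDiff is 8 - 24 = -16, so the
// callee's arguments and the relocated return address are placed 16 bytes
// further down the stack.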
3889 unsigned NumBytesToPush = NumBytes;
3890 unsigned NumBytesToPop = NumBytes;
3892 // If we have an inalloca argument, all stack space has already been allocated
3893 // for us and will be right at the top of the stack. We don't support multiple
3894 // arguments passed in memory when using inalloca.
3895 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
3897 if (!ArgLocs.back().isMemLoc())
3898 report_fatal_error("cannot use inalloca attribute on a register "
3900 if (ArgLocs.back().getLocMemOffset() != 0)
3901 report_fatal_error("any parameter with the inalloca attribute must be "
3902 "the only memory argument");
3905 if (!IsSibcall && !IsMustTail)
3906 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
3907 NumBytes - NumBytesToPush, dl);
3909 SDValue RetAddrFrIdx;
3910 // Load return address for tail calls.
3911 if (isTailCall && FPDiff)
3912 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
3913 Is64Bit, FPDiff, dl);
3915 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3916 SmallVector<SDValue, 8> MemOpChains;
3919 // The next loop assumes that the locations are in the same order as the Outs declaration.
3921 assert(isSortedByValueNo(ArgLocs) &&
3922 "Argument Location list must be sorted before lowering");
3924 // Walk the register/memloc assignments, inserting copies/loads. In the case
3925 // of tail call optimization, arguments are handled later.
3926 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3927 for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
3929 assert(OutIndex < Outs.size() && "Invalid Out index");
3930 // Skip inalloca arguments, they have already been written.
3931 ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
3932 if (Flags.isInAlloca())
3935 CCValAssign &VA = ArgLocs[I];
3936 EVT RegVT = VA.getLocVT();
3937 SDValue Arg = OutVals[OutIndex];
3938 bool isByVal = Flags.isByVal();
3940 // Promote the value if needed.
3941 switch (VA.getLocInfo()) {
3942 default: llvm_unreachable("Unknown loc info!");
3943 case CCValAssign::Full: break;
3944 case CCValAssign::SExt:
3945 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
3947 case CCValAssign::ZExt:
3948 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
3950 case CCValAssign::AExt:
3951 if (Arg.getValueType().isVector() &&
3952 Arg.getValueType().getVectorElementType() == MVT::i1)
3953 Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
3954 else if (RegVT.is128BitVector()) {
3955 // Special case: passing MMX values in XMM registers.
3956 Arg = DAG.getBitcast(MVT::i64, Arg);
3957 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
3958 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
3960 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
3962 case CCValAssign::BCvt:
3963 Arg = DAG.getBitcast(RegVT, Arg);
3965 case CCValAssign::Indirect: {
3967 // Memcpy the argument to a temporary stack slot to prevent
3968 // the caller from seeing any modifications the callee may make
3969 // as guaranteed by the `byval` attribute.
3970 int FrameIdx = MF.getFrameInfo().CreateStackObject(
3971 Flags.getByValSize(), std::max(16, (int)Flags.getByValAlign()),
3974 DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
3976 CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
3977 // From now on treat this as a regular pointer
3981 // Store the argument.
3982 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
3983 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
3984 Chain = DAG.getStore(
3985 Chain, dl, Arg, SpillSlot,
3986 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3993 if (VA.needsCustom()) {
3994 assert(VA.getValVT() == MVT::v64i1 &&
3995 "Currently the only custom case is when we split v64i1 to 2 regs");
3996 // Split v64i1 value into two registers
3997 Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
3998 } else if (VA.isRegLoc()) {
3999 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4000 const TargetOptions &Options = DAG.getTarget().Options;
4001 if (Options.EnableDebugEntryValues)
4002 CSInfo.emplace_back(VA.getLocReg(), I);
4003 if (isVarArg && IsWin64) {
4004 // The Win64 ABI requires an argument XMM reg to be copied to the
4005 // corresponding shadow reg if the callee is a varargs function.
4006 unsigned ShadowReg = 0;
4007 switch (VA.getLocReg()) {
4008 case X86::XMM0: ShadowReg = X86::RCX; break;
4009 case X86::XMM1: ShadowReg = X86::RDX; break;
4010 case X86::XMM2: ShadowReg = X86::R8; break;
4011 case X86::XMM3: ShadowReg = X86::R9; break;
4014 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
4016 } else if (!IsSibcall && (!isTailCall || isByVal)) {
4017 assert(VA.isMemLoc());
4018 if (!StackPtr.getNode())
4019 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4020 getPointerTy(DAG.getDataLayout()));
4021 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
4022 dl, DAG, VA, Flags));
4026 if (!MemOpChains.empty())
4027 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4029 if (Subtarget.isPICStyleGOT()) {
4030 // ELF / PIC requires the GOT pointer in the EBX register before function calls via PLT.
4033 RegsToPass.push_back(std::make_pair(
4034 unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
4035 getPointerTy(DAG.getDataLayout()))));
4037 // If we are tail calling and generating PIC/GOT style code, load the
4038 // address of the callee into ECX. The value in ecx is used as target of
4039 // the tail jump. This is done to circumvent the ebx/callee-saved problem
4040 // for tail calls on PIC/GOT architectures. Normally we would just put the
4041 // address of GOT into ebx and then call target@PLT. But for tail calls
4042 // ebx would be restored (since ebx is callee saved) before jumping to the callee.
4045 // Note: The actual moving to ECX is done further down.
4046 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4047 if (G && !G->getGlobal()->hasLocalLinkage() &&
4048 G->getGlobal()->hasDefaultVisibility())
4049 Callee = LowerGlobalAddress(Callee, DAG);
4050 else if (isa<ExternalSymbolSDNode>(Callee))
4051 Callee = LowerExternalSymbol(Callee, DAG);
4055 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
4056 // From AMD64 ABI document:
4057 // For calls that may call functions that use varargs or stdargs
4058 // (prototype-less calls or calls to functions containing ellipsis (...) in
4059 // the declaration) %al is used as hidden argument to specify the number
4060 // of SSE registers used. The contents of %al do not need to match exactly
4061 // the number of registers, but must be an upper bound on the number of SSE
4062 // registers used and is in the range 0 - 8 inclusive.
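// Illustrative example: a call like printf("%f %f", a, b) passes two values
// in XMM registers, so NumXMMRegs below would be 2 and the lowering copies
// the constant 2 into AL right before the call (effectively "mov $2, %al").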
4064 // Count the number of XMM registers allocated.
4065 static const MCPhysReg XMMArgRegs[] = {
4066 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
4067 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
4069 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
4070 assert((Subtarget.hasSSE1() || !NumXMMRegs)
4071 && "SSE registers cannot be used when SSE is disabled");
4073 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
4074 DAG.getConstant(NumXMMRegs, dl,
4078 if (isVarArg && IsMustTail) {
4079 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
4080 for (const auto &F : Forwards) {
4081 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
4082 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
4086 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
4087 // don't need this because the eligibility check rejects calls that require
4088 // shuffling arguments passed in memory.
4089 if (!IsSibcall && isTailCall) {
4090 // Force all the incoming stack arguments to be loaded from the stack
4091 // before any new outgoing arguments are stored to the stack, because the
4092 // outgoing stack slots may alias the incoming argument stack slots, and
4093 // the alias isn't otherwise explicit. This is slightly more conservative
4094 // than necessary, because it means that each store effectively depends
4095 // on every argument instead of just those arguments it would clobber.
4096 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
4098 SmallVector<SDValue, 8> MemOpChains2;
4101 for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
4103 CCValAssign &VA = ArgLocs[I];
4105 if (VA.isRegLoc()) {
4106 if (VA.needsCustom()) {
4107 assert((CallConv == CallingConv::X86_RegCall) &&
4108 "Expecting custom case only in regcall calling convention");
4109 // This means that we are in a special case where one argument was
4110 // passed through two register locations - skip the next location.
4117 assert(VA.isMemLoc());
4118 SDValue Arg = OutVals[OutsIndex];
4119 ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
4120 // Skip inalloca arguments. They don't require any work.
4121 if (Flags.isInAlloca())
4123 // Create frame index.
4124 int32_t Offset = VA.getLocMemOffset()+FPDiff;
4125 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
4126 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4127 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4129 if (Flags.isByVal()) {
4130 // Copy relative to framepointer.
4131 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
4132 if (!StackPtr.getNode())
4133 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4134 getPointerTy(DAG.getDataLayout()));
4135 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4138 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
4142 // Store relative to framepointer.
4143 MemOpChains2.push_back(DAG.getStore(
4144 ArgChain, dl, Arg, FIN,
4145 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4149 if (!MemOpChains2.empty())
4150 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4152 // Store the return address to the appropriate stack slot.
4153 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
4154 getPointerTy(DAG.getDataLayout()),
4155 RegInfo->getSlotSize(), FPDiff, dl);
4158 // Build a sequence of copy-to-reg nodes chained together with token chain
4159 // and flag operands which copy the outgoing args into registers.
4161 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4162 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4163 RegsToPass[i].second, InFlag);
4164 InFlag = Chain.getValue(1);
4167 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
4168 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
4169 // In the 64-bit large code model, we have to make all calls
4170 // through a register, since the call instruction's 32-bit
4171 // pc-relative offset may not be large enough to hold the whole address.
4173 } else if (Callee->getOpcode() == ISD::GlobalAddress ||
4174 Callee->getOpcode() == ISD::ExternalSymbol) {
4175 // Lower direct calls to global addresses and external symbols. Setting
4176 // ForCall to true here has the effect of removing WrapperRIP when possible
4177 // to allow direct calls to be selected without first materializing the
4178 // address into a register.
4179 Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
4180 } else if (Subtarget.isTarget64BitILP32() &&
4181 Callee->getValueType(0) == MVT::i32) {
4182 // Zero-extend the 32-bit Callee address to 64 bits, as required by the x32 ABI.
4183 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
4186 // Returns a chain & a flag for retval copy to use.
4187 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
4188 SmallVector<SDValue, 8> Ops;
4190 if (!IsSibcall && isTailCall && !IsMustTail) {
4191 Chain = DAG.getCALLSEQ_END(Chain,
4192 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4193 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4194 InFlag = Chain.getValue(1);
4197 Ops.push_back(Chain);
4198 Ops.push_back(Callee);
4201 Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
4203 // Add argument registers to the end of the list so that they are known live into the call.
4205 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4206 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4207 RegsToPass[i].second.getValueType()));
4209 // Add a register mask operand representing the call-preserved registers.
4210 // If HasNCSR is asserted (the NoCallerSavedRegisters attribute is present),
4211 // use the X86_INTR calling convention's mask because it has the same CSR set
4212 // (same preserved registers).
4213 const uint32_t *Mask = RegInfo->getCallPreservedMask(
4214 MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
4215 assert(Mask && "Missing call preserved mask for calling convention");
4217 // If this is an invoke in a 32-bit function using a funclet-based
4218 // personality, assume the function clobbers all registers. If an exception
4219 // is thrown, the runtime will not restore CSRs.
4220 // FIXME: Model this more precisely so that we can register allocate across
4221 // the normal edge and spill and fill across the exceptional edge.
4222 if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
4223 const Function &CallerFn = MF.getFunction();
4224 EHPersonality Pers =
4225 CallerFn.hasPersonalityFn()
4226 ? classifyEHPersonality(CallerFn.getPersonalityFn())
4227 : EHPersonality::Unknown;
4228 if (isFuncletEHPersonality(Pers))
4229 Mask = RegInfo->getNoPreservedMask();
4232 // Define a new register mask from the existing mask.
4233 uint32_t *RegMask = nullptr;
4235 // In some calling conventions we need to remove the used physical registers
4236 // from the reg mask.
4237 if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
4238 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4240 // Allocate a new Reg Mask and copy Mask.
4241 RegMask = MF.allocateRegMask();
4242 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4243 memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4245 // Make sure all sub-registers of the argument registers are reset in the register mask.
4247 for (auto const &RegPair : RegsToPass)
4248 for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
4249 SubRegs.isValid(); ++SubRegs)
4250 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
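// For example, if EAX is one of the argument registers, this loop also clears
// AX, AH and AL: each register R corresponds to bit (R % 32) of word (R / 32)
// in RegMask, so the callee is no longer assumed to preserve any alias of a
// register that carries an outgoing argument.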
4252 // Create the RegMask Operand according to our updated mask.
4253 Ops.push_back(DAG.getRegisterMask(RegMask));
4255 // Create the RegMask Operand according to the static mask.
4256 Ops.push_back(DAG.getRegisterMask(Mask));
4259 if (InFlag.getNode())
4260 Ops.push_back(InFlag);
4264 //// If this is the first return lowered for this function, add the regs
4265 //// to the liveout set for the function.
4266 // This isn't right, although it's probably harmless on x86; liveouts
4267 // should be computed from returns not tail calls. Consider a void
4268 // function making a tail call to a function returning int.
4269 MF.getFrameInfo().setHasTailCall();
4270 SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
4271 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
4275 if (HasNoCfCheck && IsCFProtectionSupported) {
4276 Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
4278 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
4280 InFlag = Chain.getValue(1);
4281 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
4283 // Save heapallocsite metadata.
4285 if (MDNode *HeapAlloc = CLI.CS->getMetadata("heapallocsite"))
4286 DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
4288 // Create the CALLSEQ_END node.
4289 unsigned NumBytesForCalleeToPop;
4290 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
4291 DAG.getTarget().Options.GuaranteedTailCallOpt))
4292 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
4293 else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
4294 !Subtarget.getTargetTriple().isOSMSVCRT() &&
4295 SR == StackStructReturn)
4296 // If this is a call to a struct-return function, the callee
4297 // pops the hidden struct pointer, so we have to push it back.
4298 // This is common for Darwin/X86, Linux & Mingw32 targets.
4299 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
4300 NumBytesForCalleeToPop = 4;
4302 NumBytesForCalleeToPop = 0; // Callee pops nothing.
4304 if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
4305 // No need to reset the stack after the call if the call doesn't return. To
4306 // keep the MachineInstr verifier happy, we'll pretend the callee does it for us.
4307 NumBytesForCalleeToPop = NumBytes;
4310 // Returns a flag for retval copy to use.
4312 Chain = DAG.getCALLSEQ_END(Chain,
4313 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4314 DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
4317 InFlag = Chain.getValue(1);
4320 // Handle result values, copying them out of physregs into vregs that we return.
4322 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
4326 //===----------------------------------------------------------------------===//
4327 // Fast Calling Convention (tail call) implementation
4328 //===----------------------------------------------------------------------===//
4330 // Like stdcall, the callee cleans up the arguments, except that ECX is
4331 // reserved for storing the address of the tail-called function. Only 2 registers
4332 // are free for argument passing (inreg). Tail call optimization is performed provided:
4334 // * tailcallopt is enabled
4335 // * caller/callee are fastcc
4336 // On X86_64 architecture with GOT-style position independent code only local
4337 // (within module) calls are supported at the moment.
4338 // To keep the stack aligned according to the platform ABI, the function
4339 // GetAlignedArgumentStackSize ensures that the argument delta is always a multiple
4340 // of the stack alignment. (Dynamic linkers need this - darwin's dyld for example)
4341 // If a tail called function callee has more arguments than the caller the
4342 // caller needs to make sure that there is room to move the RETADDR to. This is
4343 // achieved by reserving an area the size of the argument delta right after the
4344 // original RETADDR, but before the saved framepointer or the spilled registers
4345 // e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
4357 /// Make the stack size aligned, e.g. 16n + 12 aligned for a 16-byte alignment requirement.
4360 X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
4361 SelectionDAG &DAG) const {
4362 const Align StackAlignment(Subtarget.getFrameLowering()->getStackAlignment());
4363 const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
4364 assert(StackSize % SlotSize == 0 &&
4365 "StackSize must be a multiple of SlotSize");
4366 return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
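// Illustrative arithmetic: with a 16-byte stack alignment and an 8-byte slot
// size, a StackSize of 32 yields alignTo(40, 16) - 8 == 40, i.e. the result is
// always of the form 16n + 8, so adding the return-address slot brings the
// total back to a multiple of the stack alignment.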
4369 /// Return true if the given stack call argument is already available in the
4370 /// same position (relatively) of the caller's incoming argument stack.
4372 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
4373 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
4374 const X86InstrInfo *TII, const CCValAssign &VA) {
4375 unsigned Bytes = Arg.getValueSizeInBits() / 8;
4378 // Look through nodes that don't alter the bits of the incoming value.
4379 unsigned Op = Arg.getOpcode();
4380 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
4381 Arg = Arg.getOperand(0);
4384 if (Op == ISD::TRUNCATE) {
4385 const SDValue &TruncInput = Arg.getOperand(0);
4386 if (TruncInput.getOpcode() == ISD::AssertZext &&
4387 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
4388 Arg.getValueType()) {
4389 Arg = TruncInput.getOperand(0);
4397 if (Arg.getOpcode() == ISD::CopyFromReg) {
4398 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
4399 if (!Register::isVirtualRegister(VR))
4401 MachineInstr *Def = MRI->getVRegDef(VR);
4404 if (!Flags.isByVal()) {
4405 if (!TII->isLoadFromStackSlot(*Def, FI))
4408 unsigned Opcode = Def->getOpcode();
4409 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
4410 Opcode == X86::LEA64_32r) &&
4411 Def->getOperand(1).isFI()) {
4412 FI = Def->getOperand(1).getIndex();
4413 Bytes = Flags.getByValSize();
4417 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
4418 if (Flags.isByVal())
4419 // ByVal argument is passed in as a pointer but it's now being
4420 // dereferenced. e.g.
4421 // define @foo(%struct.X* %A) {
4422 // tail call @bar(%struct.X* byval %A)
4425 SDValue Ptr = Ld->getBasePtr();
4426 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
4429 FI = FINode->getIndex();
4430 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
4431 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
4432 FI = FINode->getIndex();
4433 Bytes = Flags.getByValSize();
4437 assert(FI != INT_MAX);
4438 if (!MFI.isFixedObjectIndex(FI))
4441 if (Offset != MFI.getObjectOffset(FI))
4444 // If this is not byval, check that the argument stack object is immutable.
4445 // inalloca and argument copy elision can create mutable argument stack
4446 // objects. Byval objects can be mutated, but a byval call intends to pass the
4448 if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
4451 if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
4452 // If the argument location is wider than the argument type, check that any
4453 // extension flags match.
4454 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
4455 Flags.isSExt() != MFI.isObjectSExt(FI)) {
4460 return Bytes == MFI.getObjectSize(FI);
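// For example, a tail call that simply forwards the caller's own incoming
// stack argument (same fixed frame index, same offset and size, still an
// immutable object) matches here, so no copy into the outgoing slot is needed.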
4463 /// Check whether the call is eligible for tail call optimization. Targets
4464 /// that want to do tail call optimization should implement this function.
4465 bool X86TargetLowering::IsEligibleForTailCallOptimization(
4466 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
4467 bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
4468 const SmallVectorImpl<ISD::OutputArg> &Outs,
4469 const SmallVectorImpl<SDValue> &OutVals,
4470 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4471 if (!mayTailCallThisCC(CalleeCC))
4474 // If -tailcallopt is specified, make fastcc functions tail-callable.
4475 MachineFunction &MF = DAG.getMachineFunction();
4476 const Function &CallerF = MF.getFunction();
4478 // If the function return type is x86_fp80 and the callee return type is not,
4479 // then the FP_EXTEND of the call result is not a nop. It's not safe to
4480 // perform a tailcall optimization here.
4481 if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
4484 CallingConv::ID CallerCC = CallerF.getCallingConv();
4485 bool CCMatch = CallerCC == CalleeCC;
4486 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
4487 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
4488 bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
4489 CalleeCC == CallingConv::Tail;
4491 // Win64 functions have extra shadow space for argument homing. Don't do the
4492 // sibcall if the caller and callee have mismatched expectations for this space.
4494 if (IsCalleeWin64 != IsCallerWin64)
4497 if (IsGuaranteeTCO) {
4498 if (canGuaranteeTCO(CalleeCC) && CCMatch)
4503 // Look for obvious safe cases to perform tail call optimization that do not
4504 // require ABI changes. This is what gcc calls sibcall.
4506 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
4507 // emit a special epilogue.
4508 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4509 if (RegInfo->needsStackRealignment(MF))
4512 // Also avoid sibcall optimization if either caller or callee uses struct
4513 // return semantics.
4514 if (isCalleeStructRet || isCallerStructRet)
4517 // Do not sibcall optimize vararg calls unless all arguments are passed via registers.
4519 LLVMContext &C = *DAG.getContext();
4520 if (isVarArg && !Outs.empty()) {
4521 // Optimizing for varargs on Win64 is unlikely to be safe without
4522 // additional testing.
4523 if (IsCalleeWin64 || IsCallerWin64)
4526 SmallVector<CCValAssign, 16> ArgLocs;
4527 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4529 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4530 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
4531 if (!ArgLocs[i].isRegLoc())
4535 // If the call result is in ST0 / ST1, it needs to be popped off the x87
4536 // stack. Therefore, if it's not used by the call it is not safe to optimize
4537 // this into a sibcall.
4538 bool Unused = false;
4539 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4546 SmallVector<CCValAssign, 16> RVLocs;
4547 CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
4548 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
4549 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4550 CCValAssign &VA = RVLocs[i];
4551 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
4556 // Check that the call results are passed in the same way.
4557 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
4558 RetCC_X86, RetCC_X86))
4560 // The callee has to preserve all registers the caller needs to preserve.
4561 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4562 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4564 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4565 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
4569 unsigned StackArgsSize = 0;
4571 // If the callee takes no arguments then go on to check the results of the call.
4573 if (!Outs.empty()) {
4574 // Check if stack adjustment is needed. For now, do not do this if any
4575 // argument is passed on the stack.
4576 SmallVector<CCValAssign, 16> ArgLocs;
4577 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4579 // Allocate shadow area for Win64
4581 CCInfo.AllocateStack(32, 8);
4583 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4584 StackArgsSize = CCInfo.getNextStackOffset();
4586 if (CCInfo.getNextStackOffset()) {
4587 // Check if the arguments are already laid out in the right way as
4588 // the caller's fixed stack objects.
4589 MachineFrameInfo &MFI = MF.getFrameInfo();
4590 const MachineRegisterInfo *MRI = &MF.getRegInfo();
4591 const X86InstrInfo *TII = Subtarget.getInstrInfo();
4592 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4593 CCValAssign &VA = ArgLocs[i];
4594 SDValue Arg = OutVals[i];
4595 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4596 if (VA.getLocInfo() == CCValAssign::Indirect)
4598 if (!VA.isRegLoc()) {
4599 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
4606 bool PositionIndependent = isPositionIndependent();
4607 // If the tailcall address may be in a register, then make sure it's
4608 // possible to register allocate for it. In 32-bit, the call address can
4609 // only target EAX, EDX, or ECX since the tail call must be scheduled after
4610 // callee-saved registers are restored. These happen to be the same
4611 // registers used to pass 'inreg' arguments so watch out for those.
4612 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
4613 !isa<ExternalSymbolSDNode>(Callee)) ||
4614 PositionIndependent)) {
4615 unsigned NumInRegs = 0;
4616 // In PIC we need an extra register to formulate the address computation for the callee.
4618 unsigned MaxInRegs = PositionIndependent ? 2 : 3;
4620 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4621 CCValAssign &VA = ArgLocs[i];
4624 Register Reg = VA.getLocReg();
4627 case X86::EAX: case X86::EDX: case X86::ECX:
4628 if (++NumInRegs == MaxInRegs)
4635 const MachineRegisterInfo &MRI = MF.getRegInfo();
4636 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
4640 bool CalleeWillPop =
4641 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
4642 MF.getTarget().Options.GuaranteedTailCallOpt);
4644 if (unsigned BytesToPop =
4645 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
4646 // If we have bytes to pop, the callee must pop them.
4647 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
4648 if (!CalleePopMatches)
4650 } else if (CalleeWillPop && StackArgsSize > 0) {
4651 // If we don't have bytes to pop, make sure the callee doesn't pop any.
4659 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
4660 const TargetLibraryInfo *libInfo) const {
4661 return X86::createFastISel(funcInfo, libInfo);
4664 //===----------------------------------------------------------------------===//
4665 // Other Lowering Hooks
4666 //===----------------------------------------------------------------------===//
4668 static bool MayFoldLoad(SDValue Op) {
4669 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
4672 static bool MayFoldIntoStore(SDValue Op) {
4673 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
4676 static bool MayFoldIntoZeroExtend(SDValue Op) {
4677 if (Op.hasOneUse()) {
4678 unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
4679 return (ISD::ZERO_EXTEND == Opcode);
4684 static bool isTargetShuffle(unsigned Opcode) {
4686 default: return false;
4687 case X86ISD::BLENDI:
4688 case X86ISD::PSHUFB:
4689 case X86ISD::PSHUFD:
4690 case X86ISD::PSHUFHW:
4691 case X86ISD::PSHUFLW:
4693 case X86ISD::INSERTPS:
4694 case X86ISD::EXTRQI:
4695 case X86ISD::INSERTQI:
4696 case X86ISD::PALIGNR:
4697 case X86ISD::VSHLDQ:
4698 case X86ISD::VSRLDQ:
4699 case X86ISD::MOVLHPS:
4700 case X86ISD::MOVHLPS:
4701 case X86ISD::MOVSHDUP:
4702 case X86ISD::MOVSLDUP:
4703 case X86ISD::MOVDDUP:
4706 case X86ISD::UNPCKL:
4707 case X86ISD::UNPCKH:
4708 case X86ISD::VBROADCAST:
4709 case X86ISD::VPERMILPI:
4710 case X86ISD::VPERMILPV:
4711 case X86ISD::VPERM2X128:
4712 case X86ISD::SHUF128:
4713 case X86ISD::VPERMIL2:
4714 case X86ISD::VPERMI:
4715 case X86ISD::VPPERM:
4716 case X86ISD::VPERMV:
4717 case X86ISD::VPERMV3:
4718 case X86ISD::VZEXT_MOVL:
4723 static bool isTargetShuffleVariableMask(unsigned Opcode) {
4725 default: return false;
4727 case X86ISD::PSHUFB:
4728 case X86ISD::VPERMILPV:
4729 case X86ISD::VPERMIL2:
4730 case X86ISD::VPPERM:
4731 case X86ISD::VPERMV:
4732 case X86ISD::VPERMV3:
4734 // 'Faux' Target Shuffles.
4742 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
4743 MachineFunction &MF = DAG.getMachineFunction();
4744 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4745 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4746 int ReturnAddrIndex = FuncInfo->getRAIndex();
4748 if (ReturnAddrIndex == 0) {
4749 // Set up a frame object for the return address.
4750 unsigned SlotSize = RegInfo->getSlotSize();
4751 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
4754 FuncInfo->setRAIndex(ReturnAddrIndex);
4757 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
4760 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
4761 bool hasSymbolicDisplacement) {
4762 // Offset should fit into a 32-bit immediate field.
4763 if (!isInt<32>(Offset))
4766 // If we don't have a symbolic displacement - we don't have any extra restrictions.
4768 if (!hasSymbolicDisplacement)
4771 // FIXME: Some tweaks might be needed for medium code model.
4772 if (M != CodeModel::Small && M != CodeModel::Kernel)
4775 // For the small code model we assume that the latest object is 16MB before the
4776 // end of the 31-bit boundary. We may also accept pretty large negative constants
4777 // knowing that all objects are in the positive half of the address space.
4778 if (M == CodeModel::Small && Offset < 16*1024*1024)
4781 // For the kernel code model we know that all objects reside in the negative half
4782 // of the 32-bit address space. We must not accept negative offsets, since they may
4783 // be just out of range, but we may accept pretty large positive ones.
4784 if (M == CodeModel::Kernel && Offset >= 0)
4790 /// Determines whether the callee is required to pop its own arguments.
4791 /// Callee pop is necessary to support tail calls.
4792 bool X86::isCalleePop(CallingConv::ID CallingConv,
4793 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
4794 // If GuaranteeTCO is true, we force some calls to be callee pop so that we
4795 // can guarantee TCO.
4796 if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
4799 switch (CallingConv) {
4802 case CallingConv::X86_StdCall:
4803 case CallingConv::X86_FastCall:
4804 case CallingConv::X86_ThisCall:
4805 case CallingConv::X86_VectorCall:
4810 /// Return true if the condition is a signed comparison operation.
4811 static bool isX86CCSigned(unsigned X86CC) {
4814 llvm_unreachable("Invalid integer condition!");
4830 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
4831 switch (SetCCOpcode) {
4832 default: llvm_unreachable("Invalid integer condition!");
4833 case ISD::SETEQ: return X86::COND_E;
4834 case ISD::SETGT: return X86::COND_G;
4835 case ISD::SETGE: return X86::COND_GE;
4836 case ISD::SETLT: return X86::COND_L;
4837 case ISD::SETLE: return X86::COND_LE;
4838 case ISD::SETNE: return X86::COND_NE;
4839 case ISD::SETULT: return X86::COND_B;
4840 case ISD::SETUGT: return X86::COND_A;
4841 case ISD::SETULE: return X86::COND_BE;
4842 case ISD::SETUGE: return X86::COND_AE;
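// For example, an unsigned (setult a, b) maps to COND_B, which instruction
// selection can then consume as JB, SETB or CMOVB after the compare.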
4846 /// Do a one-to-one translation of a ISD::CondCode to the X86-specific
4847 /// condition code, returning the condition code and the LHS/RHS of the
4848 /// comparison to make.
4849 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
4850 bool isFP, SDValue &LHS, SDValue &RHS,
4851 SelectionDAG &DAG) {
4853 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4854 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
4855 // X > -1 -> X == 0, jump !sign.
4856 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4857 return X86::COND_NS;
4859 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
4860 // X < 0 -> X == 0, jump on sign.
4863 if (SetCCOpcode == ISD::SETGE && RHSC->isNullValue()) {
4864 // X >= 0 -> X == 0, jump on !sign.
4865 return X86::COND_NS;
4867 if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
4869 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4870 return X86::COND_LE;
4874 return TranslateIntegerX86CC(SetCCOpcode);
4877 // First determine if it is required or is profitable to flip the operands.
4879 // If LHS is a foldable load, but RHS is not, flip the condition.
4880 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
4881 !ISD::isNON_EXTLoad(RHS.getNode())) {
4882 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
4883 std::swap(LHS, RHS);
4886 switch (SetCCOpcode) {
4892 std::swap(LHS, RHS);
4896 // On a floating point condition, the flags are set as follows:
// ZF | PF | CF | op
4898 // 0 | 0 | 0 | X > Y
4899 // 0 | 0 | 1 | X < Y
4900 // 1 | 0 | 0 | X == Y
4901 // 1 | 1 | 1 | unordered
4902 switch (SetCCOpcode) {
4903 default: llvm_unreachable("Condcode should be pre-legalized away");
4905 case ISD::SETEQ: return X86::COND_E;
4906 case ISD::SETOLT: // flipped
4908 case ISD::SETGT: return X86::COND_A;
4909 case ISD::SETOLE: // flipped
4911 case ISD::SETGE: return X86::COND_AE;
4912 case ISD::SETUGT: // flipped
4914 case ISD::SETLT: return X86::COND_B;
4915 case ISD::SETUGE: // flipped
4917 case ISD::SETLE: return X86::COND_BE;
4919 case ISD::SETNE: return X86::COND_NE;
4920 case ISD::SETUO: return X86::COND_P;
4921 case ISD::SETO: return X86::COND_NP;
4923 case ISD::SETUNE: return X86::COND_INVALID;
4927 /// Is there a floating point cmov for the specific X86 condition code?
4928 /// The current x86 ISA includes the following FP cmov instructions:
4929 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
4930 static bool hasFPCMov(unsigned X86CC) {
4947 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4949 MachineFunction &MF,
4950 unsigned Intrinsic) const {
4952 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
4956 Info.flags = MachineMemOperand::MONone;
4959 switch (IntrData->Type) {
4960 case TRUNCATE_TO_MEM_VI8:
4961 case TRUNCATE_TO_MEM_VI16:
4962 case TRUNCATE_TO_MEM_VI32: {
4963 Info.opc = ISD::INTRINSIC_VOID;
4964 Info.ptrVal = I.getArgOperand(0);
4965 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
4966 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
4967 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
4969 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
4970 ScalarVT = MVT::i16;
4971 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
4972 ScalarVT = MVT::i32;
4974 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
4975 Info.align = Align::None();
4976 Info.flags |= MachineMemOperand::MOStore;
4981 Info.opc = ISD::INTRINSIC_W_CHAIN;
4982 Info.ptrVal = nullptr;
4983 MVT DataVT = MVT::getVT(I.getType());
4984 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4985 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4986 IndexVT.getVectorNumElements());
4987 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
4988 Info.align = Align::None();
4989 Info.flags |= MachineMemOperand::MOLoad;
4993 Info.opc = ISD::INTRINSIC_VOID;
4994 Info.ptrVal = nullptr;
4995 MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
4996 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4997 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4998 IndexVT.getVectorNumElements());
4999 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5000 Info.align = Align::None();
5001 Info.flags |= MachineMemOperand::MOStore;
5011 /// Returns true if the target can instruction select the
5012 /// specified FP immediate natively. If false, the legalizer will
5013 /// materialize the FP immediate as a load from a constant pool.
5014 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
5015 bool ForCodeSize) const {
5016 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
5017 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
5023 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
5024 ISD::LoadExtType ExtTy,
5026 assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
5028 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
5029 // relocation targets a movq or addq instruction: don't let the load shrink.
5030 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
5031 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
5032 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
5033 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
5035 // If this is a (1) AVX vector load with (2) multiple uses and (3) all of
5036 // those uses are extracted directly into a store, then the extract + store
5037 // can be store-folded. Therefore, it's probably not worth splitting the load.
5038 EVT VT = Load->getValueType(0);
5039 if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
5040 for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
5041 // Skip uses of the chain value. Result 0 of the node is the load value.
5042 if (UI.getUse().getResNo() != 0)
5045 // If this use is not an extract + store, it's probably worth splitting.
5046 if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
5047 UI->use_begin()->getOpcode() != ISD::STORE)
5050 // All non-chain uses are extract + store.
5057 /// Returns true if it is beneficial to convert a load of a constant
5058 /// to just the constant itself.
5059 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
5061 assert(Ty->isIntegerTy());
5063 unsigned BitSize = Ty->getPrimitiveSizeInBits();
5064 if (BitSize == 0 || BitSize > 64)
5069 bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
5070 // If we are using XMM registers in the ABI and the condition of the select is
5071 // a floating-point compare and we have blendv or conditional move, then it is
5072 // cheaper to select instead of doing a cross-register move and creating a
5073 // load that depends on the compare result.
5074 bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
5075 return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
5078 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
5079 // TODO: It might be a win to ease or lift this restriction, but the generic
5080 // folds in DAGCombiner conflict with vector folds for an AVX512 target.
5081 if (VT.isVector() && Subtarget.hasAVX512())
5087 bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
5089 // TODO: We handle scalars using custom code, but generic combining could make
5090 // that unnecessary.
5092 if (!ISD::isConstantSplatVector(C.getNode(), MulC))
5095 // Find the type this will be legalized to. Otherwise we might prematurely
5096 // convert this to shl+add/sub and then still have to type legalize those ops.
5097 // Another choice would be to defer the decision for illegal types until
5098 // after type legalization. But constant splat vectors of i64 can't make it
5099 // through type legalization on 32-bit targets, so we would need to special-case them.
5101 while (getTypeAction(Context, VT) != TypeLegal)
5102 VT = getTypeToTransformTo(Context, VT);
5104 // If vector multiply is legal, assume that's faster than shl + add/sub.
5105 // TODO: Multiply is a complex op with higher latency and lower throughput in
5106 // most implementations, so this check could be loosened based on type
5107 // and/or a CPU attribute.
5108 if (isOperationLegal(ISD::MUL, VT))
5111 // shl+add, shl+sub, shl+add+neg
5112 return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
5113 (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
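// Worked example: MulC == 17 decomposes because (MulC - 1) == 16 is a power of
// two, i.e. x * 17 == (x << 4) + x; MulC == 6 fails all four checks, so the
// multiply is not broken into shifts here.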
5116 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
5117 unsigned Index) const {
5118 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
5121 // Mask vectors support all subregister combinations and operations that
5122 // extract half of the vector.
5123 if (ResVT.getVectorElementType() == MVT::i1)
5124 return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
5125 (Index == ResVT.getVectorNumElements()));
5127 return (Index % ResVT.getVectorNumElements()) == 0;
5130 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
5131 unsigned Opc = VecOp.getOpcode();
5133 // Assume target opcodes can't be scalarized.
5134 // TODO - do we have any exceptions?
5135 if (Opc >= ISD::BUILTIN_OP_END)
5138 // If the vector op is not supported, try to convert to scalar.
5139 EVT VecVT = VecOp.getValueType();
5140 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
5143 // If the vector op is supported, but the scalar op is not, the transform may
5144 // not be worthwhile.
5145 EVT ScalarVT = VecVT.getScalarType();
5146 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
5149 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT) const {
5150 // TODO: Allow vectors?
5153 return VT.isSimple() || !isOperationExpand(Opcode, VT);
5156 bool X86TargetLowering::isCheapToSpeculateCttz() const {
5157 // Speculate cttz only if we can directly use TZCNT.
5158 return Subtarget.hasBMI();
5161 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
5162 // Speculate ctlz only if we can directly use LZCNT.
5163 return Subtarget.hasLZCNT();
5166 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
5167 const SelectionDAG &DAG,
5168 const MachineMemOperand &MMO) const {
5169 if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
5170 BitcastVT.getVectorElementType() == MVT::i1)
5173 if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
5176 // If both types are legal vectors, it's always ok to convert them.
5177 if (LoadVT.isVector() && BitcastVT.isVector() &&
5178 isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
5181 return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
5184 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
5185 const SelectionDAG &DAG) const {
5186 // Do not merge up to a float value size (128 bits) if the NoImplicitFloat
5187 // attribute is set.
5188 bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
5189 Attribute::NoImplicitFloat);
5192 unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
5193 return (MemVT.getSizeInBits() <= MaxIntSize);
5195 // Make sure we don't merge to greater than our preferred vector width.
5197 if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
5202 bool X86TargetLowering::isCtlzFast() const {
5203 return Subtarget.hasFastLZCNT();
5206 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
5207 const Instruction &AndI) const {
5211 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
5212 EVT VT = Y.getValueType();
5217 if (!Subtarget.hasBMI())
5220 // There are only 32-bit and 64-bit forms for 'andn'.
5221 if (VT != MVT::i32 && VT != MVT::i64)
5224 return !isa<ConstantSDNode>(Y);
5227 bool X86TargetLowering::hasAndNot(SDValue Y) const {
5228 EVT VT = Y.getValueType();
5231 return hasAndNotCompare(Y);
5235 if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
5238 if (VT == MVT::v4i32)
5241 return Subtarget.hasSSE2();
5244 bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
5245 return X.getValueType().isScalarInteger(); // 'bt'
5248 bool X86TargetLowering::
5249 shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5250 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
5251 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
5252 SelectionDAG &DAG) const {
5253 // Does baseline recommend not to perform the fold by default?
5254 if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5255 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
5257 // For scalars this transform is always beneficial.
5258 if (X.getValueType().isScalarInteger())
5260 // If all the shift amounts are identical, then transform is beneficial even
5261 // with rudimentary SSE2 shifts.
5262 if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
5264 // If we have AVX2 with its powerful shift operations, then it's also good.
5265 if (Subtarget.hasAVX2())
5267 // Pre-AVX2 vector codegen for this pattern is best for variant with 'shl'.
5268 return NewShiftOpcode == ISD::SHL;
5271 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
5272 const SDNode *N, CombineLevel Level) const {
5273 assert(((N->getOpcode() == ISD::SHL &&
5274 N->getOperand(0).getOpcode() == ISD::SRL) ||
5275 (N->getOpcode() == ISD::SRL &&
5276 N->getOperand(0).getOpcode() == ISD::SHL)) &&
5277 "Expected shift-shift mask");
5278 EVT VT = N->getValueType(0);
5279 if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
5280 (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
5281 // Only fold if the shift values are equal - so it folds to AND.
5282 // TODO - we should fold if either is a non-uniform vector but we don't do
5283 // the fold for non-splats yet.
5284 return N->getOperand(1) == N->getOperand(0).getOperand(1);
5286 return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
5289 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
5290 EVT VT = Y.getValueType();
5292 // For vectors, we don't have a preference, but we probably want a mask.
5296 // 64-bit shifts on 32-bit targets produce really bad bloated code.
5297 if (VT == MVT::i64 && !Subtarget.is64Bit())
5303 bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
5305 if (DAG.getMachineFunction().getFunction().hasMinSize() &&
5306 !Subtarget.isOSWindows())
5311 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
5312 // Any legal vector type can be splatted more efficiently than
5313 // loading/spilling from memory.
5314 return isTypeLegal(VT);
5317 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
5318 MVT VT = MVT::getIntegerVT(NumBits);
5319 if (isTypeLegal(VT))
5322 // PMOVMSKB can handle this.
5323 if (NumBits == 128 && isTypeLegal(MVT::v16i8))
5326 // VPMOVMSKB can handle this.
5327 if (NumBits == 256 && isTypeLegal(MVT::v32i8))
5330 // TODO: Allow 64-bit type for 32-bit target.
5331 // TODO: 512-bit types should be allowed, but make sure that those
5332 // cases are handled in combineVectorSizedSetCCEquality().
5334 return MVT::INVALID_SIMPLE_VALUE_TYPE;
5337 /// Val is the undef sentinel value or equal to the specified value.
5338 static bool isUndefOrEqual(int Val, int CmpVal) {
5339 return ((Val == SM_SentinelUndef) || (Val == CmpVal));
5342 /// Val is either the undef or zero sentinel value.
5343 static bool isUndefOrZero(int Val) {
5344 return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
5347 /// Return true if every element in Mask, beginning from position Pos and ending
5348 /// in Pos+Size is the undef sentinel value.
5349 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
5350 return llvm::all_of(Mask.slice(Pos, Size),
5351 [](int M) { return M == SM_SentinelUndef; });
5354 /// Return true if the mask creates a vector whose lower half is undefined.
5355 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
5356 unsigned NumElts = Mask.size();
5357 return isUndefInRange(Mask, 0, NumElts / 2);
5360 /// Return true if the mask creates a vector whose upper half is undefined.
5361 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
5362 unsigned NumElts = Mask.size();
5363 return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
5366 /// Return true if Val falls within the specified range [Low, Hi).
5367 static bool isInRange(int Val, int Low, int Hi) {
5368 return (Val >= Low && Val < Hi);
5371 /// Return true if the value of any element in Mask falls within the specified range [Low, Hi).
5373 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
5374 return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
5377 /// Return true if Val is undef or if its value falls within the
5378 /// specified range [Low, Hi).
5379 static bool isUndefOrInRange(int Val, int Low, int Hi) {
5380 return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
5383 /// Return true if every element in Mask is undef or if its value
5384 /// falls within the specified range [Low, Hi).
5385 static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5386 return llvm::all_of(
5387 Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
5390 /// Return true if Val is undef, zero or if its value falls within the
5391 /// specified range [Low, Hi).
5392 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
5393 return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
5396 /// Return true if every element in Mask is undef, zero or if its value
5397 /// falls within the specified range [Low, Hi).
5398 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5399 return llvm::all_of(
5400 Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
5403 /// Return true if every element in Mask, beginning
5404 /// from position Pos and ending in Pos + Size, falls within the specified
5405 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
5406 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
5407 unsigned Size, int Low, int Step = 1) {
5408 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5409 if (!isUndefOrEqual(Mask[i], Low))
5414 /// Return true if every element in Mask, beginning
5415 /// from position Pos and ending in Pos+Size, falls within the specified
5416 /// sequential range [Low, Low + Size), or is undef or is zero.
5417 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5418 unsigned Size, int Low,
5420 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5421 if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
5426 /// Return true if every element in Mask, beginning
5427 /// from position Pos and ending in Pos+Size is undef or is zero.
5428 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5430 return llvm::all_of(Mask.slice(Pos, Size),
5431 [](int M) { return isUndefOrZero(M); });
5434 /// Helper function to test whether a shuffle mask could be
5435 /// simplified by widening the elements being shuffled.
5437 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
5438 /// leaves it in an unspecified state.
5440 /// NOTE: This must handle normal vector shuffle masks and *target* vector
5441 /// shuffle masks. The latter have the special property of a '-2' representing
5442 /// a zero-ed lane of a vector.
5443 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5444 SmallVectorImpl<int> &WidenedMask) {
5445 WidenedMask.assign(Mask.size() / 2, 0);
5446 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
5448 int M1 = Mask[i + 1];
5450 // If both elements are undef, its trivial.
5451 if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
5452 WidenedMask[i / 2] = SM_SentinelUndef;
5456 // Check for an undef mask and a mask value properly aligned to fit with
5457 // a pair of values. If we find such a case, use the non-undef mask's value.
5458 if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
5459 WidenedMask[i / 2] = M1 / 2;
5462 if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
5463 WidenedMask[i / 2] = M0 / 2;
5467 // When zeroing, we need to spread the zeroing across both lanes to widen.
5468 if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
5469 if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
5470 (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
5471 WidenedMask[i / 2] = SM_SentinelZero;
5477 // Finally check if the two mask values are adjacent and aligned with
5479 if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
5480 WidenedMask[i / 2] = M0 / 2;
5484 // Otherwise we can't safely widen the elements used in this shuffle.
5487 assert(WidenedMask.size() == Mask.size() / 2 &&
5488 "Incorrect size of mask after widening the elements!");
5493 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5494 const APInt &Zeroable,
5496 SmallVectorImpl<int> &WidenedMask) {
5497 // Create an alternative mask with info about zeroable elements.
5498 // Here we do not set undef elements as zeroable.
5499 SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
5501 assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
5502 for (int i = 0, Size = Mask.size(); i != Size; ++i)
5503 if (Mask[i] != SM_SentinelUndef && Zeroable[i])
5504 ZeroableMask[i] = SM_SentinelZero;
5506 return canWidenShuffleElements(ZeroableMask, WidenedMask);
5509 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
5510 SmallVector<int, 32> WidenedMask;
5511 return canWidenShuffleElements(Mask, WidenedMask);
5514 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
5515 bool X86::isZeroNode(SDValue Elt) {
5516 return isNullConstant(Elt) || isNullFPConstant(Elt);
5519 // Build a vector of constants.
5520 // Use an UNDEF node if MaskElt == -1.
5521 // Split 64-bit constants in the 32-bit mode.
5522 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
5523 const SDLoc &dl, bool IsMask = false) {
5525 SmallVector<SDValue, 32> Ops;
5528 MVT ConstVecVT = VT;
5529 unsigned NumElts = VT.getVectorNumElements();
5530 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5531 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5532 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5536 MVT EltVT = ConstVecVT.getVectorElementType();
5537 for (unsigned i = 0; i < NumElts; ++i) {
5538 bool IsUndef = Values[i] < 0 && IsMask;
5539 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
5540 DAG.getConstant(Values[i], dl, EltVT);
5541 Ops.push_back(OpNode);
5543 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
5544 DAG.getConstant(0, dl, EltVT));
5546 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5548 ConstsNode = DAG.getBitcast(VT, ConstsNode);
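// For example, on a 32-bit target a <2 x i64> constant <1, 2> is built as the
// <4 x i32> vector <1, 0, 2, 0> (low word first on little-endian x86) and then
// bitcast back to the requested v2i64 type.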
5552 static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
5553 MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5554 assert(Bits.size() == Undefs.getBitWidth() &&
5555 "Unequal constant and undef arrays");
5556 SmallVector<SDValue, 32> Ops;
5559 MVT ConstVecVT = VT;
5560 unsigned NumElts = VT.getVectorNumElements();
5561 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5562 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5563 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5567 MVT EltVT = ConstVecVT.getVectorElementType();
5568 for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
5570 Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
5573 const APInt &V = Bits[i];
5574 assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
5576 Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
5577 Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
5578 } else if (EltVT == MVT::f32) {
5579 APFloat FV(APFloat::IEEEsingle(), V);
5580 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5581 } else if (EltVT == MVT::f64) {
5582 APFloat FV(APFloat::IEEEdouble(), V);
5583 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5585 Ops.push_back(DAG.getConstant(V, dl, EltVT));
5589 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5590 return DAG.getBitcast(VT, ConstsNode);
5593 /// Returns a vector of specified type with all zero elements.
5594 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
5595 SelectionDAG &DAG, const SDLoc &dl) {
5596 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
5597 VT.getVectorElementType() == MVT::i1) &&
5598 "Unexpected vector type");
5600 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
5601 // type. This ensures they get CSE'd. But if the integer type is not
5602 // available, use a floating-point +0.0 instead.
5604 if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
5605 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
5606 } else if (VT.isFloatingPoint()) {
5607 Vec = DAG.getConstantFP(+0.0, dl, VT);
5608 } else if (VT.getVectorElementType() == MVT::i1) {
5609 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
5610 "Unexpected vector type");
5611 Vec = DAG.getConstant(0, dl, VT);
5613 unsigned Num32BitElts = VT.getSizeInBits() / 32;
5614 Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
5616 return DAG.getBitcast(VT, Vec);
5619 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
5620 const SDLoc &dl, unsigned vectorWidth) {
5621 EVT VT = Vec.getValueType();
5622 EVT ElVT = VT.getVectorElementType();
5623 unsigned Factor = VT.getSizeInBits()/vectorWidth;
5624 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
5625 VT.getVectorNumElements()/Factor);
5627 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
5628 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
5629 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5631 // This is the index of the first element of the vectorWidth-bit chunk
5632 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5633 IdxVal &= ~(ElemsPerChunk - 1);
5635 // If the input is a buildvector just emit a smaller one.
5636 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
5637 return DAG.getBuildVector(ResultVT, dl,
5638 Vec->ops().slice(IdxVal, ElemsPerChunk));
5640 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5641 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
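// For example, extracting 128 bits from a v8i32 at IdxVal == 6 first rounds the
// index down to 4 (ElemsPerChunk == 4), so the node returns elements 4..7 as a
// v4i32 subvector.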
5644 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
5645 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
5646 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
5647 /// instructions or a simple subregister reference. Idx is an index in the
5648 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
5649 /// lowering EXTRACT_VECTOR_ELT operations easier.
5650 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
5651 SelectionDAG &DAG, const SDLoc &dl) {
5652 assert((Vec.getValueType().is256BitVector() ||
5653 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
5654 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
5657 /// Generate a DAG to grab 256-bits from a 512-bit vector.
5658 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
5659 SelectionDAG &DAG, const SDLoc &dl) {
5660 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
5661 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
5664 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5665 SelectionDAG &DAG, const SDLoc &dl,
5666 unsigned vectorWidth) {
5667 assert((vectorWidth == 128 || vectorWidth == 256) &&
5668 "Unsupported vector width");
5669 // Inserting an UNDEF subvector just yields Result unchanged.
5672 EVT VT = Vec.getValueType();
5673 EVT ElVT = VT.getVectorElementType();
5674 EVT ResultVT = Result.getValueType();
5676 // Insert the relevant vectorWidth bits.
5677 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
5678 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5680 // This is the index of the first element of the vectorWidth-bit chunk
5681 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5682 IdxVal &= ~(ElemsPerChunk - 1);
5684 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5685 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
5688 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
5689 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
5690 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
5691 /// simple superregister reference. Idx is an index in the 128 bits
5692 /// we want. It need not be aligned to a 128-bit boundary. That makes
5693 /// lowering INSERT_VECTOR_ELT operations easier.
5694 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5695 SelectionDAG &DAG, const SDLoc &dl) {
5696 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
5697 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
5700 /// Widen a vector to a larger size with the same scalar type, with the new
5701 /// elements either zero or undef.
5702 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
5703 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5705 assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
5706 Vec.getValueType().getScalarType() == VT.getScalarType() &&
5707 "Unsupported vector widening type");
5708 SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
5710 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
5711 DAG.getIntPtrConstant(0, dl));
5714 /// Widen a vector to a larger size with the same scalar type, with the new
5715 /// elements either zero or undef.
5716 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
5717 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5718 const SDLoc &dl, unsigned WideSizeInBits) {
5719 assert(Vec.getValueSizeInBits() < WideSizeInBits &&
5720 (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
5721 "Unsupported vector widening type");
5722 unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
5723 MVT SVT = Vec.getSimpleValueType().getScalarType();
5724 MVT VT = MVT::getVectorVT(SVT, WideNumElts);
5725 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
5728 // Helper function to collect subvector ops that are concatenated together,
5729 // either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
5730 // The subvectors in Ops are guaranteed to be the same type.
5731 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
5732 assert(Ops.empty() && "Expected an empty ops vector");
5734 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
5735 Ops.append(N->op_begin(), N->op_end());
5739 if (N->getOpcode() == ISD::INSERT_SUBVECTOR &&
5740 isa<ConstantSDNode>(N->getOperand(2))) {
5741 SDValue Src = N->getOperand(0);
5742 SDValue Sub = N->getOperand(1);
5743 const APInt &Idx = N->getConstantOperandAPInt(2);
5744 EVT VT = Src.getValueType();
5745 EVT SubVT = Sub.getValueType();
5747 // TODO - Handle more general insert_subvector chains.
5748 if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2) &&
5749 Idx == (VT.getVectorNumElements() / 2) &&
5750 Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
5751 Src.getOperand(1).getValueType() == SubVT &&
5752 isNullConstant(Src.getOperand(2))) {
5753 Ops.push_back(Src.getOperand(1));
5762 // Helper for splitting the operands of an operation to a legal target size and
5763 // applying a function on each part.
5764 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
5765 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
5766 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
5767 // The argument Builder is a function that will be applied on each split part:
5768 // SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
5769 template <typename F>
5770 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
5771 const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
5772 F Builder, bool CheckBWI = true) {
5773 assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
5774 unsigned NumSubs = 1;
5775 if ((CheckBWI && Subtarget.useBWIRegs()) ||
5776 (!CheckBWI && Subtarget.useAVX512Regs())) {
5777 if (VT.getSizeInBits() > 512) {
5778 NumSubs = VT.getSizeInBits() / 512;
5779 assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
5781 } else if (Subtarget.hasAVX2()) {
5782 if (VT.getSizeInBits() > 256) {
5783 NumSubs = VT.getSizeInBits() / 256;
5784 assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
5787 if (VT.getSizeInBits() > 128) {
5788 NumSubs = VT.getSizeInBits() / 128;
5789 assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
5794 return Builder(DAG, DL, Ops);
5796 SmallVector<SDValue, 4> Subs;
5797 for (unsigned i = 0; i != NumSubs; ++i) {
5798 SmallVector<SDValue, 2> SubOps;
5799 for (SDValue Op : Ops) {
5800 EVT OpVT = Op.getValueType();
5801 unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
5802 unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
5803 SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
5805 Subs.push_back(Builder(DAG, DL, SubOps));
5807 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
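// Usage sketch: splitting a 512-bit operation on an AVX2-only target gives
// NumSubs == 2, so each operand is extracted into two 256-bit halves, Builder
// runs once per half, and the per-half results are concatenated back together.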
5810 /// Insert i1-subvector to i1-vector.
5811 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
5812 const X86Subtarget &Subtarget) {
5815 SDValue Vec = Op.getOperand(0);
5816 SDValue SubVec = Op.getOperand(1);
5817 SDValue Idx = Op.getOperand(2);
5819 if (!isa<ConstantSDNode>(Idx))
5822 // Inserting undef is a nop. We can just return the original vector.
5823 if (SubVec.isUndef())
5826 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
5827 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
5830 MVT OpVT = Op.getSimpleValueType();
5831 unsigned NumElems = OpVT.getVectorNumElements();
5833 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
5835 // Extend to natively supported kshift.
5836 MVT WideOpVT = OpVT;
5837 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
5838 WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
5840 // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts as needed.
5842 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
5843 // May need to promote to a legal type.
5844 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5845 DAG.getConstant(0, dl, WideOpVT),
5847 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5850 MVT SubVecVT = SubVec.getSimpleValueType();
5851 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
5853 assert(IdxVal + SubVecNumElems <= NumElems &&
5854 IdxVal % SubVecVT.getSizeInBits() == 0 &&
5855 "Unexpected index value in INSERT_SUBVECTOR");
5857 SDValue Undef = DAG.getUNDEF(WideOpVT);
5860 // Zero lower bits of the Vec
5861 SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
5862 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
5864 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5865 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5866 // Merge them together, SubVec should be zero extended.
5867 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5868 DAG.getConstant(0, dl, WideOpVT),
5870 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5871 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5874 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5875 Undef, SubVec, ZeroIdx);
5877 if (Vec.isUndef()) {
5878 assert(IdxVal != 0 && "Unexpected index");
5879 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5880 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
5881 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5884 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
5885 assert(IdxVal != 0 && "Unexpected index");
5886 NumElems = WideOpVT.getVectorNumElements();
5887 unsigned ShiftLeft = NumElems - SubVecNumElems;
5888 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5889 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5890 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5891 if (ShiftRight != 0)
5892 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5893 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5894 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5897 // Simple case when we put subvector in the upper part
5898 if (IdxVal + SubVecNumElems == NumElems) {
5899 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5900 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
5901 if (SubVecNumElems * 2 == NumElems) {
5902 // Special case, use legal zero extending insert_subvector. This allows
5903 // isel to optimize when bits are known zero.
5904 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
5905 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5906 DAG.getConstant(0, dl, WideOpVT),
5909 // Otherwise use explicit shifts to zero the bits.
5910 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5911 Undef, Vec, ZeroIdx);
5912 NumElems = WideOpVT.getVectorNumElements();
5913 SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
5914 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5915 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5917 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5918 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5921 // Inserting into the middle is more complicated.
5923 NumElems = WideOpVT.getVectorNumElements();
5925 // Widen the vector if needed.
5926 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
5928 unsigned ShiftLeft = NumElems - SubVecNumElems;
5929 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
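// e.g. inserting a v2i1 subvector at index 2 of an 8-element wide type gives
// ShiftLeft = 6 and ShiftRight = 4: shifting the subvector up by 6 and back
// down by 4 leaves it zero extended at bits [2,3].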
5931 // Do an optimization for the most frequently used types.
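// (The mask approach is skipped for v64i1 on 32-bit targets, where the i64
// mask constant built below is not a legal scalar type.)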
5932 if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
5933 APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
5934 Mask0.flipAllBits();
5935 SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
5936 SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
5937 Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
5938 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5939 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5940 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5941 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5942 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5944 // Reduce to original width if needed.
5945 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5948 // Clear the upper bits of the subvector and move it to its insert position.
5949 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5950 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5951 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5952 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5954 // Isolate the bits below the insertion point.
5955 unsigned LowShift = NumElems - IdxVal;
5956 SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
5957 DAG.getTargetConstant(LowShift, dl, MVT::i8));
5958 Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
5959 DAG.getTargetConstant(LowShift, dl, MVT::i8));
5961 // Isolate the bits after the last inserted bit.
5962 unsigned HighShift = IdxVal + SubVecNumElems;
5963 SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
5964 DAG.getTargetConstant(HighShift, dl, MVT::i8));
5965 High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
5966 DAG.getTargetConstant(HighShift, dl, MVT::i8));
5968 // Now OR all 3 pieces together.
5969 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
5970 SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
5972 // Reduce to original width if needed.
5973 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5976 static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
5978 assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
5979 EVT SubVT = V1.getValueType();
5980 EVT SubSVT = SubVT.getScalarType();
5981 unsigned SubNumElts = SubVT.getVectorNumElements();
5982 unsigned SubVectorWidth = SubVT.getSizeInBits();
5983 EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
5984 SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
5985 return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
5988 /// Returns a vector of specified type with all bits set.
5989 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
5990 /// Then bitcast to their original type, ensuring they get CSE'd.
5991 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5992 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
5993 "Expected a 128/256/512-bit vector type");
5995 APInt Ones = APInt::getAllOnesValue(32);
5996 unsigned NumElts = VT.getSizeInBits() / 32;
5997 SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
5998 return DAG.getBitcast(VT, Vec);
6001 // Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
6002 static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
6004 case ISD::ANY_EXTEND:
6005 case ISD::ANY_EXTEND_VECTOR_INREG:
6006 return ISD::ANY_EXTEND_VECTOR_INREG;
6007 case ISD::ZERO_EXTEND:
6008 case ISD::ZERO_EXTEND_VECTOR_INREG:
6009 return ISD::ZERO_EXTEND_VECTOR_INREG;
6010 case ISD::SIGN_EXTEND:
6011 case ISD::SIGN_EXTEND_VECTOR_INREG:
6012 return ISD::SIGN_EXTEND_VECTOR_INREG;
6014 llvm_unreachable("Unknown opcode");
6017 static SDValue getExtendInVec(unsigned Opcode, const SDLoc &DL, EVT VT,
6018 SDValue In, SelectionDAG &DAG) {
6019 EVT InVT = In.getValueType();
6020 assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
6021 assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
6022 ISD::ZERO_EXTEND == Opcode) &&
6023 "Unknown extension opcode");
6025 // For 256-bit vectors, we only need the lower (128-bit) input half.
6026 // For 512-bit vectors, we only need the lower input half or quarter.
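// e.g. a v8i64 result extended from a 512-bit v16i32 input only needs the low
// 256 bits (v8i32); the element counts then match, so the plain extend opcode
// is kept, otherwise it is switched to the *_EXTEND_VECTOR_INREG form below.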
6027 if (InVT.getSizeInBits() > 128) {
6028 assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
6029 "Expected VTs to be the same size!");
6030 unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
6031 In = extractSubVector(In, 0, DAG, DL,
6032 std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
6033 InVT = In.getValueType();
6036 if (VT.getVectorNumElements() != InVT.getVectorNumElements())
6037 Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
6039 return DAG.getNode(Opcode, DL, VT, In);
6042 // Match (xor X, -1) -> X.
6043 // Match extract_subvector(xor X, -1) -> extract_subvector(X).
6044 // Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
6045 static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
6046 V = peekThroughBitcasts(V);
6047 if (V.getOpcode() == ISD::XOR &&
6048 ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()))
6049 return V.getOperand(0);
6050 if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6051 (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
6052 if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
6053 Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
6054 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
6055 Not, V.getOperand(1));
6058 SmallVector<SDValue, 2> CatOps;
6059 if (collectConcatOps(V.getNode(), CatOps)) {
6060 for (SDValue &CatOp : CatOps) {
6061 SDValue NotCat = IsNOT(CatOp, DAG);
6062 if (!NotCat) return SDValue();
6063 CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
6065 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
6070 /// Returns a vector_shuffle node for an unpackl operation.
6071 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
6072 SDValue V1, SDValue V2) {
6073 SmallVector<int, 8> Mask;
6074 createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
6075 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
6078 /// Returns a vector_shuffle node for an unpackh operation.
6079 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
6080 SDValue V1, SDValue V2) {
6081 SmallVector<int, 8> Mask;
6082 createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
6083 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
6086 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
6087 /// This produces a shuffle where the low element of V2 is swizzled into the
6088 /// zero/undef vector, landing at element Idx.
6089 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
6090 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
6092 const X86Subtarget &Subtarget,
6093 SelectionDAG &DAG) {
6094 MVT VT = V2.getSimpleValueType();
6096 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
6097 int NumElems = VT.getVectorNumElements();
6098 SmallVector<int, 16> MaskVec(NumElems);
6099 for (int i = 0; i != NumElems; ++i)
6100 // If this is the insertion idx, put the low elt of V2 here.
6101 MaskVec[i] = (i == Idx) ? NumElems : i;
6102 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
6105 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
6106 if (!Load || !ISD::isNormalLoad(Load))
6109 SDValue Ptr = Load->getBasePtr();
6110 if (Ptr->getOpcode() == X86ISD::Wrapper ||
6111 Ptr->getOpcode() == X86ISD::WrapperRIP)
6112 Ptr = Ptr->getOperand(0);
6114 auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
6115 if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
6118 return CNode->getConstVal();
6121 static const Constant *getTargetConstantFromNode(SDValue Op) {
6122 Op = peekThroughBitcasts(Op);
6123 return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
6127 X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
6128 assert(LD && "Unexpected null LoadSDNode");
6129 return getTargetConstantFromNode(LD);
6132 // Extract raw constant bits from constant pools.
6133 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
6135 SmallVectorImpl<APInt> &EltBits,
6136 bool AllowWholeUndefs = true,
6137 bool AllowPartialUndefs = true) {
6138 assert(EltBits.empty() && "Expected an empty EltBits vector");
6140 Op = peekThroughBitcasts(Op);
6142 EVT VT = Op.getValueType();
6143 unsigned SizeInBits = VT.getSizeInBits();
6144 assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
6145 unsigned NumElts = SizeInBits / EltSizeInBits;
6147 // Bitcast a source array of element bits to the target size.
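// e.g. two 64-bit source constants can be recast as four 32-bit target
// elements; a target element is only marked undef if every source bit covering
// it was undef, otherwise partial undef bits are treated as zero or rejected,
// depending on AllowPartialUndefs.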
6148 auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
6149 unsigned NumSrcElts = UndefSrcElts.getBitWidth();
6150 unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
6151 assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
6152 "Constant bit sizes don't match");
6154 // Don't split if we don't allow undef bits.
6155 bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
6156 if (UndefSrcElts.getBoolValue() && !AllowUndefs)
6159 // If we're already the right size, don't bother bitcasting.
6160 if (NumSrcElts == NumElts) {
6161 UndefElts = UndefSrcElts;
6162 EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
6166 // Extract all the undef/constant element data and pack into single bitsets.
6167 APInt UndefBits(SizeInBits, 0);
6168 APInt MaskBits(SizeInBits, 0);
6170 for (unsigned i = 0; i != NumSrcElts; ++i) {
6171 unsigned BitOffset = i * SrcEltSizeInBits;
6172 if (UndefSrcElts[i])
6173 UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
6174 MaskBits.insertBits(SrcEltBits[i], BitOffset);
6177 // Split the undef/constant single bitset data into the target elements.
6178 UndefElts = APInt(NumElts, 0);
6179 EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
6181 for (unsigned i = 0; i != NumElts; ++i) {
6182 unsigned BitOffset = i * EltSizeInBits;
6183 APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
6185 // Only treat an element as UNDEF if all bits are UNDEF.
6186 if (UndefEltBits.isAllOnesValue()) {
6187 if (!AllowWholeUndefs)
6189 UndefElts.setBit(i);
6193 // If only some bits are UNDEF then treat them as zero (or bail if not supported).
6195 if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
6198 EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
6203 // Collect constant bits and insert into mask/undef bit masks.
6204 auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
6205 unsigned UndefBitIndex) {
6208 if (isa<UndefValue>(Cst)) {
6209 Undefs.setBit(UndefBitIndex);
6212 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
6213 Mask = CInt->getValue();
6216 if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
6217 Mask = CFP->getValueAPF().bitcastToAPInt();
6225 APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
6226 SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
6227 return CastBitData(UndefSrcElts, SrcEltBits);
6230 // Extract scalar constant bits.
6231 if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
6232 APInt UndefSrcElts = APInt::getNullValue(1);
6233 SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
6234 return CastBitData(UndefSrcElts, SrcEltBits);
6236 if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
6237 APInt UndefSrcElts = APInt::getNullValue(1);
6238 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6239 SmallVector<APInt, 64> SrcEltBits(1, RawBits);
6240 return CastBitData(UndefSrcElts, SrcEltBits);
6243 // Extract constant bits from build vector.
6244 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
6245 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6246 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6248 APInt UndefSrcElts(NumSrcElts, 0);
6249 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6250 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6251 const SDValue &Src = Op.getOperand(i);
6252 if (Src.isUndef()) {
6253 UndefSrcElts.setBit(i);
6256 auto *Cst = cast<ConstantSDNode>(Src);
6257 SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
6259 return CastBitData(UndefSrcElts, SrcEltBits);
6261 if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
6262 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6263 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6265 APInt UndefSrcElts(NumSrcElts, 0);
6266 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6267 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6268 const SDValue &Src = Op.getOperand(i);
6269 if (Src.isUndef()) {
6270 UndefSrcElts.setBit(i);
6273 auto *Cst = cast<ConstantFPSDNode>(Src);
6274 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6275 SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
6277 return CastBitData(UndefSrcElts, SrcEltBits);
6280 // Extract constant bits from constant pool vector.
6281 if (auto *Cst = getTargetConstantFromNode(Op)) {
6282 Type *CstTy = Cst->getType();
6283 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
6284 if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
6287 unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
6288 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6290 APInt UndefSrcElts(NumSrcElts, 0);
6291 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6292 for (unsigned i = 0; i != NumSrcElts; ++i)
6293 if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
6297 return CastBitData(UndefSrcElts, SrcEltBits);
6300 // Extract constant bits from a broadcasted constant pool scalar.
6301 if (Op.getOpcode() == X86ISD::VBROADCAST &&
6302 EltSizeInBits <= VT.getScalarSizeInBits()) {
6303 if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
6304 unsigned SrcEltSizeInBits = Broadcast->getType()->getScalarSizeInBits();
6305 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6307 APInt UndefSrcElts(NumSrcElts, 0);
6308 SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
6309 if (CollectConstantBits(Broadcast, SrcEltBits[0], UndefSrcElts, 0)) {
6310 if (UndefSrcElts[0])
6311 UndefSrcElts.setBits(0, NumSrcElts);
6312 SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
6313 return CastBitData(UndefSrcElts, SrcEltBits);
6318 if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
6319 EltSizeInBits <= VT.getScalarSizeInBits()) {
6320 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
6321 if (MemIntr->getMemoryVT().getScalarSizeInBits() != VT.getScalarSizeInBits())
6324 SDValue Ptr = MemIntr->getBasePtr();
6325 if (Ptr->getOpcode() == X86ISD::Wrapper ||
6326 Ptr->getOpcode() == X86ISD::WrapperRIP)
6327 Ptr = Ptr->getOperand(0);
6329 auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
6330 if (!CNode || CNode->isMachineConstantPoolEntry() ||
6331 CNode->getOffset() != 0)
6334 if (const Constant *C = CNode->getConstVal()) {
6335 unsigned SrcEltSizeInBits = C->getType()->getScalarSizeInBits();
6336 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6338 APInt UndefSrcElts(NumSrcElts, 0);
6339 SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
6340 if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
6341 if (UndefSrcElts[0])
6342 UndefSrcElts.setBits(0, NumSrcElts);
6343 SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
6344 return CastBitData(UndefSrcElts, SrcEltBits);
6349 // Extract constant bits from a subvector broadcast.
6350 if (Op.getOpcode() == X86ISD::SUBV_BROADCAST) {
6351 SmallVector<APInt, 16> SubEltBits;
6352 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6353 UndefElts, SubEltBits, AllowWholeUndefs,
6354 AllowPartialUndefs)) {
6355 UndefElts = APInt::getSplat(NumElts, UndefElts);
6356 while (EltBits.size() < NumElts)
6357 EltBits.append(SubEltBits.begin(), SubEltBits.end());
6362 // Extract a rematerialized scalar constant insertion.
6363 if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
6364 Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
6365 isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
6366 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6367 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6369 APInt UndefSrcElts(NumSrcElts, 0);
6370 SmallVector<APInt, 64> SrcEltBits;
6371 auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
6372 SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
6373 SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
6374 return CastBitData(UndefSrcElts, SrcEltBits);
6377 // Insert constant bits from a base and sub vector sources.
6378 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
6379 isa<ConstantSDNode>(Op.getOperand(2))) {
6380 // TODO - support insert_subvector through bitcasts.
6381 if (EltSizeInBits != VT.getScalarSizeInBits())
6385 SmallVector<APInt, 32> EltSubBits;
6386 if (getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6387 UndefSubElts, EltSubBits,
6388 AllowWholeUndefs, AllowPartialUndefs) &&
6389 getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6390 UndefElts, EltBits, AllowWholeUndefs,
6391 AllowPartialUndefs)) {
6392 unsigned BaseIdx = Op.getConstantOperandVal(2);
6393 UndefElts.insertBits(UndefSubElts, BaseIdx);
6394 for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
6395 EltBits[BaseIdx + i] = EltSubBits[i];
6400 // Extract constant bits from a subvector's source.
6401 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6402 isa<ConstantSDNode>(Op.getOperand(1))) {
6403 // TODO - support extract_subvector through bitcasts.
6404 if (EltSizeInBits != VT.getScalarSizeInBits())
6407 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6408 UndefElts, EltBits, AllowWholeUndefs,
6409 AllowPartialUndefs)) {
6410 EVT SrcVT = Op.getOperand(0).getValueType();
6411 unsigned NumSrcElts = SrcVT.getVectorNumElements();
6412 unsigned NumSubElts = VT.getVectorNumElements();
6413 unsigned BaseIdx = Op.getConstantOperandVal(1);
6414 UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
6415 if ((BaseIdx + NumSubElts) != NumSrcElts)
6416 EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
6418 EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
6423 // Extract constant bits from shuffle node sources.
6424 if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
6425 // TODO - support shuffle through bitcasts.
6426 if (EltSizeInBits != VT.getScalarSizeInBits())
6429 ArrayRef<int> Mask = SVN->getMask();
6430 if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
6431 llvm::any_of(Mask, [](int M) { return M < 0; }))
6434 APInt UndefElts0, UndefElts1;
6435 SmallVector<APInt, 32> EltBits0, EltBits1;
6436 if (isAnyInRange(Mask, 0, NumElts) &&
6437 !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6438 UndefElts0, EltBits0, AllowWholeUndefs,
6439 AllowPartialUndefs))
6441 if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
6442 !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6443 UndefElts1, EltBits1, AllowWholeUndefs,
6444 AllowPartialUndefs))
6447 UndefElts = APInt::getNullValue(NumElts);
6448 for (int i = 0; i != (int)NumElts; ++i) {
6451 UndefElts.setBit(i);
6452 EltBits.push_back(APInt::getNullValue(EltSizeInBits));
6453 } else if (M < (int)NumElts) {
6455 UndefElts.setBit(i);
6456 EltBits.push_back(EltBits0[M]);
6458 if (UndefElts1[M - NumElts])
6459 UndefElts.setBit(i);
6460 EltBits.push_back(EltBits1[M - NumElts]);
6471 bool isConstantSplat(SDValue Op, APInt &SplatVal) {
6473 SmallVector<APInt, 16> EltBits;
6474 if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
6475 UndefElts, EltBits, true, false)) {
6476 int SplatIndex = -1;
6477 for (int i = 0, e = EltBits.size(); i != e; ++i) {
6480 if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
6486 if (0 <= SplatIndex) {
6487 SplatVal = EltBits[SplatIndex];
6497 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
6498 unsigned MaskEltSizeInBits,
6499 SmallVectorImpl<uint64_t> &RawMask,
6501 // Extract the raw target constant bits.
6502 SmallVector<APInt, 64> EltBits;
6503 if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
6504 EltBits, /* AllowWholeUndefs */ true,
6505 /* AllowPartialUndefs */ false))
6508 // Insert the extracted elements into the mask.
6509 for (APInt Elt : EltBits)
6510 RawMask.push_back(Elt.getZExtValue());
6515 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
6516 /// Note: This ignores saturation, so inputs must be checked first.
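/// e.g. for a non-unary v8i16 pack (two v4i32 inputs) the mask is
/// <0,2,4,6,8,10,12,14>: the even i16 elements of both inputs viewed as v8i16,
/// i.e. a truncating shuffle.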
6517 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6519 assert(Mask.empty() && "Expected an empty shuffle mask vector");
6520 unsigned NumElts = VT.getVectorNumElements();
6521 unsigned NumLanes = VT.getSizeInBits() / 128;
6522 unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
6523 unsigned Offset = Unary ? 0 : NumElts;
6525 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
6526 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6527 Mask.push_back(Elt + (Lane * NumEltsPerLane));
6528 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6529 Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
6533 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
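// e.g. for a v16i8 pack (a single 128-bit lane), result elements [0,7] map to
// the LHS operand and [8,15] to the RHS.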
6534 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
6535 APInt &DemandedLHS, APInt &DemandedRHS) {
6536 int NumLanes = VT.getSizeInBits() / 128;
6537 int NumElts = DemandedElts.getBitWidth();
6538 int NumInnerElts = NumElts / 2;
6539 int NumEltsPerLane = NumElts / NumLanes;
6540 int NumInnerEltsPerLane = NumInnerElts / NumLanes;
6542 DemandedLHS = APInt::getNullValue(NumInnerElts);
6543 DemandedRHS = APInt::getNullValue(NumInnerElts);
6545 // Map DemandedElts to the packed operands.
6546 for (int Lane = 0; Lane != NumLanes; ++Lane) {
6547 for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
6548 int OuterIdx = (Lane * NumEltsPerLane) + Elt;
6549 int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
6550 if (DemandedElts[OuterIdx])
6551 DemandedLHS.setBit(InnerIdx);
6552 if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
6553 DemandedRHS.setBit(InnerIdx);
6558 // Split the demanded elts of a HADD/HSUB node between its operands.
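// e.g. for a v8i32 HADD, result element 1 uses LHS elements 2 and 3, while
// result element 2 uses RHS elements 0 and 1.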
6559 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
6560 APInt &DemandedLHS, APInt &DemandedRHS) {
6561 int NumLanes = VT.getSizeInBits() / 128;
6562 int NumElts = DemandedElts.getBitWidth();
6563 int NumEltsPerLane = NumElts / NumLanes;
6564 int HalfEltsPerLane = NumEltsPerLane / 2;
6566 DemandedLHS = APInt::getNullValue(NumElts);
6567 DemandedRHS = APInt::getNullValue(NumElts);
6569 // Map DemandedElts to the horizontal operands.
6570 for (int Idx = 0; Idx != NumElts; ++Idx) {
6571 if (!DemandedElts[Idx])
6573 int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
6574 int LocalIdx = Idx % NumEltsPerLane;
6575 if (LocalIdx < HalfEltsPerLane) {
6576 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6577 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6579 LocalIdx -= HalfEltsPerLane;
6580 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6581 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6586 /// Calculates the shuffle mask corresponding to the target-specific opcode.
6587 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
6588 /// operands in \p Ops, and returns true.
6589 /// Sets \p IsUnary to true if only one source is used. Note that this will set
6590 /// IsUnary for shuffles which use a single input multiple times, and in those
6591 /// cases it will adjust the mask to only have indices within that single input.
6592 /// It is an error to call this with non-empty Mask/Ops vectors.
6593 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
6594 SmallVectorImpl<SDValue> &Ops,
6595 SmallVectorImpl<int> &Mask, bool &IsUnary) {
6596 unsigned NumElems = VT.getVectorNumElements();
6597 unsigned MaskEltSize = VT.getScalarSizeInBits();
6598 SmallVector<uint64_t, 32> RawMask;
6602 assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
6603 assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
6606 bool IsFakeUnary = false;
6607 switch (N->getOpcode()) {
6608 case X86ISD::BLENDI:
6609 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6610 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6611 ImmN = N->getOperand(N->getNumOperands() - 1);
6612 DecodeBLENDMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6613 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6616 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6617 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6618 ImmN = N->getOperand(N->getNumOperands() - 1);
6619 DecodeSHUFPMask(NumElems, MaskEltSize,
6620 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6621 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6623 case X86ISD::INSERTPS:
6624 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6625 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6626 ImmN = N->getOperand(N->getNumOperands() - 1);
6627 DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6628 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6630 case X86ISD::EXTRQI:
6631 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6632 if (isa<ConstantSDNode>(N->getOperand(1)) &&
6633 isa<ConstantSDNode>(N->getOperand(2))) {
6634 int BitLen = N->getConstantOperandVal(1);
6635 int BitIdx = N->getConstantOperandVal(2);
6636 DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6640 case X86ISD::INSERTQI:
6641 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6642 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6643 if (isa<ConstantSDNode>(N->getOperand(2)) &&
6644 isa<ConstantSDNode>(N->getOperand(3))) {
6645 int BitLen = N->getConstantOperandVal(2);
6646 int BitIdx = N->getConstantOperandVal(3);
6647 DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6648 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6651 case X86ISD::UNPCKH:
6652 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6653 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6654 DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
6655 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6657 case X86ISD::UNPCKL:
6658 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6659 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6660 DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
6661 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6663 case X86ISD::MOVHLPS:
6664 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6665 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6666 DecodeMOVHLPSMask(NumElems, Mask);
6667 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6669 case X86ISD::MOVLHPS:
6670 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6671 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6672 DecodeMOVLHPSMask(NumElems, Mask);
6673 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6675 case X86ISD::PALIGNR:
6676 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6677 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6678 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6679 ImmN = N->getOperand(N->getNumOperands() - 1);
6680 DecodePALIGNRMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6682 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6683 Ops.push_back(N->getOperand(1));
6684 Ops.push_back(N->getOperand(0));
6686 case X86ISD::VSHLDQ:
6687 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6688 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6689 ImmN = N->getOperand(N->getNumOperands() - 1);
6690 DecodePSLLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6694 case X86ISD::VSRLDQ:
6695 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6696 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6697 ImmN = N->getOperand(N->getNumOperands() - 1);
6698 DecodePSRLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6702 case X86ISD::PSHUFD:
6703 case X86ISD::VPERMILPI:
6704 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6705 ImmN = N->getOperand(N->getNumOperands() - 1);
6706 DecodePSHUFMask(NumElems, MaskEltSize,
6707 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6710 case X86ISD::PSHUFHW:
6711 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6712 ImmN = N->getOperand(N->getNumOperands() - 1);
6713 DecodePSHUFHWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6717 case X86ISD::PSHUFLW:
6718 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6719 ImmN = N->getOperand(N->getNumOperands() - 1);
6720 DecodePSHUFLWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6724 case X86ISD::VZEXT_MOVL:
6725 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6726 DecodeZeroMoveLowMask(NumElems, Mask);
6729 case X86ISD::VBROADCAST: {
6730 SDValue N0 = N->getOperand(0);
6731 // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
6732 // add the pre-extracted value to the Ops vector.
6733 if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6734 N0.getOperand(0).getValueType() == VT &&
6735 N0.getConstantOperandVal(1) == 0)
6736 Ops.push_back(N0.getOperand(0));
6738 // We only decode broadcasts of same-sized vectors, unless the broadcast
6739 // came from an extract from the original width. If we found one, we
6740 // pushed it onto the Ops vector above.
6741 if (N0.getValueType() == VT || !Ops.empty()) {
6742 DecodeVectorBroadcast(NumElems, Mask);
6748 case X86ISD::VPERMILPV: {
6749 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6751 SDValue MaskNode = N->getOperand(1);
6752 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6754 DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
6759 case X86ISD::PSHUFB: {
6760 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6761 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6762 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6764 SDValue MaskNode = N->getOperand(1);
6765 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6766 DecodePSHUFBMask(RawMask, RawUndefs, Mask);
6771 case X86ISD::VPERMI:
6772 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6773 ImmN = N->getOperand(N->getNumOperands() - 1);
6774 DecodeVPERMMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6779 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6780 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6781 DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
6783 case X86ISD::VPERM2X128:
6784 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6785 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6786 ImmN = N->getOperand(N->getNumOperands() - 1);
6787 DecodeVPERM2X128Mask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6789 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6791 case X86ISD::SHUF128:
6792 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6793 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6794 ImmN = N->getOperand(N->getNumOperands() - 1);
6795 decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize,
6796 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6797 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6799 case X86ISD::MOVSLDUP:
6800 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6801 DecodeMOVSLDUPMask(NumElems, Mask);
6804 case X86ISD::MOVSHDUP:
6805 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6806 DecodeMOVSHDUPMask(NumElems, Mask);
6809 case X86ISD::MOVDDUP:
6810 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6811 DecodeMOVDDUPMask(NumElems, Mask);
6814 case X86ISD::VPERMIL2: {
6815 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6816 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6817 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6818 SDValue MaskNode = N->getOperand(2);
6819 SDValue CtrlNode = N->getOperand(3);
6820 if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
6821 unsigned CtrlImm = CtrlOp->getZExtValue();
6822 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6824 DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
6831 case X86ISD::VPPERM: {
6832 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6833 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6834 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6835 SDValue MaskNode = N->getOperand(2);
6836 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6837 DecodeVPPERMMask(RawMask, RawUndefs, Mask);
6842 case X86ISD::VPERMV: {
6843 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6845 // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
6846 Ops.push_back(N->getOperand(1));
6847 SDValue MaskNode = N->getOperand(0);
6848 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6850 DecodeVPERMVMask(RawMask, RawUndefs, Mask);
6855 case X86ISD::VPERMV3: {
6856 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6857 assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
6858 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
6859 // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
6860 Ops.push_back(N->getOperand(0));
6861 Ops.push_back(N->getOperand(2));
6862 SDValue MaskNode = N->getOperand(1);
6863 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6865 DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
6870 default: llvm_unreachable("unknown target shuffle node");
6873 // Empty mask indicates the decode failed.
6877 // Check if we're getting a shuffle mask with zero'd elements.
6878 if (!AllowSentinelZero)
6879 if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
6882 // If we have a fake unary shuffle, the shuffle mask is spread across two
6883 // inputs that are actually the same node. Re-map the mask to always point
6884 // into the first input.
6887 if (M >= (int)Mask.size())
6890 // If we didn't already add operands in the opcode-specific code, default to
6891 // adding 1 or 2 operands starting at 0.
6893 Ops.push_back(N->getOperand(0));
6894 if (!IsUnary || IsFakeUnary)
6895 Ops.push_back(N->getOperand(1));
6901 /// Compute whether each element of a shuffle is zeroable.
6903 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
6904 /// Either it is an undef element in the shuffle mask, the element of the input
6905 /// referenced is undef, or the element of the input referenced is known to be
6906 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
6907 /// as many lanes with this technique as possible to simplify the remaining shuffle.
6909 static void computeZeroableShuffleElements(ArrayRef<int> Mask,
6910 SDValue V1, SDValue V2,
6911 APInt &KnownUndef, APInt &KnownZero) {
6912 int Size = Mask.size();
6913 KnownUndef = KnownZero = APInt::getNullValue(Size);
6915 V1 = peekThroughBitcasts(V1);
6916 V2 = peekThroughBitcasts(V2);
6918 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
6919 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
6921 int VectorSizeInBits = V1.getValueSizeInBits();
6922 int ScalarSizeInBits = VectorSizeInBits / Size;
6923 assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
6925 for (int i = 0; i < Size; ++i) {
6927 // Handle the easy cases.
6929 KnownUndef.setBit(i);
6932 if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
6933 KnownZero.setBit(i);
6937 // Determine shuffle input and normalize the mask.
6938 SDValue V = M < Size ? V1 : V2;
6941 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
6942 if (V.getOpcode() != ISD::BUILD_VECTOR)
6945 // If the BUILD_VECTOR has fewer elements than the mask, then the bitcasted
6946 // portion of the (larger) source element must be UNDEF/ZERO.
6947 if ((Size % V.getNumOperands()) == 0) {
6948 int Scale = Size / V->getNumOperands();
6949 SDValue Op = V.getOperand(M / Scale);
6951 KnownUndef.setBit(i);
6952 if (X86::isZeroNode(Op))
6953 KnownZero.setBit(i);
6954 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
6955 APInt Val = Cst->getAPIntValue();
6956 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
6958 KnownZero.setBit(i);
6959 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
6960 APInt Val = Cst->getValueAPF().bitcastToAPInt();
6961 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
6963 KnownZero.setBit(i);
6968 // If the BUILD_VECTOR has more elements than the mask, then all the (smaller)
6969 // source elements must be UNDEF or ZERO.
6970 if ((V.getNumOperands() % Size) == 0) {
6971 int Scale = V->getNumOperands() / Size;
6972 bool AllUndef = true;
6973 bool AllZero = true;
6974 for (int j = 0; j < Scale; ++j) {
6975 SDValue Op = V.getOperand((M * Scale) + j);
6976 AllUndef &= Op.isUndef();
6977 AllZero &= X86::isZeroNode(Op);
6980 KnownUndef.setBit(i);
6982 KnownZero.setBit(i);
6988 /// Decode a target shuffle mask and inputs and see if any values are
6989 /// known to be undef or zero from their inputs.
6990 /// Returns true if the target shuffle mask was decoded.
6991 /// FIXME: Merge this with computeZeroableShuffleElements?
6992 static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
6993 SmallVectorImpl<SDValue> &Ops,
6994 APInt &KnownUndef, APInt &KnownZero) {
6996 if (!isTargetShuffle(N.getOpcode()))
6999 MVT VT = N.getSimpleValueType();
7000 if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
7003 int Size = Mask.size();
7004 SDValue V1 = Ops[0];
7005 SDValue V2 = IsUnary ? V1 : Ops[1];
7006 KnownUndef = KnownZero = APInt::getNullValue(Size);
7008 V1 = peekThroughBitcasts(V1);
7009 V2 = peekThroughBitcasts(V2);
7011 assert((VT.getSizeInBits() % Size) == 0 &&
7012 "Illegal split of shuffle value type");
7013 unsigned EltSizeInBits = VT.getSizeInBits() / Size;
7015 // Extract known constant input data.
7016 APInt UndefSrcElts[2];
7017 SmallVector<APInt, 32> SrcEltBits[2];
7018 bool IsSrcConstant[2] = {
7019 getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
7020 SrcEltBits[0], true, false),
7021 getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
7022 SrcEltBits[1], true, false)};
7024 for (int i = 0; i < Size; ++i) {
7027 // Already decoded as SM_SentinelZero / SM_SentinelUndef.
7029 assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
7030 if (SM_SentinelUndef == M)
7031 KnownUndef.setBit(i);
7032 if (SM_SentinelZero == M)
7033 KnownZero.setBit(i);
7037 // Determine shuffle input and normalize the mask.
7038 unsigned SrcIdx = M / Size;
7039 SDValue V = M < Size ? V1 : V2;
7042 // We are referencing an UNDEF input.
7044 KnownUndef.setBit(i);
7048 // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
7049 // TODO: We currently only set UNDEF for integer types - floats use the same
7050 // registers as vectors and many of the scalar folded loads rely on the
7051 // SCALAR_TO_VECTOR pattern.
7052 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
7053 (Size % V.getValueType().getVectorNumElements()) == 0) {
7054 int Scale = Size / V.getValueType().getVectorNumElements();
7055 int Idx = M / Scale;
7056 if (Idx != 0 && !VT.isFloatingPoint())
7057 KnownUndef.setBit(i);
7058 else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
7059 KnownZero.setBit(i);
7063 // Attempt to extract from the source's constant bits.
7064 if (IsSrcConstant[SrcIdx]) {
7065 if (UndefSrcElts[SrcIdx][M])
7066 KnownUndef.setBit(i);
7067 else if (SrcEltBits[SrcIdx][M] == 0)
7068 KnownZero.setBit(i);
7072 assert(VT.getVectorNumElements() == (unsigned)Size &&
7073 "Different mask size from vector size!");
7077 // Replace target shuffle mask elements with known undef/zero sentinels.
7078 static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
7079 const APInt &KnownUndef,
7080 const APInt &KnownZero,
7081 bool ResolveKnownZeros= true) {
7082 unsigned NumElts = Mask.size();
7083 assert(KnownUndef.getBitWidth() == NumElts &&
7084 KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
7086 for (unsigned i = 0; i != NumElts; ++i) {
7088 Mask[i] = SM_SentinelUndef;
7089 else if (ResolveKnownZeros && KnownZero[i])
7090 Mask[i] = SM_SentinelZero;
7094 // Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
7095 static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
7098 unsigned NumElts = Mask.size();
7099 KnownUndef = KnownZero = APInt::getNullValue(NumElts);
7101 for (unsigned i = 0; i != NumElts; ++i) {
7103 if (SM_SentinelUndef == M)
7104 KnownUndef.setBit(i);
7105 if (SM_SentinelZero == M)
7106 KnownZero.setBit(i);
7110 // Forward declaration (for getFauxShuffleMask recursive check).
7111 // TODO: Use DemandedElts variant.
7112 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
7113 SmallVectorImpl<int> &Mask,
7114 SelectionDAG &DAG, unsigned Depth,
7115 bool ResolveKnownElts);
7117 // Attempt to decode ops that could be represented as a shuffle mask.
7118 // The decoded shuffle mask may contain a different number of elements than the
7119 // destination value type.
7120 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
7121 SmallVectorImpl<int> &Mask,
7122 SmallVectorImpl<SDValue> &Ops,
7123 SelectionDAG &DAG, unsigned Depth,
7124 bool ResolveKnownElts) {
7128 MVT VT = N.getSimpleValueType();
7129 unsigned NumElts = VT.getVectorNumElements();
7130 unsigned NumSizeInBits = VT.getSizeInBits();
7131 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
7132 if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
7134 assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
7136 unsigned Opcode = N.getOpcode();
7138 case ISD::VECTOR_SHUFFLE: {
7139 // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
7140 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
7141 if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
7142 Mask.append(ShuffleMask.begin(), ShuffleMask.end());
7143 Ops.push_back(N.getOperand(0));
7144 Ops.push_back(N.getOperand(1));
7150 case X86ISD::ANDNP: {
7151 // Attempt to decode as a per-byte mask.
7153 SmallVector<APInt, 32> EltBits;
7154 SDValue N0 = N.getOperand(0);
7155 SDValue N1 = N.getOperand(1);
7156 bool IsAndN = (X86ISD::ANDNP == Opcode);
7157 uint64_t ZeroMask = IsAndN ? 255 : 0;
7158 if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
7160 for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
7162 Mask.push_back(SM_SentinelUndef);
7165 const APInt &ByteBits = EltBits[i];
7166 if (ByteBits != 0 && ByteBits != 255)
7168 Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
7170 Ops.push_back(IsAndN ? N1 : N0);
7174 // Inspect each operand at the byte level. We can merge these into a
7175 // blend shuffle mask if for each byte at least one is masked out (zero).
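// e.g. OR(AND(X, <0x00FF..>), AND(Y, <0xFF00..>)) takes the low byte of each
// i16 element from X and the high byte from Y, i.e. a per-byte blend.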
7177 DAG.computeKnownBits(N.getOperand(0), DemandedElts, Depth + 1);
7179 DAG.computeKnownBits(N.getOperand(1), DemandedElts, Depth + 1);
7180 if (Known0.One.isNullValue() && Known1.One.isNullValue()) {
7181 bool IsByteMask = true;
7182 unsigned NumSizeInBytes = NumSizeInBits / 8;
7183 unsigned NumBytesPerElt = NumBitsPerElt / 8;
7184 APInt ZeroMask = APInt::getNullValue(NumBytesPerElt);
7185 APInt SelectMask = APInt::getNullValue(NumBytesPerElt);
7186 for (unsigned i = 0; i != NumBytesPerElt && IsByteMask; ++i) {
7187 unsigned LHS = Known0.Zero.extractBits(8, i * 8).getZExtValue();
7188 unsigned RHS = Known1.Zero.extractBits(8, i * 8).getZExtValue();
7189 if (LHS == 255 && RHS == 0)
7190 SelectMask.setBit(i);
7191 else if (LHS == 255 && RHS == 255)
7193 else if (!(LHS == 0 && RHS == 255))
7197 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt) {
7198 for (unsigned j = 0; j != NumBytesPerElt; ++j) {
7199 unsigned Ofs = (SelectMask[j] ? NumSizeInBytes : 0);
7200 int Idx = (ZeroMask[j] ? (int)SM_SentinelZero : (i + j + Ofs));
7201 Mask.push_back(Idx);
7204 Ops.push_back(N.getOperand(0));
7205 Ops.push_back(N.getOperand(1));
7210 // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
7211 // is a valid shuffle index.
7212 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
7213 SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
7214 if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
7216 SmallVector<int, 64> SrcMask0, SrcMask1;
7217 SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
7218 if (!getTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG, Depth + 1,
7220 !getTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG, Depth + 1,
7223 size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
7224 SmallVector<int, 64> Mask0, Mask1;
7225 scaleShuffleMask<int>(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
7226 scaleShuffleMask<int>(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
7227 for (size_t i = 0; i != MaskSize; ++i) {
7228 if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
7229 Mask.push_back(SM_SentinelUndef);
7230 else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
7231 Mask.push_back(SM_SentinelZero);
7232 else if (Mask1[i] == SM_SentinelZero)
7233 Mask.push_back(Mask0[i]);
7234 else if (Mask0[i] == SM_SentinelZero)
7235 Mask.push_back(Mask1[i] + (int)(MaskSize * SrcInputs0.size()));
7239 Ops.append(SrcInputs0.begin(), SrcInputs0.end());
7240 Ops.append(SrcInputs1.begin(), SrcInputs1.end());
7243 case ISD::INSERT_SUBVECTOR: {
7244 SDValue Src = N.getOperand(0);
7245 SDValue Sub = N.getOperand(1);
7246 EVT SubVT = Sub.getValueType();
7247 unsigned NumSubElts = SubVT.getVectorNumElements();
7248 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
7249 !N->isOnlyUserOf(Sub.getNode()))
7251 uint64_t InsertIdx = N.getConstantOperandVal(2);
7252 // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
7253 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
7254 Sub.getOperand(0).getValueType() == VT &&
7255 isa<ConstantSDNode>(Sub.getOperand(1))) {
7256 uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
7257 for (int i = 0; i != (int)NumElts; ++i)
7259 for (int i = 0; i != (int)NumSubElts; ++i)
7260 Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
7262 Ops.push_back(Sub.getOperand(0));
7265 // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
7266 SmallVector<int, 64> SubMask;
7267 SmallVector<SDValue, 2> SubInputs;
7268 if (!getTargetShuffleInputs(peekThroughOneUseBitcasts(Sub), SubInputs,
7269 SubMask, DAG, Depth + 1, ResolveKnownElts))
7271 if (SubMask.size() != NumSubElts) {
7272 assert(((SubMask.size() % NumSubElts) == 0 ||
7273 (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
7274 if ((NumSubElts % SubMask.size()) == 0) {
7275 int Scale = NumSubElts / SubMask.size();
7276 SmallVector<int,64> ScaledSubMask;
7277 scaleShuffleMask<int>(Scale, SubMask, ScaledSubMask);
7278 SubMask = ScaledSubMask;
7280 int Scale = SubMask.size() / NumSubElts;
7281 NumSubElts = SubMask.size();
7287 for (SDValue &SubInput : SubInputs) {
7288 EVT SubSVT = SubInput.getValueType().getScalarType();
7289 EVT AltVT = EVT::getVectorVT(*DAG.getContext(), SubSVT,
7290 NumSizeInBits / SubSVT.getSizeInBits());
7291 Ops.push_back(DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), AltVT,
7292 DAG.getUNDEF(AltVT), SubInput,
7293 DAG.getIntPtrConstant(0, SDLoc(N))));
7295 for (int i = 0; i != (int)NumElts; ++i)
7297 for (int i = 0; i != (int)NumSubElts; ++i) {
7300 int InputIdx = M / NumSubElts;
7301 M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
7303 Mask[i + InsertIdx] = M;
7307 case ISD::SCALAR_TO_VECTOR: {
7308 // Match against a scalar_to_vector of an extract from a vector,
7309 // for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
7310 SDValue N0 = N.getOperand(0);
7313 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7314 N0.getOperand(0).getValueType() == VT) ||
7315 (N0.getOpcode() == X86ISD::PEXTRW &&
7316 N0.getOperand(0).getValueType() == MVT::v8i16) ||
7317 (N0.getOpcode() == X86ISD::PEXTRB &&
7318 N0.getOperand(0).getValueType() == MVT::v16i8)) {
7322 if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
7325 SDValue SrcVec = SrcExtract.getOperand(0);
7326 EVT SrcVT = SrcVec.getValueType();
7327 unsigned NumSrcElts = SrcVT.getVectorNumElements();
7328 unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;
7330 unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
7331 if (NumSrcElts <= SrcIdx)
7334 Ops.push_back(SrcVec);
7335 Mask.push_back(SrcIdx);
7336 Mask.append(NumZeros, SM_SentinelZero);
7337 Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
7340 case X86ISD::PINSRB:
7341 case X86ISD::PINSRW: {
7342 SDValue InVec = N.getOperand(0);
7343 SDValue InScl = N.getOperand(1);
7344 SDValue InIndex = N.getOperand(2);
7345 if (!isa<ConstantSDNode>(InIndex) ||
7346 cast<ConstantSDNode>(InIndex)->getAPIntValue().uge(NumElts))
7348 uint64_t InIdx = N.getConstantOperandVal(2);
7350 // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
7351 if (X86::isZeroNode(InScl)) {
7352 Ops.push_back(InVec);
7353 for (unsigned i = 0; i != NumElts; ++i)
7354 Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
7358 // Attempt to recognise a PINSR*(PEXTR*) shuffle pattern.
7359 // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
7361 (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
7362 if (InScl.getOpcode() != ExOp)
7365 SDValue ExVec = InScl.getOperand(0);
7366 SDValue ExIndex = InScl.getOperand(1);
7367 if (!isa<ConstantSDNode>(ExIndex) ||
7368 cast<ConstantSDNode>(ExIndex)->getAPIntValue().uge(NumElts))
7370 uint64_t ExIdx = InScl.getConstantOperandVal(1);
7372 Ops.push_back(InVec);
7373 Ops.push_back(ExVec);
7374 for (unsigned i = 0; i != NumElts; ++i)
7375 Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
7378 case X86ISD::PACKSS:
7379 case X86ISD::PACKUS: {
7380 SDValue N0 = N.getOperand(0);
7381 SDValue N1 = N.getOperand(1);
7382 assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
7383 N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
7384 "Unexpected input value type");
7386 APInt EltsLHS, EltsRHS;
7387 getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
7389 // If we know input saturation won't happen we can treat this
7390 // as a truncation shuffle.
7391 if (Opcode == X86ISD::PACKSS) {
7392 if ((!N0.isUndef() &&
7393 DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
7395 DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
7398 APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
7399 if ((!N0.isUndef() &&
7400 !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
7402 !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
7406 bool IsUnary = (N0 == N1);
7412 createPackShuffleMask(VT, Mask, IsUnary);
7416 case X86ISD::VSRLI: {
7417 uint64_t ShiftVal = N.getConstantOperandVal(1);
7418 // Out of range bit shifts are guaranteed to be zero.
7419 if (NumBitsPerElt <= ShiftVal) {
7420 Mask.append(NumElts, SM_SentinelZero);
7424 // We can only decode 'whole byte' bit shifts as shuffles.
7425 if ((ShiftVal % 8) != 0)
7428 uint64_t ByteShift = ShiftVal / 8;
7429 unsigned NumBytes = NumSizeInBits / 8;
7430 unsigned NumBytesPerElt = NumBitsPerElt / 8;
7431 Ops.push_back(N.getOperand(0));
7433 // Clear mask to all zeros and insert the shifted byte indices.
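// e.g. a VSHLI of v2i64 by 8 bits shifts each 64-bit element up by one byte:
// the low byte of each element becomes zero and the remaining bytes come from
// one byte lower in the same element.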
7434 Mask.append(NumBytes, SM_SentinelZero);
7436 if (X86ISD::VSHLI == Opcode) {
7437 for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
7438 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7439 Mask[i + j] = i + j - ByteShift;
7441 for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
7442 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7443 Mask[i + j - ByteShift] = i + j;
7447 case X86ISD::VBROADCAST: {
7448 SDValue Src = N.getOperand(0);
7449 MVT SrcVT = Src.getSimpleValueType();
7450 if (!SrcVT.isVector())
7453 if (NumSizeInBits != SrcVT.getSizeInBits()) {
7454 assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
7455 "Illegal broadcast type");
7456 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
7457 NumSizeInBits / SrcVT.getScalarSizeInBits());
7458 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
7459 DAG.getUNDEF(SrcVT), Src,
7460 DAG.getIntPtrConstant(0, SDLoc(N)));
7464 Mask.append(NumElts, 0);
7467 case ISD::ZERO_EXTEND:
7468 case ISD::ANY_EXTEND:
7469 case ISD::ZERO_EXTEND_VECTOR_INREG:
7470 case ISD::ANY_EXTEND_VECTOR_INREG: {
7471 SDValue Src = N.getOperand(0);
7472 EVT SrcVT = Src.getValueType();
7474 // Extended source must be a simple vector.
7475 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
7476 (SrcVT.getScalarSizeInBits() % 8) != 0)
7479 unsigned NumSrcBitsPerElt = SrcVT.getScalarSizeInBits();
7481 (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
7482 DecodeZeroExtendMask(NumSrcBitsPerElt, NumBitsPerElt, NumElts, IsAnyExtend,
7485 if (NumSizeInBits != SrcVT.getSizeInBits()) {
7486 assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
7487 "Illegal zero-extension type");
7488 SrcVT = MVT::getVectorVT(SrcVT.getSimpleVT().getScalarType(),
7489 NumSizeInBits / NumSrcBitsPerElt);
7490 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
7491 DAG.getUNDEF(SrcVT), Src,
7492 DAG.getIntPtrConstant(0, SDLoc(N)));
7503 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
7504 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
7505 SmallVectorImpl<int> &Mask) {
7506 int MaskWidth = Mask.size();
7507 SmallVector<SDValue, 16> UsedInputs;
7508 for (int i = 0, e = Inputs.size(); i < e; ++i) {
7509 int lo = UsedInputs.size() * MaskWidth;
7510 int hi = lo + MaskWidth;
7512 // Strip UNDEF input usage.
7513 if (Inputs[i].isUndef())
7515 if ((lo <= M) && (M < hi))
7516 M = SM_SentinelUndef;
7518 // Check for unused inputs.
7519 if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
7526 // Check for repeated inputs.
7527 bool IsRepeat = false;
7528 for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
7529 if (UsedInputs[j] != Inputs[i])
7533 M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
7540 UsedInputs.push_back(Inputs[i]);
7542 Inputs = UsedInputs;
7545 /// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
7546 /// and then sets the SM_SentinelUndef and SM_SentinelZero values.
7547 /// Returns true if the target shuffle mask was decoded.
7548 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
7549 SmallVectorImpl<SDValue> &Inputs,
7550 SmallVectorImpl<int> &Mask,
7551 APInt &KnownUndef, APInt &KnownZero,
7552 SelectionDAG &DAG, unsigned Depth,
7553 bool ResolveKnownElts) {
7554 EVT VT = Op.getValueType();
7555 if (!VT.isSimple() || !VT.isVector())
7558 if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
7559 if (ResolveKnownElts)
7560 resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
7563 if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
7564 ResolveKnownElts)) {
7565 resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
7571 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
7572 SmallVectorImpl<int> &Mask,
7573 SelectionDAG &DAG, unsigned Depth = 0,
7574 bool ResolveKnownElts = true) {
7575 EVT VT = Op.getValueType();
7576 if (!VT.isSimple() || !VT.isVector())
7579 APInt KnownUndef, KnownZero;
7580 unsigned NumElts = Op.getValueType().getVectorNumElements();
7581 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
7582 return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
7583 KnownZero, DAG, Depth, ResolveKnownElts);
7586 /// Returns the scalar element that will make up the ith
7587 /// element of the result of the vector shuffle.
7588 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
7591 return SDValue(); // Limit search depth.
7593 SDValue V = SDValue(N, 0);
7594 EVT VT = V.getValueType();
7595 unsigned Opcode = V.getOpcode();
7597 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
7598 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
7599 int Elt = SV->getMaskElt(Index);
7602 return DAG.getUNDEF(VT.getVectorElementType());
7604 unsigned NumElems = VT.getVectorNumElements();
7605 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
7606 : SV->getOperand(1);
7607 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
7610 // Recurse into target specific vector shuffles to find scalars.
7611 if (isTargetShuffle(Opcode)) {
7612 MVT ShufVT = V.getSimpleValueType();
7613 MVT ShufSVT = ShufVT.getVectorElementType();
7614 int NumElems = (int)ShufVT.getVectorNumElements();
7615 SmallVector<int, 16> ShuffleMask;
7616 SmallVector<SDValue, 16> ShuffleOps;
7619 if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
7622 int Elt = ShuffleMask[Index];
7623 if (Elt == SM_SentinelZero)
7624 return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
7625 : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
7626 if (Elt == SM_SentinelUndef)
7627 return DAG.getUNDEF(ShufSVT);
7629 assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
7630 SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
7631 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
7635 // Recurse into insert_subvector base/sub vector to find scalars.
7636 if (Opcode == ISD::INSERT_SUBVECTOR &&
7637 isa<ConstantSDNode>(N->getOperand(2))) {
7638 SDValue Vec = N->getOperand(0);
7639 SDValue Sub = N->getOperand(1);
7640 EVT SubVT = Sub.getValueType();
7641 unsigned NumSubElts = SubVT.getVectorNumElements();
7642 uint64_t SubIdx = N->getConstantOperandVal(2);
7644 if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
7645 return getShuffleScalarElt(Sub.getNode(), Index - SubIdx, DAG, Depth + 1);
7646 return getShuffleScalarElt(Vec.getNode(), Index, DAG, Depth + 1);
7649 // Recurse into extract_subvector src vector to find scalars.
7650 if (Opcode == ISD::EXTRACT_SUBVECTOR &&
7651 isa<ConstantSDNode>(N->getOperand(1))) {
7652 SDValue Src = N->getOperand(0);
7653 uint64_t SrcIdx = N->getConstantOperandVal(1);
7654 return getShuffleScalarElt(Src.getNode(), Index + SrcIdx, DAG, Depth + 1);
7657 // Actual nodes that may contain scalar elements
7658 if (Opcode == ISD::BITCAST) {
7659 V = V.getOperand(0);
7660 EVT SrcVT = V.getValueType();
7661 unsigned NumElems = VT.getVectorNumElements();
7663 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
7667 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
7668 return (Index == 0) ? V.getOperand(0)
7669 : DAG.getUNDEF(VT.getVectorElementType());
7671 if (V.getOpcode() == ISD::BUILD_VECTOR)
7672 return V.getOperand(Index);
7677 // Use PINSRB/PINSRW/PINSRD to create a build vector.
7678 static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
7679 unsigned NumNonZero, unsigned NumZero,
7681 const X86Subtarget &Subtarget) {
7682 MVT VT = Op.getSimpleValueType();
7683 unsigned NumElts = VT.getVectorNumElements();
7684 assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
7685 ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
7686 "Illegal vector insertion");
7692 for (unsigned i = 0; i < NumElts; ++i) {
7693 bool IsNonZero = (NonZeros & (1 << i)) != 0;
7697 // If the build vector contains zeros or our first insertion is not the
7698 // first index, then insert into a zero vector to break any register
7699 // dependency; otherwise use SCALAR_TO_VECTOR.
7702 if (NumZero || 0 != i)
7703 V = getZeroVector(VT, Subtarget, DAG, dl);
7705 assert(0 == i && "Expected insertion into zero-index");
7706 V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7707 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
7708 V = DAG.getBitcast(VT, V);
7712 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
7713 DAG.getIntPtrConstant(i, dl));
7719 /// Custom lower build_vector of v16i8.
7720 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
7721 unsigned NumNonZero, unsigned NumZero,
7723 const X86Subtarget &Subtarget) {
7724 if (NumNonZero > 8 && !Subtarget.hasSSE41())
7727 // SSE4.1 - use PINSRB to insert each byte directly.
7728 if (Subtarget.hasSSE41())
7729 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7735 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
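// Each byte pair (b0, b1) is combined as the i32 value (b1 << 8) | b0 and then
// inserted as a single i16 element at index i/2.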
7736 for (unsigned i = 0; i < 16; i += 2) {
7737 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
7738 bool NextIsNonZero = (NonZeros & (1 << (i + 1))) != 0;
7739 if (!ThisIsNonZero && !NextIsNonZero)
7742 // FIXME: Investigate combining the first 4 bytes as an i32 instead.
7744 if (ThisIsNonZero) {
7745 if (NumZero || NextIsNonZero)
7746 Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7748 Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7751 if (NextIsNonZero) {
7752 SDValue NextElt = Op.getOperand(i + 1);
7753 if (i == 0 && NumZero)
7754 NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
7756 NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
7757 NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
7758 DAG.getConstant(8, dl, MVT::i8));
7760 Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
7765 // If our first insertion is not the first index, then insert into a zero
7766 // vector to break any register dependency; otherwise use SCALAR_TO_VECTOR.
7769 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
7771 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
7772 V = DAG.getBitcast(MVT::v8i16, V);
7776 Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
7777 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
7778 DAG.getIntPtrConstant(i / 2, dl));
7781 return DAG.getBitcast(MVT::v16i8, V);
7784 /// Custom lower build_vector of v8i16.
7785 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
7786 unsigned NumNonZero, unsigned NumZero,
7788 const X86Subtarget &Subtarget) {
7789 if (NumNonZero > 4 && !Subtarget.hasSSE41())
7792 // Use PINSRW to insert each element directly.
7793 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7797 /// Custom lower build_vector of v4i32 or v4f32.
7798 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
7799 const X86Subtarget &Subtarget) {
7800 // If this is a splat of a pair of elements, use MOVDDUP (unless the target
7801 // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
7802 // Because we're creating a less complicated build vector here, we may enable
7803 // further folding of the MOVDDUP via shuffle transforms.
7804 if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
7805 Op.getOperand(0) == Op.getOperand(2) &&
7806 Op.getOperand(1) == Op.getOperand(3) &&
7807 Op.getOperand(0) != Op.getOperand(1)) {
7809 MVT VT = Op.getSimpleValueType();
7810 MVT EltVT = VT.getVectorElementType();
7811 // Create a new build vector with the first 2 elements followed by undef
7812 // padding, bitcast to v2f64, duplicate, and bitcast back.
7813 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
7814 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
7815 SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
7816 SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
7817 return DAG.getBitcast(VT, Dup);
7820 // Find all zeroable elements.
7821 std::bitset<4> Zeroable, Undefs;
7822 for (int i = 0; i < 4; ++i) {
7823 SDValue Elt = Op.getOperand(i);
7824 Undefs[i] = Elt.isUndef();
7825 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
7827 assert(Zeroable.size() - Zeroable.count() > 1 &&
7828 "We expect at least two non-zero elements!");
7830 // We only know how to deal with build_vector nodes where elements are either
7831 // zeroable or extract_vector_elt with constant index.
7832 SDValue FirstNonZero;
7833 unsigned FirstNonZeroIdx;
7834 for (unsigned i = 0; i < 4; ++i) {
7837 SDValue Elt = Op.getOperand(i);
7838 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7839 !isa<ConstantSDNode>(Elt.getOperand(1)))
7841 // Make sure that this node is extracting from a 128-bit vector.
7842 MVT VT = Elt.getOperand(0).getSimpleValueType();
7843 if (!VT.is128BitVector())
7845 if (!FirstNonZero.getNode()) {
7847 FirstNonZeroIdx = i;
7851 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
7852 SDValue V1 = FirstNonZero.getOperand(0);
7853 MVT VT = V1.getSimpleValueType();
7855 // See if this build_vector can be lowered as a blend with zero.
7857 unsigned EltMaskIdx, EltIdx;
7859 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
7860 if (Zeroable[EltIdx]) {
7861 // The zero vector will be on the right hand side.
7862 Mask[EltIdx] = EltIdx+4;
7866 Elt = Op->getOperand(EltIdx);
7867 // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
7868 EltMaskIdx = Elt.getConstantOperandVal(1);
7869 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
7871 Mask[EltIdx] = EltIdx;
7875 // Let the shuffle legalizer deal with blend operations.
7876 SDValue VZeroOrUndef = (Zeroable == Undefs)
7878 : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
7879 if (V1.getSimpleValueType() != VT)
7880 V1 = DAG.getBitcast(VT, V1);
7881 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
7884 // See if we can lower this build_vector to a INSERTPS.
7885 if (!Subtarget.hasSSE41())
7888 SDValue V2 = Elt.getOperand(0);
7889 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
7892 bool CanFold = true;
7893 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
7897 SDValue Current = Op->getOperand(i);
7898 SDValue SrcVector = Current->getOperand(0);
7901 CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
7907 assert(V1.getNode() && "Expected at least two non-zero elements!");
7908 if (V1.getSimpleValueType() != MVT::v4f32)
7909 V1 = DAG.getBitcast(MVT::v4f32, V1);
7910 if (V2.getSimpleValueType() != MVT::v4f32)
7911 V2 = DAG.getBitcast(MVT::v4f32, V2);
7913 // Ok, we can emit an INSERTPS instruction.
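// The INSERTPS immediate encodes: bits [7:6] = source element (CountS),
// bits [5:4] = destination element (CountD), bits [3:0] = zero mask.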
7914 unsigned ZMask = Zeroable.to_ulong();
7916 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
7917 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
7919 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
7920 DAG.getIntPtrConstant(InsertPSMask, DL, true));
7921 return DAG.getBitcast(VT, Result);
7924 /// Return a vector logical shift node.
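/// For example, getVShift(/*isLeft=*/true, MVT::v2i64, V, 64, ...) emits a
/// VSHLDQ (PSLLDQ) of V by 8 bytes.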
7925 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
7926 SelectionDAG &DAG, const TargetLowering &TLI,
7928 assert(VT.is128BitVector() && "Unknown type for VShift");
7929 MVT ShVT = MVT::v16i8;
7930 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
7931 SrcOp = DAG.getBitcast(ShVT, SrcOp);
7932 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
7933 SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
7934 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
7937 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
7938 SelectionDAG &DAG) {
7940 // Check if the scalar load can be widened into a vector load, and if
7941 // the address is "base + cst", see if the cst can be "absorbed" into
7942 // the shuffle mask.
7943 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
7944 SDValue Ptr = LD->getBasePtr();
7945 if (!ISD::isNormalLoad(LD) || !LD->isSimple())
7947 EVT PVT = LD->getValueType(0);
7948 if (PVT != MVT::i32 && PVT != MVT::f32)
7953 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
7954 FI = FINode->getIndex();
7956 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
7957 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
7958 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7959 Offset = Ptr.getConstantOperandVal(1);
7960 Ptr = Ptr.getOperand(0);
7965 // FIXME: 256-bit vector instructions don't require strict alignment;
7966 // improve this code to support it better.
7967 unsigned RequiredAlign = VT.getSizeInBits()/8;
7968 SDValue Chain = LD->getChain();
7969 // Make sure the stack object alignment is at least 16 or 32.
7970 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7971 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
7972 if (MFI.isFixedObjectIndex(FI)) {
7973 // Can't change the alignment. FIXME: It's possible to compute
7974 // the exact stack offset and reference FI + adjust offset instead.
7975 // If someone *really* cares about this, that's the way to implement it.
7978 MFI.setObjectAlignment(FI, RequiredAlign);
7982 // (Offset % 16 or 32) must be a multiple of 4. The address is then
7983 // Ptr + (Offset & ~15).
7986 if ((Offset % RequiredAlign) & 3)
7988 int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
7991 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
7992 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
7995 int EltNo = (Offset - StartOffset) >> 2;
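// For example, with RequiredAlign == 16 and Offset == 20: StartOffset == 16,
// so the widened load is taken 16 bytes into the object and EltNo == 1 selects
// the originally requested scalar.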
7996 unsigned NumElems = VT.getVectorNumElements();
7998 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
7999 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
8000 LD->getPointerInfo().getWithOffset(StartOffset));
8002 SmallVector<int, 8> Mask(NumElems, EltNo);
8004 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
8010 // Recurse to find a LoadSDNode source and the accumulated ByteOffset.
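// For example, (extract_vector_elt (load <4 x i32> *p), 2) returns the vector
// load with ByteOffset == 8.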
8011 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
8012 if (ISD::isNON_EXTLoad(Elt.getNode())) {
8013 auto *BaseLd = cast<LoadSDNode>(Elt);
8014 if (!BaseLd->isSimple())
8021 switch (Elt.getOpcode()) {
8024 case ISD::SCALAR_TO_VECTOR:
8025 return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
8027 if (isa<ConstantSDNode>(Elt.getOperand(1))) {
8028 uint64_t Idx = Elt.getConstantOperandVal(1);
8029 if ((Idx % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
8030 ByteOffset += Idx / 8;
8035 case ISD::EXTRACT_VECTOR_ELT:
8036 if (isa<ConstantSDNode>(Elt.getOperand(1))) {
8037 SDValue Src = Elt.getOperand(0);
8038 unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
8039 unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
8040 if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
8041 findEltLoadSrc(Src, Ld, ByteOffset)) {
8042 uint64_t Idx = Elt.getConstantOperandVal(1);
8043 ByteOffset += Idx * (SrcSizeInBits / 8);
8053 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
8054 /// elements can be replaced by a single large load which has the same value as
8055 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
8057 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
8058 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
8059 const SDLoc &DL, SelectionDAG &DAG,
8060 const X86Subtarget &Subtarget,
8061 bool isAfterLegalize) {
8062 if ((VT.getScalarSizeInBits() % 8) != 0)
8065 unsigned NumElems = Elts.size();
8067 int LastLoadedElt = -1;
8068 APInt LoadMask = APInt::getNullValue(NumElems);
8069 APInt ZeroMask = APInt::getNullValue(NumElems);
8070 APInt UndefMask = APInt::getNullValue(NumElems);
8072 SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
8073 SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
8075 // For each element in the initializer, see if we've found a load, zero or an
8077 for (unsigned i = 0; i < NumElems; ++i) {
8078 SDValue Elt = peekThroughBitcasts(Elts[i]);
8081 if (Elt.isUndef()) {
8082 UndefMask.setBit(i);
8085 if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
8090 // Each loaded element must be the correct fractional portion of the
8091 // requested vector load.
8092 unsigned EltSizeInBits = Elt.getValueSizeInBits();
8093 if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
8096 if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
8098 unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
8099 if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
8105 assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
8106 LoadMask.countPopulation()) == NumElems &&
8107 "Incomplete element masks");
8109 // Handle Special Cases - all undef or undef/zero.
8110 if (UndefMask.countPopulation() == NumElems)
8111 return DAG.getUNDEF(VT);
8113 // FIXME: Should we return this as a BUILD_VECTOR instead?
8114 if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
8115 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
8116 : DAG.getConstantFP(0.0, DL, VT);
8118 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8119 int FirstLoadedElt = LoadMask.countTrailingZeros();
8120 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
8121 EVT EltBaseVT = EltBase.getValueType();
8122 assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
8123 "Register/Memory size mismatch");
8124 LoadSDNode *LDBase = Loads[FirstLoadedElt];
8125 assert(LDBase && "Did not find base load for merging consecutive loads");
8126 unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
8127 unsigned BaseSizeInBytes = BaseSizeInBits / 8;
8128 int LoadSizeInBits = (1 + LastLoadedElt - FirstLoadedElt) * BaseSizeInBits;
8129 assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
8131 // TODO: Support offsetting the base load.
8132 if (ByteOffsets[FirstLoadedElt] != 0)
8135 // Check to see if the element's load is consecutive to the base load
8136 // or offset from a previous (already checked) load.
8137 auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
8138 LoadSDNode *Ld = Loads[EltIdx];
8139 int64_t ByteOffset = ByteOffsets[EltIdx];
8140 if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
8141 int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
8142 return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
8143 Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
8145 return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
8146 EltIdx - FirstLoadedElt);
8149 // Consecutive loads can contain UNDEFs but not ZERO elements.
8150 // Consecutive loads with UNDEF and ZERO elements require an
8151 // additional shuffle stage to clear the ZERO elements.
8152 bool IsConsecutiveLoad = true;
8153 bool IsConsecutiveLoadWithZeros = true;
8154 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
8156 if (!CheckConsecutiveLoad(LDBase, i)) {
8157 IsConsecutiveLoad = false;
8158 IsConsecutiveLoadWithZeros = false;
8161 } else if (ZeroMask[i]) {
8162 IsConsecutiveLoad = false;
8166 auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
8167 auto MMOFlags = LDBase->getMemOperand()->getFlags();
8168 assert(LDBase->isSimple() &&
8169 "Cannot merge volatile or atomic loads.");
8171 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
8172 LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
8173 for (auto *LD : Loads)
8175 DAG.makeEquivalentMemoryOrdering(LD, NewLd);
8179 // Check if the base load is entirely dereferenceable.
8180 bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
8181 VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
8183 // LOAD - all consecutive load/undefs (must start/end with a load or be
8184 // entirely dereferenceable). If we have found an entire vector of loads and
8185 // undefs, then return a large load of the entire vector width starting at the
8186 // base pointer. If the vector contains zeros, then attempt to shuffle those
8188 if (FirstLoadedElt == 0 &&
8189 (LastLoadedElt == (int)(NumElems - 1) || IsDereferenceable) &&
8190 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
8191 if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
8194 // Don't create 256-bit non-temporal aligned loads without AVX2 as these
8195 // will lower to regular temporal loads and use the cache.
8196 if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
8197 VT.is256BitVector() && !Subtarget.hasInt256())
8201 return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
8204 return CreateLoad(VT, LDBase);
8206 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
8207 // vector and a zero vector to clear out the zero elements.
8208 if (!isAfterLegalize && VT.isVector()) {
8209 unsigned NumMaskElts = VT.getVectorNumElements();
8210 if ((NumMaskElts % NumElems) == 0) {
8211 unsigned Scale = NumMaskElts / NumElems;
8212 SmallVector<int, 4> ClearMask(NumMaskElts, -1);
8213 for (unsigned i = 0; i < NumElems; ++i) {
8216 int Offset = ZeroMask[i] ? NumMaskElts : 0;
8217 for (unsigned j = 0; j != Scale; ++j)
8218 ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
8220 SDValue V = CreateLoad(VT, LDBase);
8221 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
8222 : DAG.getConstantFP(0.0, DL, VT);
8223 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
8228 // If the upper half of a ymm/zmm load is undef then just load the lower half.
8229 if (VT.is256BitVector() || VT.is512BitVector()) {
8230 unsigned HalfNumElems = NumElems / 2;
8231 if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnesValue()) {
8233 EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
8235 EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
8236 DAG, Subtarget, isAfterLegalize);
8238 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
8239 HalfLD, DAG.getIntPtrConstant(0, DL));
8243 // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
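// For example, a v4i32 build_vector <load *p, load *(p+4), 0, 0> can become a
// single 64-bit VZEXT_LOAD (typically a MOVQ) that implicitly zeros the upper
// lanes.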
8244 if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
8245 (LoadSizeInBits == 32 || LoadSizeInBits == 64) &&
8246 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
8247 MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
8248 : MVT::getIntegerVT(LoadSizeInBits);
8249 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
8250 if (TLI.isTypeLegal(VecVT)) {
8251 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
8252 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
8254 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
8255 LDBase->getPointerInfo(),
8256 LDBase->getAlignment(),
8257 MachineMemOperand::MOLoad);
8258 for (auto *LD : Loads)
8260 DAG.makeEquivalentMemoryOrdering(LD, ResNode);
8261 return DAG.getBitcast(VT, ResNode);
8265 // BROADCAST - match the smallest possible repetition pattern, load that
8266 // scalar/subvector element and then broadcast to the entire vector.
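// For example, a v8i32 build_vector of loads forming the pattern
// <a,b,a,b,a,b,a,b> can be matched with SubElems == 2: the repeated 64-bit
// chunk is loaded once and broadcast with VBROADCAST (or SUBV_BROADCAST for
// repeats wider than 64 bits).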
8267 if (ZeroMask.isNullValue() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
8268 (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
8269 for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
8270 unsigned RepeatSize = SubElems * BaseSizeInBits;
8271 unsigned ScalarSize = std::min(RepeatSize, 64u);
8272 if (!Subtarget.hasAVX2() && ScalarSize < 32)
8276 SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
8277 for (unsigned i = 0; i != NumElems && Match; ++i) {
8280 SDValue Elt = peekThroughBitcasts(Elts[i]);
8281 if (RepeatedLoads[i % SubElems].isUndef())
8282 RepeatedLoads[i % SubElems] = Elt;
8284 Match &= (RepeatedLoads[i % SubElems] == Elt);
8287 // We must have loads at both ends of the repetition.
8288 Match &= !RepeatedLoads.front().isUndef();
8289 Match &= !RepeatedLoads.back().isUndef();
8294 VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
8295 ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
8296 : EVT::getFloatingPointVT(ScalarSize);
8297 if (RepeatSize > ScalarSize)
8298 RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
8299 RepeatSize / ScalarSize);
8301 EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
8302 VT.getSizeInBits() / ScalarSize);
8303 if (TLI.isTypeLegal(BroadcastVT)) {
8304 if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
8305 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, isAfterLegalize)) {
8306 unsigned Opcode = RepeatSize > ScalarSize ? X86ISD::SUBV_BROADCAST
8307 : X86ISD::VBROADCAST;
8308 SDValue Broadcast = DAG.getNode(Opcode, DL, BroadcastVT, RepeatLoad);
8309 return DAG.getBitcast(VT, Broadcast);
8318 // Combine a vector op (shuffles etc.) that is equal to build_vector load1,
8319 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
8320 // are consecutive, non-overlapping, and in the right order.
8321 static SDValue combineToConsecutiveLoads(EVT VT, SDNode *N, const SDLoc &DL,
8323 const X86Subtarget &Subtarget,
8324 bool isAfterLegalize) {
8325 SmallVector<SDValue, 64> Elts;
8326 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
8327 if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) {
8328 Elts.push_back(Elt);
8333 assert(Elts.size() == VT.getVectorNumElements());
8334 return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
8338 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
8339 unsigned SplatBitSize, LLVMContext &C) {
8340 unsigned ScalarSize = VT.getScalarSizeInBits();
8341 unsigned NumElm = SplatBitSize / ScalarSize;
8343 SmallVector<Constant *, 32> ConstantVec;
8344 for (unsigned i = 0; i < NumElm; i++) {
8345 APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
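// extractBits pulls the i-th ScalarSize-bit chunk starting from bit 0, so a
// 64-bit splat value 0x00000002'00000001 with 32-bit scalars yields the
// constants <1, 2>.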
8347 if (VT.isFloatingPoint()) {
8348 if (ScalarSize == 32) {
8349 Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
8351 assert(ScalarSize == 64 && "Unsupported floating point scalar size");
8352 Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
8355 Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
8356 ConstantVec.push_back(Const);
8358 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
8361 static bool isFoldableUseOfShuffle(SDNode *N) {
8362 for (auto *U : N->uses()) {
8363 unsigned Opc = U->getOpcode();
8364 // VPERMV/VPERMV3 shuffles can never fold their index operands.
8365 if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
8367 if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
8369 if (isTargetShuffle(Opc))
8371 if (Opc == ISD::BITCAST) // Ignore bitcasts
8372 return isFoldableUseOfShuffle(U);
8379 // Check if the current build_vector node is a zero-extended splat vector.
8380 // If so, return the value being extended.
8381 // For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
8382 // NumElt - return the number of zero-extended identical values.
8383 // EltType - return the type of the value including the zero extension.
8384 static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
8385 unsigned &NumElt, MVT &EltType) {
8386 SDValue ExtValue = Op->getOperand(0);
8387 unsigned NumElts = Op->getNumOperands();
8388 unsigned Delta = NumElts;
8390 for (unsigned i = 1; i < NumElts; i++) {
8391 if (Op->getOperand(i) == ExtValue) {
8395 if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
8398 if (!isPowerOf2_32(Delta) || Delta == 1)
8401 for (unsigned i = Delta; i < NumElts; i++) {
8402 if (i % Delta == 0) {
8403 if (Op->getOperand(i) != ExtValue)
8405 } else if (!(isNullConstant(Op->getOperand(i)) ||
8406 Op->getOperand(i).isUndef()))
8409 unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
8410 unsigned ExtVTSize = EltSize * Delta;
8411 EltType = MVT::getIntegerVT(ExtVTSize);
8412 NumElt = NumElts / Delta;
8416 /// Attempt to use the vbroadcast instruction to generate a splat value
8417 /// from a splat BUILD_VECTOR which uses:
8418 /// a. A single scalar load, or a constant.
8419 /// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
8421 /// The VBROADCAST node is returned when a pattern is found,
8422 /// or SDValue() otherwise.
8423 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
8424 const X86Subtarget &Subtarget,
8425 SelectionDAG &DAG) {
8426 // VBROADCAST requires AVX.
8427 // TODO: Splats could be generated for non-AVX CPUs using SSE
8428 // instructions, but there's less potential gain for only 128-bit vectors.
8429 if (!Subtarget.hasAVX())
8432 MVT VT = BVOp->getSimpleValueType(0);
8435 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
8436 "Unsupported vector type for broadcast.");
8438 BitVector UndefElements;
8439 SDValue Ld = BVOp->getSplatValue(&UndefElements);
8441 // Attempt to use VBROADCASTM
8442 // From this pattern:
8443 // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
8444 // b. t1 = (build_vector t0 t0)
8446 // Create (VBROADCASTM v2i1 X)
8447 if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
8448 MVT EltType = VT.getScalarType();
8449 unsigned NumElts = VT.getVectorNumElements();
8451 SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
8452 if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
8453 (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
8454 Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
8456 BOperand = ZeroExtended.getOperand(0);
8458 BOperand = Ld.getOperand(0).getOperand(0);
8459 MVT MaskVT = BOperand.getSimpleValueType();
8460 if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
8461 (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
8463 DAG.getNode(X86ISD::VBROADCASTM, dl,
8464 MVT::getVectorVT(EltType, NumElts), BOperand);
8465 return DAG.getBitcast(VT, Brdcst);
8470 unsigned NumElts = VT.getVectorNumElements();
8471 unsigned NumUndefElts = UndefElements.count();
8472 if (!Ld || (NumElts - NumUndefElts) <= 1) {
8473 APInt SplatValue, Undef;
8474 unsigned SplatBitSize;
8476 // Check if this is a repeated constant pattern suitable for broadcasting.
8477 if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
8478 SplatBitSize > VT.getScalarSizeInBits() &&
8479 SplatBitSize < VT.getSizeInBits()) {
8480 // Avoid replacing with broadcast when it's a use of a shuffle
8481 // instruction to preserve the present custom lowering of shuffles.
8482 if (isFoldableUseOfShuffle(BVOp))
8484 // Replace BUILD_VECTOR with a broadcast of the repeated constants.
8485 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8486 LLVMContext *Ctx = DAG.getContext();
8487 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
8488 if (Subtarget.hasAVX()) {
8489 if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
8490 !(SplatBitSize == 64 && Subtarget.is32Bit())) {
8491 // Splatted value can fit in one INTEGER constant in constant pool.
8492 // Load the constant and broadcast it.
8493 MVT CVT = MVT::getIntegerVT(SplatBitSize);
8494 Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
8495 Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
8496 SDValue CP = DAG.getConstantPool(C, PVT);
8497 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8499 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8501 CVT, dl, DAG.getEntryNode(), CP,
8502 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8504 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
8505 MVT::getVectorVT(CVT, Repeat), Ld);
8506 return DAG.getBitcast(VT, Brdcst);
8507 } else if (SplatBitSize == 32 || SplatBitSize == 64) {
8508 // Splatted value can fit in one FLOAT constant in constant pool.
8509 // Load the constant and broadcast it.
8510 // AVX have support for 32 and 64 bit broadcast for floats only.
8511 // No 64bit integer in 32bit subtarget.
8512 MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
8513 // Lower the splat via APFloat directly, to avoid any conversion.
8516 ? ConstantFP::get(*Ctx,
8517 APFloat(APFloat::IEEEsingle(), SplatValue))
8518 : ConstantFP::get(*Ctx,
8519 APFloat(APFloat::IEEEdouble(), SplatValue));
8520 SDValue CP = DAG.getConstantPool(C, PVT);
8521 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8523 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8525 CVT, dl, DAG.getEntryNode(), CP,
8526 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8528 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
8529 MVT::getVectorVT(CVT, Repeat), Ld);
8530 return DAG.getBitcast(VT, Brdcst);
8531 } else if (SplatBitSize > 64) {
8532 // Load the vector of constants and broadcast it.
8533 MVT CVT = VT.getScalarType();
8534 Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
8536 SDValue VCP = DAG.getConstantPool(VecC, PVT);
8537 unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
8538 unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment();
8540 MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
8541 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8543 SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
8544 return DAG.getBitcast(VT, Brdcst);
8549 // If we are moving a scalar into a vector (Ld must be set and all elements
8550 // but 1 are undef) and that operation is not obviously supported by
8551 // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
8552 // That's better than general shuffling and may eliminate a load to GPR and
8553 // move from scalar to vector register.
8554 if (!Ld || NumElts - NumUndefElts != 1)
8556 unsigned ScalarSize = Ld.getValueSizeInBits();
8557 if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
8561 bool ConstSplatVal =
8562 (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
8564 // Make sure that all of the users of a non-constant load are from the
8565 // BUILD_VECTOR node.
8566 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
8569 unsigned ScalarSize = Ld.getValueSizeInBits();
8570 bool IsGE256 = (VT.getSizeInBits() >= 256);
8572 // When optimizing for size, generate up to 5 extra bytes for a broadcast
8573 // instruction to save 8 or more bytes of constant pool data.
8574 // TODO: If multiple splats are generated to load the same constant,
8575 // it may be detrimental to overall size. There needs to be a way to detect
8576 // that condition to know if this is truly a size win.
8577 bool OptForSize = DAG.shouldOptForSize();
8579 // Handle broadcasting a single constant scalar from the constant pool
8581 // On Sandybridge (no AVX2), it is still better to load a constant vector
8582 // from the constant pool and not to broadcast it from a scalar.
8583 // But override that restriction when optimizing for size.
8584 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
8585 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
8586 EVT CVT = Ld.getValueType();
8587 assert(!CVT.isVector() && "Must not broadcast a vector type");
8589 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
8590 // For size optimization, also splat v2f64 and v2i64, and for size opt
8591 // with AVX2, also splat i8 and i16.
8592 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
8593 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8594 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
8595 const Constant *C = nullptr;
8596 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
8597 C = CI->getConstantIntValue();
8598 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
8599 C = CF->getConstantFPValue();
8601 assert(C && "Invalid constant type");
8603 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8605 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
8606 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8608 CVT, dl, DAG.getEntryNode(), CP,
8609 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8612 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8616 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
8618 // Handle AVX2 in-register broadcasts.
8619 if (!IsLoad && Subtarget.hasInt256() &&
8620 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
8621 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8623 // The scalar source must be a normal load.
8627 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8628 (Subtarget.hasVLX() && ScalarSize == 64))
8629 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8631 // The integer check is needed for the 64-bit into 128-bit case, so it doesn't
8632 // match double, since there is no vbroadcastsd xmm.
8633 if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
8634 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
8635 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8638 // Unsupported broadcast.
8642 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
8643 /// underlying vector and index.
8645 /// Modifies \p ExtractedFromVec to the real vector and returns the real
8647 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
8649 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
8650 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
8653 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
8655 // (extract_vector_elt (v8f32 %1), Constant<6>)
8657 // (extract_vector_elt (vector_shuffle<2,u,u,u>
8658 // (extract_subvector (v8f32 %0), Constant<4>),
8661 // In this case the vector is the extract_subvector expression and the index
8662 // is 2, as specified by the shuffle.
8663 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
8664 SDValue ShuffleVec = SVOp->getOperand(0);
8665 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
8666 assert(ShuffleVecVT.getVectorElementType() ==
8667 ExtractedFromVec.getSimpleValueType().getVectorElementType());
8669 int ShuffleIdx = SVOp->getMaskElt(Idx);
8670 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
8671 ExtractedFromVec = ShuffleVec;
8677 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
8678 MVT VT = Op.getSimpleValueType();
8680 // Skip if insert_vec_elt is not supported.
8681 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8682 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
8686 unsigned NumElems = Op.getNumOperands();
8690 SmallVector<unsigned, 4> InsertIndices;
8691 SmallVector<int, 8> Mask(NumElems, -1);
8693 for (unsigned i = 0; i != NumElems; ++i) {
8694 unsigned Opc = Op.getOperand(i).getOpcode();
8696 if (Opc == ISD::UNDEF)
8699 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
8700 // Quit if more than 1 elements need inserting.
8701 if (InsertIndices.size() > 1)
8704 InsertIndices.push_back(i);
8708 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
8709 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
8711 // Quit if non-constant index.
8712 if (!isa<ConstantSDNode>(ExtIdx))
8714 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
8716 // Quit if extracted from vector of different type.
8717 if (ExtractedFromVec.getValueType() != VT)
8720 if (!VecIn1.getNode())
8721 VecIn1 = ExtractedFromVec;
8722 else if (VecIn1 != ExtractedFromVec) {
8723 if (!VecIn2.getNode())
8724 VecIn2 = ExtractedFromVec;
8725 else if (VecIn2 != ExtractedFromVec)
8726 // Quit if more than 2 vectors to shuffle
8730 if (ExtractedFromVec == VecIn1)
8732 else if (ExtractedFromVec == VecIn2)
8733 Mask[i] = Idx + NumElems;
8736 if (!VecIn1.getNode())
8739 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
8740 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
8742 for (unsigned Idx : InsertIndices)
8743 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
8744 DAG.getIntPtrConstant(Idx, DL));
8749 static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
8750 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
8751 Op.getScalarValueSizeInBits() == 1 &&
8752 "Can not convert non-constant vector");
8753 uint64_t Immediate = 0;
8754 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8755 SDValue In = Op.getOperand(idx);
8757 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8760 MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
8761 return DAG.getConstant(Immediate, dl, VT);
8763 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
8764 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
8765 const X86Subtarget &Subtarget) {
8767 MVT VT = Op.getSimpleValueType();
8768 assert((VT.getVectorElementType() == MVT::i1) &&
8769 "Unexpected type in LowerBUILD_VECTORvXi1!");
8772 if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
8773 ISD::isBuildVectorAllOnes(Op.getNode()))
8776 uint64_t Immediate = 0;
8777 SmallVector<unsigned, 16> NonConstIdx;
8778 bool IsSplat = true;
8779 bool HasConstElts = false;
8781 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8782 SDValue In = Op.getOperand(idx);
8785 if (!isa<ConstantSDNode>(In))
8786 NonConstIdx.push_back(idx);
8788 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8789 HasConstElts = true;
8793 else if (In != Op.getOperand(SplatIdx))
8797 // For a splat, use "(select i1 splat_elt, all-ones, all-zeroes)".
8799 // The build_vector allows the scalar element to be larger than the vector
8800 // element type. We need to mask it to use as a condition unless we know
8801 // the upper bits are zero.
8802 // FIXME: Use computeKnownBits instead of checking specific opcode?
8803 SDValue Cond = Op.getOperand(SplatIdx);
8804 assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
8805 if (Cond.getOpcode() != ISD::SETCC)
8806 Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
8807 DAG.getConstant(1, dl, MVT::i8));
8808 return DAG.getSelect(dl, VT, Cond,
8809 DAG.getConstant(1, dl, VT),
8810 DAG.getConstant(0, dl, VT));
8813 // insert elements one by one
8816 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
8817 SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
8818 SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
8819 ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
8820 ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
8821 DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
8823 MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
8824 SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
8825 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
8826 DstVec = DAG.getBitcast(VecVT, Imm);
8827 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
8828 DAG.getIntPtrConstant(0, dl));
8831 DstVec = DAG.getUNDEF(VT);
8833 for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
8834 unsigned InsertIdx = NonConstIdx[i];
8835 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
8836 Op.getOperand(InsertIdx),
8837 DAG.getIntPtrConstant(InsertIdx, dl));
8842 /// This is a helper function of LowerToHorizontalOp().
8843 /// This function checks that the input build_vector \p N implements a
8844 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
8845 /// may not match the layout of an x86 256-bit horizontal instruction.
8846 /// In other words, if this returns true, then some extraction/insertion will
8847 /// be required to produce a valid horizontal instruction.
8849 /// Parameter \p Opcode defines the kind of horizontal operation to match.
8850 /// For example, if \p Opcode is equal to ISD::ADD, then this function
8851 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
8852 /// is equal to ISD::SUB, then this function checks if this is a horizontal
8855 /// This function only analyzes elements of \p N whose indices are
8856 /// in range [BaseIdx, LastIdx).
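/// For example, with \p Opcode == ISD::FADD, BaseIdx == 0 and LastIdx == 4,
/// the analyzed elements are expected to look like:
///   (fadd (extractelt A, 0), (extractelt A, 1)),
///   (fadd (extractelt A, 2), (extractelt A, 3)), ...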
8858 /// TODO: This function was originally used to match both real and fake partial
8859 /// horizontal operations, but the index-matching logic is incorrect for that.
8860 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
8861 /// code because it is only used for partial h-op matching now?
8862 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
8864 unsigned BaseIdx, unsigned LastIdx,
8865 SDValue &V0, SDValue &V1) {
8866 EVT VT = N->getValueType(0);
8867 assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
8868 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
8869 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
8870 "Invalid Vector in input!");
8872 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
8873 bool CanFold = true;
8874 unsigned ExpectedVExtractIdx = BaseIdx;
8875 unsigned NumElts = LastIdx - BaseIdx;
8876 V0 = DAG.getUNDEF(VT);
8877 V1 = DAG.getUNDEF(VT);
8879 // Check if N implements a horizontal binop.
8880 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
8881 SDValue Op = N->getOperand(i + BaseIdx);
8884 if (Op->isUndef()) {
8885 // Update the expected vector extract index.
8886 if (i * 2 == NumElts)
8887 ExpectedVExtractIdx = BaseIdx;
8888 ExpectedVExtractIdx += 2;
8892 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
8897 SDValue Op0 = Op.getOperand(0);
8898 SDValue Op1 = Op.getOperand(1);
8900 // Try to match the following pattern:
8901 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
8902 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8903 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8904 Op0.getOperand(0) == Op1.getOperand(0) &&
8905 isa<ConstantSDNode>(Op0.getOperand(1)) &&
8906 isa<ConstantSDNode>(Op1.getOperand(1)));
8910 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
8911 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
8913 if (i * 2 < NumElts) {
8915 V0 = Op0.getOperand(0);
8916 if (V0.getValueType() != VT)
8921 V1 = Op0.getOperand(0);
8922 if (V1.getValueType() != VT)
8925 if (i * 2 == NumElts)
8926 ExpectedVExtractIdx = BaseIdx;
8929 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
8930 if (I0 == ExpectedVExtractIdx)
8931 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
8932 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
8933 // Try to match the following dag sequence:
8934 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
8935 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
8939 ExpectedVExtractIdx += 2;
8945 /// Emit a sequence of two 128-bit horizontal add/sub followed by
8946 /// a concat_vector.
8948 /// This is a helper function of LowerToHorizontalOp().
8949 /// This function expects two 256-bit vectors called V0 and V1.
8950 /// At first, each vector is split into two separate 128-bit vectors.
8951 /// Then, the resulting 128-bit vectors are used to implement two
8952 /// horizontal binary operations.
8954 /// The kind of horizontal binary operation is defined by \p X86Opcode.
8956 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as input to
8957 /// the two new horizontal binops.
8958 /// When Mode is set, the first horizontal binop dag node takes as input
8959 /// the lower 128 bits of V0 and the upper 128 bits of V0. The second
8960 /// horizontal binop dag node takes as input the lower 128 bits of V1
8961 /// and the upper 128 bits of V1.
8963 /// HADD V0_LO, V0_HI
8964 /// HADD V1_LO, V1_HI
8966 /// Otherwise, the first horizontal binop dag node takes as input the lower
8967 /// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal binop
8968 /// dag node takes the upper 128 bits of V0 and the upper 128 bits of V1.
8970 /// HADD V0_LO, V1_LO
8971 /// HADD V0_HI, V1_HI
8973 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
8974 /// 128 bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
8975 /// the upper 128 bits of the result.
8976 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
8977 const SDLoc &DL, SelectionDAG &DAG,
8978 unsigned X86Opcode, bool Mode,
8979 bool isUndefLO, bool isUndefHI) {
8980 MVT VT = V0.getSimpleValueType();
8981 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
8982 "Invalid nodes in input!");
8984 unsigned NumElts = VT.getVectorNumElements();
8985 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
8986 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
8987 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
8988 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
8989 MVT NewVT = V0_LO.getSimpleValueType();
8991 SDValue LO = DAG.getUNDEF(NewVT);
8992 SDValue HI = DAG.getUNDEF(NewVT);
8995 // Don't emit a horizontal binop if the result is expected to be UNDEF.
8996 if (!isUndefLO && !V0->isUndef())
8997 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
8998 if (!isUndefHI && !V1->isUndef())
8999 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
9001 // Don't emit a horizontal binop if the result is expected to be UNDEF.
9002 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
9003 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
9005 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
9006 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
9009 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
9012 /// Returns true iff \p BV builds a vector with the result equivalent to
9013 /// the result of ADDSUB/SUBADD operation.
9014 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
9015 /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
9016 /// \p Opnd0 and \p Opnd1.
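/// For example, the v4f32 build_vector
///   ((fsub (extractelt A, 0), (extractelt B, 0)),
///    (fadd (extractelt A, 1), (extractelt B, 1)),
///    (fsub (extractelt A, 2), (extractelt B, 2)),
///    (fadd (extractelt A, 3), (extractelt B, 3)))
/// matches ADDSUB(A, B) with \p IsSubAdd == false.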
9017 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
9018 const X86Subtarget &Subtarget, SelectionDAG &DAG,
9019 SDValue &Opnd0, SDValue &Opnd1,
9020 unsigned &NumExtracts,
9023 MVT VT = BV->getSimpleValueType(0);
9024 if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
9027 unsigned NumElts = VT.getVectorNumElements();
9028 SDValue InVec0 = DAG.getUNDEF(VT);
9029 SDValue InVec1 = DAG.getUNDEF(VT);
9033 // Odd-numbered elements in the input build vector are obtained from
9034 // adding/subtracting two integer/float elements.
9035 // Even-numbered elements in the input build vector are obtained from
9036 // subtracting/adding two integer/float elements.
9037 unsigned Opc[2] = {0, 0};
9038 for (unsigned i = 0, e = NumElts; i != e; ++i) {
9039 SDValue Op = BV->getOperand(i);
9041 // Skip 'undef' values.
9042 unsigned Opcode = Op.getOpcode();
9043 if (Opcode == ISD::UNDEF)
9046 // Early exit if we found an unexpected opcode.
9047 if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
9050 SDValue Op0 = Op.getOperand(0);
9051 SDValue Op1 = Op.getOperand(1);
9053 // Try to match the following pattern:
9054 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
9055 // Early exit if we cannot match that sequence.
9056 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9057 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9058 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
9059 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
9060 Op0.getOperand(1) != Op1.getOperand(1))
9063 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
9067 // We found a valid add/sub node, make sure it's the same opcode as previous
9068 // elements for this parity.
9069 if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
9071 Opc[i % 2] = Opcode;
9073 // Update InVec0 and InVec1.
9074 if (InVec0.isUndef()) {
9075 InVec0 = Op0.getOperand(0);
9076 if (InVec0.getSimpleValueType() != VT)
9079 if (InVec1.isUndef()) {
9080 InVec1 = Op1.getOperand(0);
9081 if (InVec1.getSimpleValueType() != VT)
9085 // Make sure that the operands of each add/sub node always
9086 // come from the same pair of vectors.
9087 if (InVec0 != Op0.getOperand(0)) {
9088 if (Opcode == ISD::FSUB)
9091 // FADD is commutable. Try to commute the operands
9092 // and then test again.
9093 std::swap(Op0, Op1);
9094 if (InVec0 != Op0.getOperand(0))
9098 if (InVec1 != Op1.getOperand(0))
9101 // Increment the number of extractions done.
9105 // Ensure we have found an opcode for both parities and that they are
9106 // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
9107 // inputs are undef.
9108 if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
9109 InVec0.isUndef() || InVec1.isUndef())
9112 IsSubAdd = Opc[0] == ISD::FADD;
9119 /// Returns true if it is possible to fold a MUL and an idiom that has already
9120 /// been recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
9121 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
9122 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
9124 /// Prior to calling this function it should be known that there is some
9125 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
9126 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
9127 /// before replacement of such SDNode with ADDSUB operation. Thus the number
9128 /// of \p Opnd0 uses is expected to be equal to 2.
9129 /// For example, this function may be called for the following IR:
9130 /// %AB = fmul fast <2 x double> %A, %B
9131 /// %Sub = fsub fast <2 x double> %AB, %C
9132 /// %Add = fadd fast <2 x double> %AB, %C
9133 /// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
9134 /// <2 x i32> <i32 0, i32 3>
9135 /// There is a def for %Addsub here, which potentially can be replaced by
9136 /// X86ISD::ADDSUB operation:
9137 /// %Addsub = X86ISD::ADDSUB %AB, %C
9138 /// and such ADDSUB can further be replaced with FMADDSUB:
9139 /// %Addsub = FMADDSUB %A, %B, %C.
9141 /// The main reason why this method is called before the replacement of the
9142 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
9143 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
9145 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
9147 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
9148 unsigned ExpectedUses) {
9149 if (Opnd0.getOpcode() != ISD::FMUL ||
9150 !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
9153 // FIXME: These checks must match the similar ones in
9154 // DAGCombiner::visitFADDForFMACombine. It would be good to have one
9155 // function that would answer if it is Ok to fuse MUL + ADD to FMADD
9156 // or MUL + ADDSUB to FMADDSUB.
9157 const TargetOptions &Options = DAG.getTarget().Options;
9159 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
9164 Opnd1 = Opnd0.getOperand(1);
9165 Opnd0 = Opnd0.getOperand(0);
9170 /// Try to fold a build_vector that performs an 'addsub', 'fmaddsub' or
9171 /// 'fsubadd' operation into the corresponding X86ISD::ADDSUB, X86ISD::FMADDSUB
9172 /// or X86ISD::FMSUBADD node.
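/// Illustrative example (editor's sketch, placeholder vectors A/B): a v4f32
/// build_vector of the form
///   (build_vector (fsub (extractelt A, 0), (extractelt B, 0)),
///                 (fadd (extractelt A, 1), (extractelt B, 1)),
///                 (fsub (extractelt A, 2), (extractelt B, 2)),
///                 (fadd (extractelt A, 3), (extractelt B, 3)))
/// alternates sub/add across even/odd lanes, so isAddSubOrSubAdd matches it
/// and it can be lowered to (X86ISD::ADDSUB A, B).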
9173 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
9174 const X86Subtarget &Subtarget,
9175 SelectionDAG &DAG) {
9176 SDValue Opnd0, Opnd1;
9177 unsigned NumExtracts;
9179 if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
9183 MVT VT = BV->getSimpleValueType(0);
9186 // Try to generate X86ISD::FMADDSUB node here.
9188 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
9189 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
9190 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
9193 // We only support ADDSUB.
9197 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
9198 // the ADDSUB idiom has been successfully recognized. There are no known
9199 // X86 targets with 512-bit ADDSUB instructions!
9200   // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom recognition.
9202 if (VT.is512BitVector())
9205 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
9208 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
9209 unsigned &HOpcode, SDValue &V0, SDValue &V1) {
9210 // Initialize outputs to known values.
9211 MVT VT = BV->getSimpleValueType(0);
9212 HOpcode = ISD::DELETED_NODE;
9213 V0 = DAG.getUNDEF(VT);
9214 V1 = DAG.getUNDEF(VT);
9216 // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
9217 // half of the result is calculated independently from the 128-bit halves of
9218 // the inputs, so that makes the index-checking logic below more complicated.
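  // For reference (editor's illustration): with AVX2, a v8i32 HADD of inputs A
  // and B produces
  //   [ a0+a1, a2+a3, b0+b1, b2+b3 | a4+a5, a6+a7, b4+b5, b6+b7 ]
  // i.e. each 128-bit half of the result only reads the matching 128-bit half
  // of each input, which is what the ExpectedIndex computation below encodes.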
9219 unsigned NumElts = VT.getVectorNumElements();
9220 unsigned GenericOpcode = ISD::DELETED_NODE;
9221 unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
9222 unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
9223 unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
9224 for (unsigned i = 0; i != Num128BitChunks; ++i) {
9225 for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
9226 // Ignore undef elements.
9227 SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
9231 // If there's an opcode mismatch, we're done.
9232 if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
9235 // Initialize horizontal opcode.
9236 if (HOpcode == ISD::DELETED_NODE) {
9237 GenericOpcode = Op.getOpcode();
9238 switch (GenericOpcode) {
9239 case ISD::ADD: HOpcode = X86ISD::HADD; break;
9240 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
9241 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
9242 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
9243 default: return false;
9247 SDValue Op0 = Op.getOperand(0);
9248 SDValue Op1 = Op.getOperand(1);
9249 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9250 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9251 Op0.getOperand(0) != Op1.getOperand(0) ||
9252 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
9253 !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
9256 // The source vector is chosen based on which 64-bit half of the
9257 // destination vector is being calculated.
9258 if (j < NumEltsIn64Bits) {
9260 V0 = Op0.getOperand(0);
9263 V1 = Op0.getOperand(0);
9266 SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
9267 if (SourceVec != Op0.getOperand(0))
9270 // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
9271 unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
9272 unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
9273 unsigned ExpectedIndex = i * NumEltsIn128Bits +
9274 (j % NumEltsIn64Bits) * 2;
9275 if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
9278 // If this is not a commutative op, this does not match.
9279 if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
9282 // Addition is commutative, so try swapping the extract indexes.
9283 // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
9284 if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
9287 // Extract indexes do not match horizontal requirement.
9291 // We matched. Opcode and operands are returned by reference as arguments.
9295 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
9296 SelectionDAG &DAG, unsigned HOpcode,
9297 SDValue V0, SDValue V1) {
9298 // If either input vector is not the same size as the build vector,
9299 // extract/insert the low bits to the correct size.
9300 // This is free (examples: zmm --> xmm, xmm --> ymm).
9301 MVT VT = BV->getSimpleValueType(0);
9302 unsigned Width = VT.getSizeInBits();
9303 if (V0.getValueSizeInBits() > Width)
9304 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
9305 else if (V0.getValueSizeInBits() < Width)
9306 V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
9308 if (V1.getValueSizeInBits() > Width)
9309 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
9310 else if (V1.getValueSizeInBits() < Width)
9311 V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
9313 unsigned NumElts = VT.getVectorNumElements();
9314 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
9315 for (unsigned i = 0; i != NumElts; ++i)
9316 if (BV->getOperand(i).isUndef())
9317 DemandedElts.clearBit(i);
9319   // If we don't need the upper xmm, then perform as an xmm hop.
9320 unsigned HalfNumElts = NumElts / 2;
9321 if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
9322 MVT HalfVT = VT.getHalfNumVectorElementsVT();
9323 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
9324 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
9325 SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
9326 return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
9329 return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
9332 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
9333 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
9334 const X86Subtarget &Subtarget,
9335 SelectionDAG &DAG) {
9336 // We need at least 2 non-undef elements to make this worthwhile by default.
9337 unsigned NumNonUndefs =
9338 count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
9339 if (NumNonUndefs < 2)
9342 // There are 4 sets of horizontal math operations distinguished by type:
9343 // int/FP at 128-bit/256-bit. Each type was introduced with a different
9344 // subtarget feature. Try to match those "native" patterns first.
9345 MVT VT = BV->getSimpleValueType(0);
9346 if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
9347 ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
9348 ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
9349 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
9352 if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
9353 return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
9356 // Try harder to match 256-bit ops by using extract/concat.
9357 if (!Subtarget.hasAVX() || !VT.is256BitVector())
9360   // Count the number of UNDEF operands in each half of the input build_vector.
9361 unsigned NumElts = VT.getVectorNumElements();
9362 unsigned Half = NumElts / 2;
9363 unsigned NumUndefsLO = 0;
9364 unsigned NumUndefsHI = 0;
9365 for (unsigned i = 0, e = Half; i != e; ++i)
9366 if (BV->getOperand(i)->isUndef())
9369 for (unsigned i = Half, e = NumElts; i != e; ++i)
9370 if (BV->getOperand(i)->isUndef())
9374 SDValue InVec0, InVec1;
9375 if (VT == MVT::v8i32 || VT == MVT::v16i16) {
9376 SDValue InVec2, InVec3;
9378 bool CanFold = true;
9380 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
9381 isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
9383 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9384 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9385 X86Opcode = X86ISD::HADD;
9386 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
9388 isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
9390 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9391 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9392 X86Opcode = X86ISD::HSUB;
9397 // Do not try to expand this build_vector into a pair of horizontal
9398 // add/sub if we can emit a pair of scalar add/sub.
9399 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9402 // Convert this build_vector into a pair of horizontal binops followed by
9403 // a concat vector. We must adjust the outputs from the partial horizontal
9404 // matching calls above to account for undefined vector halves.
9405 SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
9406 SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
9407 assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
9408 bool isUndefLO = NumUndefsLO == Half;
9409 bool isUndefHI = NumUndefsHI == Half;
9410 return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
9415 if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
9416 VT == MVT::v16i16) {
9418 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
9419 X86Opcode = X86ISD::HADD;
9420 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
9422 X86Opcode = X86ISD::HSUB;
9423 else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
9425 X86Opcode = X86ISD::FHADD;
9426 else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
9428 X86Opcode = X86ISD::FHSUB;
9432 // Don't try to expand this build_vector into a pair of horizontal add/sub
9433 // if we can simply emit a pair of scalar add/sub.
9434 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9437 // Convert this build_vector into two horizontal add/sub followed by
9439 bool isUndefLO = NumUndefsLO == Half;
9440 bool isUndefHI = NumUndefsHI == Half;
9441 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
9442 isUndefLO, isUndefHI);
9448 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
9449 /// one of their operands is constant, lower to a pair of BUILD_VECTOR and
9450 /// just apply the bit operation to the vectors.
9451 /// NOTE: It's not in our interest to start making a general-purpose vectorizer
9452 /// from this, but enough scalar bit operations are created from the later
9453 /// legalization + scalarization stages to need basic support.
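/// Illustrative example (editor's sketch): the scalarized build_vector
///   (build_vector (and a, 1), (and b, 2), (and c, 4), (and d, 8))
/// becomes
///   (and (build_vector a, b, c, d), (build_vector 1, 2, 4, 8))
/// so only the variable operands need to be assembled into a vector.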
9454 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
9455 SelectionDAG &DAG) {
9457 MVT VT = Op->getSimpleValueType(0);
9458 unsigned NumElems = VT.getVectorNumElements();
9459 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9461 // Check that all elements have the same opcode.
9462 // TODO: Should we allow UNDEFS and if so how many?
9463 unsigned Opcode = Op->getOperand(0).getOpcode();
9464 for (unsigned i = 1; i < NumElems; ++i)
9465 if (Opcode != Op->getOperand(i).getOpcode())
9468 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
9469 bool IsShift = false;
9481 // Don't do this if the buildvector is a splat - we'd replace one
9482 // constant with an entire vector.
9483 if (Op->getSplatValue())
9485 if (!TLI.isOperationLegalOrPromote(Opcode, VT))
9490 SmallVector<SDValue, 4> LHSElts, RHSElts;
9491 for (SDValue Elt : Op->ops()) {
9492 SDValue LHS = Elt.getOperand(0);
9493 SDValue RHS = Elt.getOperand(1);
9495 // We expect the canonicalized RHS operand to be the constant.
9496 if (!isa<ConstantSDNode>(RHS))
9499 // Extend shift amounts.
9500 if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
9503 RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
9506 LHSElts.push_back(LHS);
9507 RHSElts.push_back(RHS);
9510 // Limit to shifts by uniform immediates.
9511 // TODO: Only accept vXi8/vXi64 special cases?
9512 // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
9513 if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
9516 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
9517 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
9518 return DAG.getNode(Opcode, DL, VT, LHS, RHS);
9521 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
9522 /// functionality to do this, so it's all zeros, all ones, or some derivation
9523 /// that is cheap to calculate.
9524 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
9525 const X86Subtarget &Subtarget) {
9527 MVT VT = Op.getSimpleValueType();
9529 // Vectors containing all zeros can be matched by pxor and xorps.
9530 if (ISD::isBuildVectorAllZeros(Op.getNode()))
9533 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
9534 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
9535 // vpcmpeqd on 256-bit vectors.
9536 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
9537 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
9540 return getOnesVector(VT, DAG, DL);
9546 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
9547 /// from a vector of source values and a vector of extraction indices.
9548 /// The vectors might be manipulated to match the type of the permute op.
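/// Illustrative example (editor's sketch): for VT == v16i8 on an SSSE3 target,
/// a source vector %v and an index vector %idx are lowered to
///   (X86ISD::PSHUFB %v, %idx)
/// since PSHUFB already performs a byte-wise variable permute; wider element
/// types scale/offset the indices first so that they address bytes.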
9549 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
9550 SDLoc &DL, SelectionDAG &DAG,
9551 const X86Subtarget &Subtarget) {
9553 EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9554 unsigned NumElts = VT.getVectorNumElements();
9555 unsigned SizeInBits = VT.getSizeInBits();
9557 // Adjust IndicesVec to match VT size.
9558 assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
9559 "Illegal variable permute mask size");
9560 if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
9561 IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
9562 NumElts * VT.getScalarSizeInBits());
9563 IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
9565   // Handle a SrcVec whose size doesn't match VT.
9566 if (SrcVec.getValueSizeInBits() != SizeInBits) {
9567 if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
9568 // Handle larger SrcVec by treating it as a larger permute.
9569 unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
9570 VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
9571 IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9572 IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
9573 Subtarget, DAG, SDLoc(IndicesVec));
9574 return extractSubVector(
9575 createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget), 0,
9576 DAG, DL, SizeInBits);
9577 } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
9578 // Widen smaller SrcVec to match VT.
9579 SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
9584 auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
9585 assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
9586 EVT SrcVT = Idx.getValueType();
9587 unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
9588 uint64_t IndexScale = 0;
9589 uint64_t IndexOffset = 0;
9591 // If we're scaling a smaller permute op, then we need to repeat the
9592 // indices, scaling and offsetting them as well.
9593 // e.g. v4i32 -> v16i8 (Scale = 4)
9594 // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
9595 // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
9596 for (uint64_t i = 0; i != Scale; ++i) {
9597 IndexScale |= Scale << (i * NumDstBits);
9598 IndexOffset |= i << (i * NumDstBits);
9601 Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
9602 DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
9603 Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
9604 DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
9608 unsigned Opcode = 0;
9609 switch (VT.SimpleTy) {
9613 if (Subtarget.hasSSSE3())
9614 Opcode = X86ISD::PSHUFB;
9617 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9618 Opcode = X86ISD::VPERMV;
9619 else if (Subtarget.hasSSSE3()) {
9620 Opcode = X86ISD::PSHUFB;
9621 ShuffleVT = MVT::v16i8;
9626 if (Subtarget.hasAVX()) {
9627 Opcode = X86ISD::VPERMILPV;
9628 ShuffleVT = MVT::v4f32;
9629 } else if (Subtarget.hasSSSE3()) {
9630 Opcode = X86ISD::PSHUFB;
9631 ShuffleVT = MVT::v16i8;
9636 if (Subtarget.hasAVX()) {
9637 // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
9638 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9639 Opcode = X86ISD::VPERMILPV;
9640 ShuffleVT = MVT::v2f64;
9641 } else if (Subtarget.hasSSE41()) {
9642 // SSE41 can compare v2i64 - select between indices 0 and 1.
9643 return DAG.getSelectCC(
9645 getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
9646 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
9647 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
9648 ISD::CondCode::SETEQ);
9652 if (Subtarget.hasVLX() && Subtarget.hasVBMI())
9653 Opcode = X86ISD::VPERMV;
9654 else if (Subtarget.hasXOP()) {
9655 SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
9656 SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
9657 SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
9658 SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
9660 ISD::CONCAT_VECTORS, DL, VT,
9661 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
9662 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
9663 } else if (Subtarget.hasAVX()) {
9664 SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
9665 SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
9666 SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
9667 SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
9668 auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
9669 ArrayRef<SDValue> Ops) {
9670 // Permute Lo and Hi and then select based on index range.
9671         // This works as PSHUFB uses bits[3:0] to permute elements and we don't
9672         // care about bit[7] as it's just an index vector.
9673 SDValue Idx = Ops[2];
9674 EVT VT = Idx.getValueType();
9675 return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
9676 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
9677 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
9678 ISD::CondCode::SETGT);
9680 SDValue Ops[] = {LoLo, HiHi, IndicesVec};
9681 return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
9686 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9687 Opcode = X86ISD::VPERMV;
9688 else if (Subtarget.hasAVX()) {
9689 // Scale to v32i8 and perform as v32i8.
9690 IndicesVec = ScaleIndices(IndicesVec, 2);
9691 return DAG.getBitcast(
9692 VT, createVariablePermute(
9693 MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
9694 DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
9699 if (Subtarget.hasAVX2())
9700 Opcode = X86ISD::VPERMV;
9701 else if (Subtarget.hasAVX()) {
9702 SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
9703 SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9704 {0, 1, 2, 3, 0, 1, 2, 3});
9705 SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9706 {4, 5, 6, 7, 4, 5, 6, 7});
9707 if (Subtarget.hasXOP())
9708 return DAG.getBitcast(
9709 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
9710 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9711 // Permute Lo and Hi and then select based on index range.
9712 // This works as VPERMILPS only uses index bits[0:1] to permute elements.
9713 SDValue Res = DAG.getSelectCC(
9714 DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
9715 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
9716 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
9717 ISD::CondCode::SETGT);
9718 return DAG.getBitcast(VT, Res);
9723 if (Subtarget.hasAVX512()) {
9724 if (!Subtarget.hasVLX()) {
9725 MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
9726 SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
9728 IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
9729 DAG, SDLoc(IndicesVec));
9730 SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
9732 return extract256BitVector(Res, 0, DAG, DL);
9734 Opcode = X86ISD::VPERMV;
9735 } else if (Subtarget.hasAVX()) {
9736 SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
9738 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
9740 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
9741 // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
9742 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9743 if (Subtarget.hasXOP())
9744 return DAG.getBitcast(
9745 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
9746 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9747 // Permute Lo and Hi and then select based on index range.
9748 // This works as VPERMILPD only uses index bit[1] to permute elements.
9749 SDValue Res = DAG.getSelectCC(
9750 DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
9751 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
9752 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
9753 ISD::CondCode::SETGT);
9754 return DAG.getBitcast(VT, Res);
9758 if (Subtarget.hasVBMI())
9759 Opcode = X86ISD::VPERMV;
9762 if (Subtarget.hasBWI())
9763 Opcode = X86ISD::VPERMV;
9769 if (Subtarget.hasAVX512())
9770 Opcode = X86ISD::VPERMV;
9776 assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
9777 (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
9778 "Illegal variable permute shuffle type");
9780 uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
9782 IndicesVec = ScaleIndices(IndicesVec, Scale);
9784 EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
9785 IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
9787 SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
9788 SDValue Res = Opcode == X86ISD::VPERMV
9789 ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
9790 : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
9791 return DAG.getBitcast(VT, Res);
9794 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
9795 // reasoned to be a permutation of a vector by indices in a non-constant vector.
9796 // (build_vector (extract_elt V, (extract_elt I, 0)),
9797 // (extract_elt V, (extract_elt I, 1)),
9802 // TODO: Handle undefs
9803 // TODO: Utilize pshufb and zero mask blending to support more efficient
9804 // construction of vectors with constant-0 elements.
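// Illustrative IR that produces this DAG shape (editor's sketch; %v and %idx
// are placeholder names):
//   %i0 = extractelement <4 x i32> %idx, i32 0
//   %e0 = extractelement <4 x i32> %v, i32 %i0
//   %r0 = insertelement <4 x i32> undef, i32 %e0, i32 0
//   ... repeated for lanes 1-3 ...
// which SelectionDAG builds as a BUILD_VECTOR of extract/extract chains.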
9806 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
9807 const X86Subtarget &Subtarget) {
9808 SDValue SrcVec, IndicesVec;
9809 // Check for a match of the permute source vector and permute index elements.
9810 // This is done by checking that the i-th build_vector operand is of the form:
9811 // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
9812 for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
9813 SDValue Op = V.getOperand(Idx);
9814 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9817 // If this is the first extract encountered in V, set the source vector,
9818     // otherwise verify the extract is from the previously defined source vector.
9821 SrcVec = Op.getOperand(0);
9822 else if (SrcVec != Op.getOperand(0))
9824 SDValue ExtractedIndex = Op->getOperand(1);
9825 // Peek through extends.
9826 if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
9827 ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
9828 ExtractedIndex = ExtractedIndex.getOperand(0);
9829 if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9832 // If this is the first extract from the index vector candidate, set the
9833 // indices vector, otherwise verify the extract is from the previously
9834 // defined indices vector.
9836 IndicesVec = ExtractedIndex.getOperand(0);
9837 else if (IndicesVec != ExtractedIndex.getOperand(0))
9840 auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
9841 if (!PermIdx || PermIdx->getAPIntValue() != Idx)
9846 MVT VT = V.getSimpleValueType();
9847 return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
9851 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
9854 MVT VT = Op.getSimpleValueType();
9855 MVT EltVT = VT.getVectorElementType();
9856 unsigned NumElems = Op.getNumOperands();
9858 // Generate vectors for predicate vectors.
9859 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
9860 return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
9862 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
9863 return VectorConstant;
9865 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
9866 if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
9868 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
9869 return HorizontalOp;
9870 if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
9872 if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG))
9875 unsigned EVTBits = EltVT.getSizeInBits();
9877 unsigned NumZero = 0;
9878 unsigned NumNonZero = 0;
9879 uint64_t NonZeros = 0;
9880 bool IsAllConstants = true;
9881 SmallSet<SDValue, 8> Values;
9882 unsigned NumConstants = NumElems;
9883 for (unsigned i = 0; i < NumElems; ++i) {
9884 SDValue Elt = Op.getOperand(i);
9888 if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
9889 IsAllConstants = false;
9892 if (X86::isZeroNode(Elt))
9895 assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
9896 NonZeros |= ((uint64_t)1 << i);
9901 // All undef vector. Return an UNDEF. All zero vectors were handled above.
9902 if (NumNonZero == 0)
9903 return DAG.getUNDEF(VT);
9905 // If we are inserting one variable into a vector of non-zero constants, try
9906 // to avoid loading each constant element as a scalar. Load the constants as a
9907 // vector and then insert the variable scalar element. If insertion is not
9908 // supported, fall back to a shuffle to get the scalar blended with the
9909 // constants. Insertion into a zero vector is handled as a special-case
9910 // somewhere below here.
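  // Illustrative example (editor's sketch): for (build_vector 1, 2, x, 4) this
  // path emits roughly
  //   %cp  = constant pool <4 x i32> <1, 2, undef, 4>
  //   %ld  = load <4 x i32> from %cp
  //   %res = insert_vector_elt %ld, x, 2
  // instead of loading or inserting each constant element individually.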
9911 if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
9912 (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
9913 isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
9914 // Create an all-constant vector. The variable element in the old
9915 // build vector is replaced by undef in the constant vector. Save the
9916 // variable scalar element and its index for use in the insertelement.
9917 LLVMContext &Context = *DAG.getContext();
9918 Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
9919 SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
9922 for (unsigned i = 0; i != NumElems; ++i) {
9923 SDValue Elt = Op.getOperand(i);
9924 if (auto *C = dyn_cast<ConstantSDNode>(Elt))
9925 ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
9926 else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
9927 ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
9928 else if (!Elt.isUndef()) {
9929 assert(!VarElt.getNode() && !InsIndex.getNode() &&
9930 "Expected one variable element in this vector");
9932 InsIndex = DAG.getConstant(i, dl, getVectorIdxTy(DAG.getDataLayout()));
9935 Constant *CV = ConstantVector::get(ConstVecOps);
9936 SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
9938     // The constants we just created may not be legal (e.g., floating point). We
9939     // must lower the vector right here because we cannot guarantee that we'll
9940 // legalize it before loading it. This is also why we could not just create
9941 // a new build vector here. If the build vector contains illegal constants,
9942 // it could get split back up into a series of insert elements.
9943 // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
9944 SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
9945 MachineFunction &MF = DAG.getMachineFunction();
9946 MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
9947 SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
9948 unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
9949 unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
9950 if (InsertC < NumEltsInLow128Bits)
9951 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
9953 // There's no good way to insert into the high elements of a >128-bit
9954 // vector, so use shuffles to avoid an extract/insert sequence.
9955 assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
9956 assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
9957 SmallVector<int, 8> ShuffleMask;
9958 unsigned NumElts = VT.getVectorNumElements();
9959 for (unsigned i = 0; i != NumElts; ++i)
9960 ShuffleMask.push_back(i == InsertC ? NumElts : i);
9961 SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
9962 return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
9965 // Special case for single non-zero, non-undef, element.
9966 if (NumNonZero == 1) {
9967 unsigned Idx = countTrailingZeros(NonZeros);
9968 SDValue Item = Op.getOperand(Idx);
9970 // If we have a constant or non-constant insertion into the low element of
9971 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
9972 // the rest of the elements. This will be matched as movd/movq/movss/movsd
9973 // depending on what the source datatype is.
9976 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9978 if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
9979 (EltVT == MVT::i64 && Subtarget.is64Bit())) {
9980 assert((VT.is128BitVector() || VT.is256BitVector() ||
9981 VT.is512BitVector()) &&
9982 "Expected an SSE value type!");
9983 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9984 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
9985 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9988     // We can't directly insert an i8 or i16 into a vector, so zero extend it to i32 first.
9990 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
9991 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
9992 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
9993 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
9994 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9995 return DAG.getBitcast(VT, Item);
9999 // Is it a vector logical left shift?
10000 if (NumElems == 2 && Idx == 1 &&
10001 X86::isZeroNode(Op.getOperand(0)) &&
10002 !X86::isZeroNode(Op.getOperand(1))) {
10003 unsigned NumBits = VT.getSizeInBits();
10004 return getVShift(true, VT,
10005 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
10006 VT, Op.getOperand(1)),
10007 NumBits/2, DAG, *this, dl);
10010 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
10013 // Otherwise, if this is a vector with i32 or f32 elements, and the element
10014 // is a non-constant being inserted into an element other than the low one,
10015 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
10016   // movd/movss) to move this into the low element, then shuffle it into place.
10018 if (EVTBits == 32) {
10019 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
10020 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
10024 // Splat is obviously ok. Let legalizer expand it to a shuffle.
10025 if (Values.size() == 1) {
10026 if (EVTBits == 32) {
10027 // Instead of a shuffle like this:
10028 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
10029 // Check if it's possible to issue this instead.
10030       // shuffle (vload ptr), undef, <1, 1, 1, 1>
10031 unsigned Idx = countTrailingZeros(NonZeros);
10032 SDValue Item = Op.getOperand(Idx);
10033 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
10034 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
10039 // A vector full of immediates; various special cases are already
10040 // handled, so this is best done with a single constant-pool load.
10041 if (IsAllConstants)
10044 if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
10047 // See if we can use a vector load to get all of the elements.
10049 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
10051 EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
10055 // If this is a splat of pairs of 32-bit elements, we can use a narrower
10056 // build_vector and broadcast it.
10057 // TODO: We could probably generalize this more.
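  // Illustrative example (editor's sketch): (v8i32 build_vector a, b, a, b, a, b, a, b)
  // is rebuilt as (v4i32 build_vector a, b, undef, undef), bitcast to v2i64,
  // broadcast to v4i64 with VBROADCAST, and bitcast back to v8i32.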
10058 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
10059 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
10060 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
10061 auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
10062 // Make sure all the even/odd operands match.
10063 for (unsigned i = 2; i != NumElems; ++i)
10064 if (Ops[i % 2] != Op.getOperand(i))
10068 if (CanSplat(Op, NumElems, Ops)) {
10069 MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
10070 MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
10071 // Create a new build vector and cast to v2i64/v2f64.
10072 SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
10073 DAG.getBuildVector(NarrowVT, dl, Ops));
10074 // Broadcast from v2i64/v2f64 and cast to final VT.
10075 MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
10076 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
10081 // For AVX-length vectors, build the individual 128-bit pieces and use
10082 // shuffles to put them in place.
10083 if (VT.getSizeInBits() > 128) {
10084 MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
10086 // Build both the lower and upper subvector.
10088 DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
10089 SDValue Upper = DAG.getBuildVector(
10090 HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));
10092 // Recreate the wider vector with the lower and upper part.
10093 return concatSubVectors(Lower, Upper, DAG, dl);
10096 // Let legalizer expand 2-wide build_vectors.
10097 if (EVTBits == 64) {
10098 if (NumNonZero == 1) {
10099 // One half is zero or undef.
10100 unsigned Idx = countTrailingZeros(NonZeros);
10101 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
10102 Op.getOperand(Idx));
10103 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
10108 // If element VT is < 32 bits, convert it to inserts into a zero vector.
10109 if (EVTBits == 8 && NumElems == 16)
10110 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
10114 if (EVTBits == 16 && NumElems == 8)
10115 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
10119 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
10120 if (EVTBits == 32 && NumElems == 4)
10121 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
10124 // If element VT is == 32 bits, turn it into a number of shuffles.
10125 if (NumElems == 4 && NumZero > 0) {
10126 SmallVector<SDValue, 8> Ops(NumElems);
10127 for (unsigned i = 0; i < 4; ++i) {
10128 bool isZero = !(NonZeros & (1ULL << i));
10130 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
10132 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
10135 for (unsigned i = 0; i < 2; ++i) {
10136 switch ((NonZeros >> (i*2)) & 0x3) {
10137 default: llvm_unreachable("Unexpected NonZero count");
10139 Ops[i] = Ops[i*2]; // Must be a zero vector.
10142 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
10145 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
10148 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
10153 bool Reverse1 = (NonZeros & 0x3) == 2;
10154 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
10158 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
10159 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
10161 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
10164 assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
10166   // Check for a build vector constructed mostly from a shuffle plus a few element insertions.
10167 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
10170 // For SSE 4.1, use insertps to put the high elements into the low element.
10171 if (Subtarget.hasSSE41()) {
10173 if (!Op.getOperand(0).isUndef())
10174 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
10176 Result = DAG.getUNDEF(VT);
10178 for (unsigned i = 1; i < NumElems; ++i) {
10179 if (Op.getOperand(i).isUndef()) continue;
10180 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
10181 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
10186 // Otherwise, expand into a number of unpckl*, start by extending each of
10187 // our (non-undef) elements to the full vector width with the element in the
10188 // bottom slot of the vector (which generates no code for SSE).
10189 SmallVector<SDValue, 8> Ops(NumElems);
10190 for (unsigned i = 0; i < NumElems; ++i) {
10191 if (!Op.getOperand(i).isUndef())
10192 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
10194 Ops[i] = DAG.getUNDEF(VT);
10197 // Next, we iteratively mix elements, e.g. for v4f32:
10198 // Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
10199 // : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
10200 // Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
10201 for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
10202 // Generate scaled UNPCKL shuffle mask.
10203 SmallVector<int, 16> Mask;
10204 for(unsigned i = 0; i != Scale; ++i)
10206 for (unsigned i = 0; i != Scale; ++i)
10207 Mask.push_back(NumElems+i);
10208 Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
10210 for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
10211 Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
10216 // 256-bit AVX can use the vinsertf128 instruction
10217 // to create 256-bit vectors from two other 128-bit ones.
10218 // TODO: Detect subvector broadcast here instead of DAG combine?
10219 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
10220 const X86Subtarget &Subtarget) {
10222 MVT ResVT = Op.getSimpleValueType();
10224 assert((ResVT.is256BitVector() ||
10225 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
10227 unsigned NumOperands = Op.getNumOperands();
10228 unsigned NumZero = 0;
10229 unsigned NumNonZero = 0;
10230 unsigned NonZeros = 0;
10231 for (unsigned i = 0; i != NumOperands; ++i) {
10232 SDValue SubVec = Op.getOperand(i);
10233 if (SubVec.isUndef())
10235 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10238 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10239 NonZeros |= 1 << i;
10244 // If we have more than 2 non-zeros, build each half separately.
10245 if (NumNonZero > 2) {
10246 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10247 ArrayRef<SDUse> Ops = Op->ops();
10248 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10249 Ops.slice(0, NumOperands/2));
10250 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10251 Ops.slice(NumOperands/2));
10252 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10255 // Otherwise, build it up through insert_subvectors.
10256 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
10257 : DAG.getUNDEF(ResVT);
10259 MVT SubVT = Op.getOperand(0).getSimpleValueType();
10260 unsigned NumSubElems = SubVT.getVectorNumElements();
10261 for (unsigned i = 0; i != NumOperands; ++i) {
10262 if ((NonZeros & (1 << i)) == 0)
10265 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
10267 DAG.getIntPtrConstant(i * NumSubElems, dl));
10273 // Returns true if the given node is a type promotion (by concatenating i1
10274 // zeros) of the result of a node that already zeros all upper bits of the k-register.
10276 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
10277 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
10278 const X86Subtarget &Subtarget,
10279 SelectionDAG & DAG) {
10281 MVT ResVT = Op.getSimpleValueType();
10282 unsigned NumOperands = Op.getNumOperands();
10284 assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
10285 "Unexpected number of operands in CONCAT_VECTORS");
10287 uint64_t Zeros = 0;
10288 uint64_t NonZeros = 0;
10289 for (unsigned i = 0; i != NumOperands; ++i) {
10290 SDValue SubVec = Op.getOperand(i);
10291 if (SubVec.isUndef())
10293 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10294 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10295 Zeros |= (uint64_t)1 << i;
10297 NonZeros |= (uint64_t)1 << i;
10300 unsigned NumElems = ResVT.getVectorNumElements();
10302   // If we are inserting a non-zero vector and there are zeros in the LSBs and undef
10303 // in the MSBs we need to emit a KSHIFTL. The generic lowering to
10304 // insert_subvector will give us two kshifts.
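  // Illustrative example (editor's sketch): for
  //   (v16i1 concat_vectors zeroinitializer(v4i1), X(v4i1), undef, undef)
  // the code inserts X into an undef v16i1, shifts it left by four lanes with a
  // single KSHIFTL, and extracts the result, so the low zero lanes come from
  // the shift itself rather than from the two kshifts generic lowering emits.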
10305 if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
10306 Log2_64(NonZeros) != NumOperands - 1) {
10307 MVT ShiftVT = ResVT;
10308 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
10309 ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
10310 unsigned Idx = Log2_64(NonZeros);
10311 SDValue SubVec = Op.getOperand(Idx);
10312 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10313 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
10314 DAG.getUNDEF(ShiftVT), SubVec,
10315 DAG.getIntPtrConstant(0, dl));
10316 Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
10317 DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
10318 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
10319 DAG.getIntPtrConstant(0, dl));
10322 // If there are zero or one non-zeros we can handle this very simply.
10323 if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
10324 SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
10327 unsigned Idx = Log2_64(NonZeros);
10328 SDValue SubVec = Op.getOperand(Idx);
10329 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10330 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
10331 DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
10334 if (NumOperands > 2) {
10335 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10336 ArrayRef<SDUse> Ops = Op->ops();
10337 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10338 Ops.slice(0, NumOperands/2));
10339 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10340 Ops.slice(NumOperands/2));
10341 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10344 assert(countPopulation(NonZeros) == 2 && "Simple cases not handled?");
10346 if (ResVT.getVectorNumElements() >= 16)
10347 return Op; // The operation is legal with KUNPCK
10349 SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
10350 DAG.getUNDEF(ResVT), Op.getOperand(0),
10351 DAG.getIntPtrConstant(0, dl));
10352 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
10353 DAG.getIntPtrConstant(NumElems/2, dl));
10356 static SDValue LowerCONCAT_VECTORS(SDValue Op,
10357 const X86Subtarget &Subtarget,
10358 SelectionDAG &DAG) {
10359 MVT VT = Op.getSimpleValueType();
10360 if (VT.getVectorElementType() == MVT::i1)
10361 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
10363 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
10364 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
10365 Op.getNumOperands() == 4)));
10367 // AVX can use the vinsertf128 instruction to create 256-bit vectors
10368 // from two other 128-bit ones.
10370 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
10371 return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
10374 //===----------------------------------------------------------------------===//
10375 // Vector shuffle lowering
10377 // This is an experimental code path for lowering vector shuffles on x86. It is
10378 // designed to handle arbitrary vector shuffles and blends, gracefully
10379 // degrading performance as necessary. It works hard to recognize idiomatic
10380 // shuffles and lower them to optimal instruction patterns without leaving
10381 // a framework that allows reasonably efficient handling of all vector shuffle operations.
10383 //===----------------------------------------------------------------------===//
10385 /// Tiny helper function to identify a no-op mask.
10387 /// This is a somewhat boring predicate function. It checks whether the mask
10388 /// array input, which is assumed to be a single-input shuffle mask of the kind
10389 /// used by the X86 shuffle instructions (not a fully general
10390 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
10391 /// in-place shuffle are 'no-op's.
10392 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
10393 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10394 assert(Mask[i] >= -1 && "Out of bound mask element!");
10395 if (Mask[i] >= 0 && Mask[i] != i)
10401 /// Test whether there are elements crossing LaneSizeInBits lanes in this shuffle mask.
10404 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
10405 /// and we routinely test for these.
10406 static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
10407 unsigned ScalarSizeInBits,
10408 ArrayRef<int> Mask) {
10409 assert(LaneSizeInBits && ScalarSizeInBits &&
10410 (LaneSizeInBits % ScalarSizeInBits) == 0 &&
10411 "Illegal shuffle lane size");
10412 int LaneSize = LaneSizeInBits / ScalarSizeInBits;
10413 int Size = Mask.size();
10414 for (int i = 0; i < Size; ++i)
10415 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10420 /// Test whether there are elements crossing 128-bit lanes in this shuffle mask.
10422 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
10423 return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
10426 /// Test whether a shuffle mask is equivalent within each sub-lane.
10428 /// This checks a shuffle mask to see if it is performing the same
10429 /// lane-relative shuffle in each sub-lane. This trivially implies
10430 /// that it is also not lane-crossing. It may however involve a blend from the
10431 /// same lane of a second vector.
10433 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
10434 /// non-trivial to compute in the face of undef lanes. The representation is
10435 /// suitable for use with existing 128-bit shuffles as entries from the second
10436 /// vector have been remapped to [LaneSize, 2*LaneSize).
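/// For example (editor's illustration), the v8i32 mask
///   <1, 0, 11, 10, 5, 4, 15, 14>
/// performs the same shuffle within each 128-bit lane and yields the 4-element
/// RepeatedMask <1, 0, 7, 6>, with the second-vector entries remapped into
/// [4, 8).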
10437 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
10438 ArrayRef<int> Mask,
10439 SmallVectorImpl<int> &RepeatedMask) {
10440 auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10441 RepeatedMask.assign(LaneSize, -1);
10442 int Size = Mask.size();
10443 for (int i = 0; i < Size; ++i) {
10444 assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
10447 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10448 // This entry crosses lanes, so there is no way to model this shuffle.
10451 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10452 // Adjust second vector indices to start at LaneSize instead of Size.
10453 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
10454 : Mask[i] % LaneSize + LaneSize;
10455 if (RepeatedMask[i % LaneSize] < 0)
10456 // This is the first non-undef entry in this slot of a 128-bit lane.
10457 RepeatedMask[i % LaneSize] = LocalM;
10458 else if (RepeatedMask[i % LaneSize] != LocalM)
10459 // Found a mismatch with the repeated mask.
10465 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
10467 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10468 SmallVectorImpl<int> &RepeatedMask) {
10469 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10473 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
10474 SmallVector<int, 32> RepeatedMask;
10475 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10478 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
10480 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10481 SmallVectorImpl<int> &RepeatedMask) {
10482 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
10485 /// Test whether a target shuffle mask is equivalent within each sub-lane.
10486 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
10487 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
10488 ArrayRef<int> Mask,
10489 SmallVectorImpl<int> &RepeatedMask) {
10490 int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10491 RepeatedMask.assign(LaneSize, SM_SentinelUndef);
10492 int Size = Mask.size();
10493 for (int i = 0; i < Size; ++i) {
10494 assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
10495 if (Mask[i] == SM_SentinelUndef)
10497 if (Mask[i] == SM_SentinelZero) {
10498 if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
10500 RepeatedMask[i % LaneSize] = SM_SentinelZero;
10503 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10504 // This entry crosses lanes, so there is no way to model this shuffle.
10507 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10508 // Adjust second vector indices to start at LaneSize instead of Size.
10510 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
10511 if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
10512 // This is the first non-undef entry in this slot of a 128-bit lane.
10513 RepeatedMask[i % LaneSize] = LocalM;
10514 else if (RepeatedMask[i % LaneSize] != LocalM)
10515 // Found a mismatch with the repeated mask.
10521 /// Checks whether a shuffle mask is equivalent to an explicit list of arguments.
10524 /// This is a fast way to test a shuffle mask against a fixed pattern:
10526 /// if (isShuffleEquivalent(Mask, 3, 2, {1, 0})) { ... }
10528 /// It returns true if the mask is exactly as wide as the argument list, and
10529 /// each element of the mask is either -1 (signifying undef) or the value given
10530 /// in the argument.
10531 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
10532 ArrayRef<int> ExpectedMask) {
10533 if (Mask.size() != ExpectedMask.size())
10536 int Size = Mask.size();
10538 // If the values are build vectors, we can look through them to find
10539 // equivalent inputs that make the shuffles equivalent.
10540 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
10541 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
10543 for (int i = 0; i < Size; ++i) {
10544 assert(Mask[i] >= -1 && "Out of bound mask element!");
10545 if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
10546 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10547 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10548 if (!MaskBV || !ExpectedBV ||
10549 MaskBV->getOperand(Mask[i] % Size) !=
10550 ExpectedBV->getOperand(ExpectedMask[i] % Size))
10558 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
10560 /// The masks must be exactly the same width.
10562 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
10563 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
10565 /// SM_SentinelZero is accepted as a valid negative index but must match in both.
10567 static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
10568 ArrayRef<int> ExpectedMask,
10569 SDValue V1 = SDValue(),
10570 SDValue V2 = SDValue()) {
10571 int Size = Mask.size();
10572 if (Size != (int)ExpectedMask.size())
10574 assert(isUndefOrZeroOrInRange(ExpectedMask, 0, 2 * Size) &&
10575 "Illegal target shuffle mask");
10577 // Check for out-of-range target shuffle mask indices.
10578 if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
10581 // If the values are build vectors, we can look through them to find
10582 // equivalent inputs that make the shuffles equivalent.
10583 auto *BV1 = dyn_cast_or_null<BuildVectorSDNode>(V1);
10584 auto *BV2 = dyn_cast_or_null<BuildVectorSDNode>(V2);
10585 BV1 = ((BV1 && Size != (int)BV1->getNumOperands()) ? nullptr : BV1);
10586 BV2 = ((BV2 && Size != (int)BV2->getNumOperands()) ? nullptr : BV2);
10588 for (int i = 0; i < Size; ++i) {
10589 if (Mask[i] == SM_SentinelUndef || Mask[i] == ExpectedMask[i])
10591 if (0 <= Mask[i] && 0 <= ExpectedMask[i]) {
10592 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10593 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10594 if (MaskBV && ExpectedBV &&
10595 MaskBV->getOperand(Mask[i] % Size) ==
10596 ExpectedBV->getOperand(ExpectedMask[i] % Size))
10599 // TODO - handle SM_Sentinel equivalences.
10605 // Attempt to create a shuffle mask from a VSELECT condition mask.
10606 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
10608 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
10611 unsigned Size = Cond.getValueType().getVectorNumElements();
10612 Mask.resize(Size, SM_SentinelUndef);
10614 for (int i = 0; i != (int)Size; ++i) {
10615 SDValue CondElt = Cond.getOperand(i);
10617     // Arbitrarily choose from the 2nd operand if the select condition element is undef.
10619 // TODO: Can we do better by matching patterns such as even/odd?
10620 if (CondElt.isUndef() || isNullConstant(CondElt))
10627 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd instructions.
10629 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
10630 if (VT != MVT::v8i32 && VT != MVT::v8f32)
10633 SmallVector<int, 8> Unpcklwd;
10634 createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
10635 /* Unary = */ false);
10636 SmallVector<int, 8> Unpckhwd;
10637 createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
10638 /* Unary = */ false);
10639 bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
10640 isTargetShuffleEquivalent(Mask, Unpckhwd));
10641 return IsUnpackwdMask;
10644 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
10645 // Create 128-bit vector type based on mask size.
10646 MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
10647 MVT VT = MVT::getVectorVT(EltVT, Mask.size());
10649 // We can't assume a canonical shuffle mask, so try the commuted version too.
10650 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
10651 ShuffleVectorSDNode::commuteMask(CommutedMask);
10653 // Match any of unary/binary or low/high.
10654 for (unsigned i = 0; i != 4; ++i) {
10655 SmallVector<int, 16> UnpackMask;
10656 createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
10657 if (isTargetShuffleEquivalent(Mask, UnpackMask) ||
10658 isTargetShuffleEquivalent(CommutedMask, UnpackMask))
10664 /// Return true if a shuffle mask chooses elements identically in its top and
10665 /// bottom halves. For example, any splat mask has the same top and bottom
10666 /// halves. If an element is undefined in only one half of the mask, the halves
10667 /// are not considered identical.
10668 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
10669 assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
10670 unsigned HalfSize = Mask.size() / 2;
10671 for (unsigned i = 0; i != HalfSize; ++i) {
10672 if (Mask[i] != Mask[i + HalfSize])
10678 /// Get a 4-lane 8-bit shuffle immediate for a mask.
10680 /// This helper function produces an 8-bit shuffle immediate corresponding to
10681 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
10682 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for example.
10685 /// NB: We rely heavily on "undef" masks preserving the input lane.
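/// For example (editor's illustration), the mask <3, 1, 2, 0> encodes as
///   3 | (1 << 2) | (2 << 4) | (0 << 6) == 0x27,
/// and undef (-1) entries default to their own lane number so the input lane
/// is preserved.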
10686 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
10687 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
10688 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
10689 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
10690 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
10691 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
10694 Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
10695 Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
10696 Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
10697 Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
10701 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
10702 SelectionDAG &DAG) {
10703 return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
10706 // The shuffle result is formed as follows:
10707 // 0*a[0], 0*a[1], ..., 0*a[n], n >= 0, where the a[] elements are in ascending order.
10708 // Each Zeroable element corresponds to a particular Mask element,
10709 // as described in the computeZeroableShuffleElements function.
10711 // The function looks for a sub-mask in which the nonzero elements are in
10712 // increasing order. If such a sub-mask exists, the function returns true.
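// For example (illustrative): on v8i32 with elements 0 and 1 zeroable and the
// remaining mask elements <8, 9, 10, 11, 12, 13>, the nonzero elements are in
// increasing order, so this returns true with IsZeroSideLeft set.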
10713 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
10714 ArrayRef<int> Mask, const EVT &VectorType,
10715 bool &IsZeroSideLeft) {
10716 int NextElement = -1;
10717 // Check if the Mask's nonzero elements are in increasing order.
10718 for (int i = 0, e = Mask.size(); i < e; i++) {
10720 // Checks that the mask's zeroable elements are built from only zeros.
10720 assert(Mask[i] >= -1 && "Out of bound mask element!");
10725 // Find the lowest non-zero element.
10726 if (NextElement < 0) {
10727 NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
10728 IsZeroSideLeft = NextElement != 0;
10730 // Exit if the mask's non-zero elements are not in increasing order.
10731 if (NextElement != Mask[i])
10738 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
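/// For example (illustrative): for a v8i16 shuffle, mask element M at position
/// i expands to the control bytes 2*M and 2*M+1 at byte positions 2*i and
/// 2*i+1, while zeroable elements use 0x80 to force a zero byte.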
10739 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
10740 ArrayRef<int> Mask, SDValue V1,
10741 SDValue V2, const APInt &Zeroable,
10742 const X86Subtarget &Subtarget,
10743 SelectionDAG &DAG) {
10744 int Size = Mask.size();
10745 int LaneSize = 128 / VT.getScalarSizeInBits();
10746 const int NumBytes = VT.getSizeInBits() / 8;
10747 const int NumEltBytes = VT.getScalarSizeInBits() / 8;
10749 assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
10750 (Subtarget.hasAVX2() && VT.is256BitVector()) ||
10751 (Subtarget.hasBWI() && VT.is512BitVector()));
10753 SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
10754 // Sign bit set in i8 mask means zero element.
10755 SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
10758 for (int i = 0; i < NumBytes; ++i) {
10759 int M = Mask[i / NumEltBytes];
10761 PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
10764 if (Zeroable[i / NumEltBytes]) {
10765 PSHUFBMask[i] = ZeroMask;
10769 // We can only use a single input of V1 or V2.
10770 SDValue SrcV = (M >= Size ? V2 : V1);
10771 if (V && V != SrcV)
10776 // PSHUFB can't cross lanes, ensure this doesn't happen.
10777 if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
10781 M = M * NumEltBytes + (i % NumEltBytes);
10782 PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
10784 assert(V && "Failed to find a source input");
10786 MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
10787 return DAG.getBitcast(
10788 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
10789 DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
10792 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
10793 const X86Subtarget &Subtarget, SelectionDAG &DAG,
10796 // X86 has a dedicated shuffle pattern that can be lowered to VEXPAND.
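// For example (illustrative): on v8i32 with elements 0 and 1 zeroable and the
// remaining mask elements <8, 9, 10, 11, 12, 13>, the expand mask is
// ~Zeroable = 0xFC and the expanded source is V2, giving
// <0, 0, V2[0], ..., V2[5]>.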
10797 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
10798 const APInt &Zeroable,
10799 ArrayRef<int> Mask, SDValue &V1,
10800 SDValue &V2, SelectionDAG &DAG,
10801 const X86Subtarget &Subtarget) {
10802 bool IsLeftZeroSide = true;
10803 if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
10806 unsigned VEXPANDMask = (~Zeroable).getZExtValue();
10808 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
10809 SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
10810 unsigned NumElts = VT.getVectorNumElements();
10811 assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
10812 "Unexpected number of vector elements");
10813 SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
10814 Subtarget, DAG, DL);
10815 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
10816 SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
10817 return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
10820 static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
10821 unsigned &UnpackOpcode, bool IsUnary,
10822 ArrayRef<int> TargetMask, const SDLoc &DL,
10824 const X86Subtarget &Subtarget) {
10825 int NumElts = VT.getVectorNumElements();
10827 bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
10828 for (int i = 0; i != NumElts; i += 2) {
10829 int M1 = TargetMask[i + 0];
10830 int M2 = TargetMask[i + 1];
10831 Undef1 &= (SM_SentinelUndef == M1);
10832 Undef2 &= (SM_SentinelUndef == M2);
10833 Zero1 &= isUndefOrZero(M1);
10834 Zero2 &= isUndefOrZero(M2);
10836 assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
10837 "Zeroable shuffle detected");
10839 // Attempt to match the target mask against the unpack lo/hi mask patterns.
10840 SmallVector<int, 64> Unpckl, Unpckh;
10841 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
10842 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10843 UnpackOpcode = X86ISD::UNPCKL;
10844 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10845 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10849 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
10850 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10851 UnpackOpcode = X86ISD::UNPCKH;
10852 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10853 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10857 // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
10858 if (IsUnary && (Zero1 || Zero2)) {
10859 // Don't bother if we can blend instead.
10860 if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
10861 isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
10864 bool MatchLo = true, MatchHi = true;
10865 for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
10866 int M = TargetMask[i];
10868 // Ignore if the input is known to be zero or the index is undef.
10869 if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
10870 (M == SM_SentinelUndef))
10873 MatchLo &= (M == Unpckl[i]);
10874 MatchHi &= (M == Unpckh[i]);
10877 if (MatchLo || MatchHi) {
10878 UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
10879 V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10880 V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10885 // If a binary shuffle, commute and try again.
10887 ShuffleVectorSDNode::commuteMask(Unpckl);
10888 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10889 UnpackOpcode = X86ISD::UNPCKL;
10894 ShuffleVectorSDNode::commuteMask(Unpckh);
10895 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10896 UnpackOpcode = X86ISD::UNPCKH;
10905 // X86 has dedicated unpack instructions that can handle specific blend
10906 // operations: UNPCKH and UNPCKL.
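// For example, on v4i32 the UNPCKL pattern is <0, 4, 1, 5> and the UNPCKH
// pattern is <2, 6, 3, 7>, interleaving the low or high halves of V1 and V2.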
10907 static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
10908 ArrayRef<int> Mask, SDValue V1, SDValue V2,
10909 SelectionDAG &DAG) {
10910 SmallVector<int, 8> Unpckl;
10911 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
10912 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10913 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
10915 SmallVector<int, 8> Unpckh;
10916 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
10917 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10918 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
10920 // Commute and try again.
10921 ShuffleVectorSDNode::commuteMask(Unpckl);
10922 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10923 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
10925 ShuffleVectorSDNode::commuteMask(Unpckh);
10926 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10927 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
10932 static bool matchShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
10934 int Size = (int)Mask.size();
10935 int Split = Size / Delta;
10936 int TruncatedVectorStart = SwappedOps ? Size : 0;
10938 // Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
10939 if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
10942 // The rest of the mask should not refer to the truncated vector's elements.
10943 if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
10944 TruncatedVectorStart + Size))
10950 // Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
10952 // An example is the following:
10954 // t0: ch = EntryToken
10955 // t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
10956 // t25: v4i32 = truncate t2
10957 // t41: v8i16 = bitcast t25
10958 // t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
10959 // Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
10960 // t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
10961 // t18: v2i64 = bitcast t51
10963 // Without avx512vl, this is lowered to:
10965 // vpmovqd %zmm0, %ymm0
10966 // vpshufb {{.*#+}} xmm0 =
10967 // xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
10969 // But when avx512vl is available, one can just use a single vpmovdw
10971 static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
10972 MVT VT, SDValue V1, SDValue V2,
10974 const X86Subtarget &Subtarget) {
10975 if (VT != MVT::v16i8 && VT != MVT::v8i16)
10978 if (Mask.size() != VT.getVectorNumElements())
10981 bool SwappedOps = false;
10983 if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
10984 if (!ISD::isBuildVectorAllZeros(V1.getNode()))
10993 // bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
10994 // bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
10996 // and similar ones.
10997 if (V1.getOpcode() != ISD::BITCAST)
10999 if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
11002 SDValue Src = V1.getOperand(0).getOperand(0);
11003 MVT SrcVT = Src.getSimpleValueType();
11005 // The vptrunc** instructions truncating 128 bit and 256 bit vectors
11006 // are only available with avx512vl.
11007 if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
11010 // Down Convert Word to Byte is only available with avx512bw. The case with
11011 // 256-bit output doesn't contain a shuffle and is therefore not handled here.
11012 if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
11013 !Subtarget.hasBWI())
11016 // The first half/quarter of the mask should refer to every second/fourth
11017 // element of the vector truncated and bitcasted.
11018 if (!matchShuffleAsVPMOV(Mask, SwappedOps, 2) &&
11019 !matchShuffleAsVPMOV(Mask, SwappedOps, 4))
11022 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
11025 // X86 has dedicated pack instructions that can handle specific truncation
11026 // operations: PACKSS and PACKUS.
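// For example (illustrative): packing two v8i16 sources into a v16i8 result
// keeps the low byte of every word, i.e. the byte shuffle <0, 2, 4, ..., 30>.
// PACKUS is usable when the discarded high byte of each word is known zero,
// PACKSS when each word is a sign-extended byte.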
11027 static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
11028 unsigned &PackOpcode, ArrayRef<int> TargetMask,
11030 const X86Subtarget &Subtarget) {
11031 unsigned NumElts = VT.getVectorNumElements();
11032 unsigned BitSize = VT.getScalarSizeInBits();
11033 MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
11034 MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);
11036 auto MatchPACK = [&](SDValue N1, SDValue N2) {
11037 SDValue VV1 = DAG.getBitcast(PackVT, N1);
11038 SDValue VV2 = DAG.getBitcast(PackVT, N2);
11039 if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
11040 APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
11041 if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
11042 (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
11046 PackOpcode = X86ISD::PACKUS;
11050 if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
11051 (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize)) {
11055 PackOpcode = X86ISD::PACKSS;
11061 // Try binary shuffle.
11062 SmallVector<int, 32> BinaryMask;
11063 createPackShuffleMask(VT, BinaryMask, false);
11064 if (isTargetShuffleEquivalent(TargetMask, BinaryMask, V1, V2))
11065 if (MatchPACK(V1, V2))
11068 // Try unary shuffle.
11069 SmallVector<int, 32> UnaryMask;
11070 createPackShuffleMask(VT, UnaryMask, true);
11071 if (isTargetShuffleEquivalent(TargetMask, UnaryMask, V1))
11072 if (MatchPACK(V1, V1))
11078 static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
11079 SDValue V1, SDValue V2, SelectionDAG &DAG,
11080 const X86Subtarget &Subtarget) {
11082 unsigned PackOpcode;
11083 if (matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
11085 return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
11086 DAG.getBitcast(PackVT, V2));
11091 /// Try to emit a bitmask instruction for a shuffle.
11093 /// This handles cases where we can model a blend exactly as a bitmask due to
11094 /// one of the inputs being zeroable.
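/// For example (illustrative): a v4i32 shuffle that keeps elements 0 and 2 of
/// V1 and zeroes elements 1 and 3 can be lowered as V1 & <-1, 0, -1, 0>.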
11095 static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
11096 SDValue V2, ArrayRef<int> Mask,
11097 const APInt &Zeroable,
11098 const X86Subtarget &Subtarget,
11099 SelectionDAG &DAG) {
11101 MVT EltVT = VT.getVectorElementType();
11102 SDValue Zero, AllOnes;
11103 // Use f64 if i64 isn't legal.
11104 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
11106 MaskVT = MVT::getVectorVT(EltVT, Mask.size());
11110 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
11111 Zero = DAG.getConstantFP(0.0, DL, EltVT);
11112 AllOnes = DAG.getConstantFP(
11113 APFloat::getAllOnesValue(EltVT.getSizeInBits(), true), DL, EltVT);
11115 MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
11117 Zero = DAG.getConstant(0, DL, EltVT);
11118 AllOnes = DAG.getAllOnesConstant(DL, EltVT);
11121 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
11123 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11126 if (Mask[i] % Size != i)
11127 return SDValue(); // Not a blend.
11129 V = Mask[i] < Size ? V1 : V2;
11130 else if (V != (Mask[i] < Size ? V1 : V2))
11131 return SDValue(); // Can only let one input through the mask.
11133 VMaskOps[i] = AllOnes;
11136 return SDValue(); // No non-zeroable elements!
11138 SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
11139 VMask = DAG.getBitcast(LogicVT, VMask);
11140 V = DAG.getBitcast(LogicVT, V);
11141 SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
11142 return DAG.getBitcast(VT, And);
11145 /// Try to emit a blend instruction for a shuffle using bit math.
11147 /// This is used as a fallback approach when first class blend instructions are
11148 /// unavailable. Currently it is only suitable for integer vectors, but could
11149 /// be generalized for floating point vectors if desirable.
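/// For example (illustrative): the v4i32 blend <0, 5, 2, 7> becomes
/// (V1 & <-1, 0, -1, 0>) | (V2 & ~<-1, 0, -1, 0>).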
11150 static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
11151 SDValue V2, ArrayRef<int> Mask,
11152 SelectionDAG &DAG) {
11153 assert(VT.isInteger() && "Only supports integer vector types!");
11154 MVT EltVT = VT.getVectorElementType();
11155 SDValue Zero = DAG.getConstant(0, DL, EltVT);
11156 SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
11157 SmallVector<SDValue, 16> MaskOps;
11158 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11159 if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
11160 return SDValue(); // Shuffled input!
11161 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
11164 SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
11165 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
11166 V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
11167 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
11170 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
11171 SDValue PreservedSrc,
11172 const X86Subtarget &Subtarget,
11173 SelectionDAG &DAG);
11175 static bool matchShuffleAsBlend(SDValue V1, SDValue V2,
11176 MutableArrayRef<int> Mask,
11177 const APInt &Zeroable, bool &ForceV1Zero,
11178 bool &ForceV2Zero, uint64_t &BlendMask) {
11179 bool V1IsZeroOrUndef =
11180 V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
11181 bool V2IsZeroOrUndef =
11182 V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
11185 ForceV1Zero = false, ForceV2Zero = false;
11186 assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
11188 // Attempt to generate the binary blend mask. If an input is zero then
11189 // we can use any lane.
11190 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11192 if (M == SM_SentinelUndef)
11196 if (M == i + Size) {
11197 BlendMask |= 1ull << i;
11201 if (V1IsZeroOrUndef) {
11202 ForceV1Zero = true;
11206 if (V2IsZeroOrUndef) {
11207 ForceV2Zero = true;
11208 BlendMask |= 1ull << i;
11209 Mask[i] = i + Size;
11218 static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
11220 uint64_t ScaledMask = 0;
11221 for (int i = 0; i != Size; ++i)
11222 if (BlendMask & (1ull << i))
11223 ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
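  // E.g. scaling BlendMask 0b0101 with Size = 4 and Scale = 2 yields 0b00110011.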
11227 /// Try to emit a blend instruction for a shuffle.
11229 /// This doesn't do any checks for the availability of instructions for blending
11230 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
11231 /// be matched in the backend with the type given. What it does check for is
11232 /// that the shuffle mask is a blend, or convertible into a blend with zero.
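/// For example, the v4f32 shuffle <0, 5, 2, 7> is a blend with
/// BlendMask 0b1010 (bit i is set when element i is taken from V2).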
11233 static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
11234 SDValue V2, ArrayRef<int> Original,
11235 const APInt &Zeroable,
11236 const X86Subtarget &Subtarget,
11237 SelectionDAG &DAG) {
11238 uint64_t BlendMask = 0;
11239 bool ForceV1Zero = false, ForceV2Zero = false;
11240 SmallVector<int, 64> Mask(Original.begin(), Original.end());
11241 if (!matchShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
11245 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
11247 V1 = getZeroVector(VT, Subtarget, DAG, DL);
11249 V2 = getZeroVector(VT, Subtarget, DAG, DL);
11251 switch (VT.SimpleTy) {
11254 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
11258 assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
11265 assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
11266 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
11267 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11268 case MVT::v16i16: {
11269 assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
11270 SmallVector<int, 8> RepeatedMask;
11271 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
11272 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
11273 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
11275 for (int i = 0; i < 8; ++i)
11276 if (RepeatedMask[i] >= 8)
11277 BlendMask |= 1ull << i;
11278 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11279 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11281 // Use PBLENDW for lower/upper lanes and then blend lanes.
11282 // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
11283 // merge to VSELECT where useful.
11284 uint64_t LoMask = BlendMask & 0xFF;
11285 uint64_t HiMask = (BlendMask >> 8) & 0xFF;
11286 if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
11287 SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11288 DAG.getTargetConstant(LoMask, DL, MVT::i8));
11289 SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11290 DAG.getTargetConstant(HiMask, DL, MVT::i8));
11291 return DAG.getVectorShuffle(
11292 MVT::v16i16, DL, Lo, Hi,
11293 {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
11298 assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
11301 assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
11303 // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
11304 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11308 if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
11310 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11311 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11312 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11315 // Scale the blend by the number of bytes per element.
11316 int Scale = VT.getScalarSizeInBits() / 8;
11318 // This form of blend is always done on bytes. Compute the byte vector
11320 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11322 // x86 allows load folding with blendvb from the 2nd source operand. But
11323 // we are still using LLVM select here (see comment below), so that's V1.
11324 // If V2 can be load-folded and V1 cannot be load-folded, then commute to
11325 // allow that load-folding possibility.
11326 if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
11327 ShuffleVectorSDNode::commuteMask(Mask);
11331 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
11332 // mix of LLVM's code generator and the x86 backend. We tell the code
11333 // generator that boolean values in the elements of an x86 vector register
11334 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
11335 // mapping a select to operand #1, and 'false' mapping to operand #2. The
11336 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
11337 // of the element (the remaining are ignored) and 0 in that high bit would
11338 // mean operand #1 while 1 in the high bit would mean operand #2. So while
11339 // the LLVM model for boolean values in vector elements gets the relevant
11340 // bit set, it is set backwards and over constrained relative to x86's
11342 SmallVector<SDValue, 32> VSELECTMask;
11343 for (int i = 0, Size = Mask.size(); i < Size; ++i)
11344 for (int j = 0; j < Scale; ++j)
11345 VSELECTMask.push_back(
11346 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
11347 : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
11350 V1 = DAG.getBitcast(BlendVT, V1);
11351 V2 = DAG.getBitcast(BlendVT, V2);
11352 return DAG.getBitcast(
11354 DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
11363 // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
11364 bool OptForSize = DAG.shouldOptForSize();
11366 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11371 // Otherwise load an immediate into a GPR, cast to k-register, and use a
11374 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11375 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11376 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11379 llvm_unreachable("Not a supported integer vector type!");
11383 /// Try to lower as a blend of elements from two inputs followed by
11384 /// a single-input permutation.
11386 /// This matches the pattern where we can blend elements from two inputs and
11387 /// then reduce the shuffle to a single-input permutation.
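/// For example (illustrative): the v4i32 shuffle <2, 7, 0, 5> can be lowered
/// as the blend <0, 5, 2, 7> followed by the single-input permute <2, 3, 0, 1>.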
11388 static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
11389 SDValue V1, SDValue V2,
11390 ArrayRef<int> Mask,
11392 bool ImmBlends = false) {
11393 // We build up the blend mask while checking whether a blend is a viable way
11394 // to reduce the shuffle.
11395 SmallVector<int, 32> BlendMask(Mask.size(), -1);
11396 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
11398 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11402 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
11404 if (BlendMask[Mask[i] % Size] < 0)
11405 BlendMask[Mask[i] % Size] = Mask[i];
11406 else if (BlendMask[Mask[i] % Size] != Mask[i])
11407 return SDValue(); // Can't blend in the needed input!
11409 PermuteMask[i] = Mask[i] % Size;
11412 // If only immediate blends, then bail if the blend mask can't be widened to
11414 unsigned EltSize = VT.getScalarSizeInBits();
11415 if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
11418 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11419 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
11422 /// Try to lower as an unpack of elements from two inputs followed by
11423 /// a single-input permutation.
11425 /// This matches the pattern where we can unpack elements from two inputs and
11426 /// then reduce the shuffle to a single-input (wider) permutation.
11427 static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
11428 SDValue V1, SDValue V2,
11429 ArrayRef<int> Mask,
11430 SelectionDAG &DAG) {
11431 int NumElts = Mask.size();
11432 int NumLanes = VT.getSizeInBits() / 128;
11433 int NumLaneElts = NumElts / NumLanes;
11434 int NumHalfLaneElts = NumLaneElts / 2;
11436 bool MatchLo = true, MatchHi = true;
11437 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
11439 // Determine UNPCKL/UNPCKH type and operand order.
11440 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11441 for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
11442 int M = Mask[Lane + Elt];
11446 SDValue &Op = Ops[Elt & 1];
11447 if (M < NumElts && (Op.isUndef() || Op == V1))
11449 else if (NumElts <= M && (Op.isUndef() || Op == V2))
11454 int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
11455 MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
11456 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
11457 MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
11458 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
11459 if (!MatchLo && !MatchHi)
11463 assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
11465 // Now check that each pair of elts comes from the same unpack pair
11466 // and set the permute mask based on each pair.
11467 // TODO - Investigate cases where we permute individual elements.
11468 SmallVector<int, 32> PermuteMask(NumElts, -1);
11469 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11470 for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
11471 int M0 = Mask[Lane + Elt + 0];
11472 int M1 = Mask[Lane + Elt + 1];
11473 if (0 <= M0 && 0 <= M1 &&
11474 (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
11477 PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
11479 PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
11483 unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
11484 SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
11485 return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
11488 /// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
11489 /// permuting the elements of the result in place.
11490 static SDValue lowerShuffleAsByteRotateAndPermute(
11491 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11492 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11493 if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
11494 (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
11495 (VT.is512BitVector() && !Subtarget.hasBWI()))
11498 // We don't currently support lane crossing permutes.
11499 if (is128BitLaneCrossingShuffleMask(VT, Mask))
11502 int Scale = VT.getScalarSizeInBits() / 8;
11503 int NumLanes = VT.getSizeInBits() / 128;
11504 int NumElts = VT.getVectorNumElements();
11505 int NumEltsPerLane = NumElts / NumLanes;
11507 // Determine range of mask elts.
11508 bool Blend1 = true;
11509 bool Blend2 = true;
11510 std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
11511 std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
11512 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11513 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11514 int M = Mask[Lane + Elt];
11518 Blend1 &= (M == (Lane + Elt));
11519 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11520 M = M % NumEltsPerLane;
11521 Range1.first = std::min(Range1.first, M);
11522 Range1.second = std::max(Range1.second, M);
11525 Blend2 &= (M == (Lane + Elt));
11526 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11527 M = M % NumEltsPerLane;
11528 Range2.first = std::min(Range2.first, M);
11529 Range2.second = std::max(Range2.second, M);
11534 // Bail if we don't need both elements.
11535 // TODO - it might be worth doing this for unary shuffles if the permute
11537 if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
11538 !(0 <= Range2.first && Range2.second < NumEltsPerLane))
11541 if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
11544 // Rotate the 2 ops so we can access both ranges, then permute the result.
11545 auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
11546 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11547 SDValue Rotate = DAG.getBitcast(
11548 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
11549 DAG.getBitcast(ByteVT, Lo),
11550 DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
11551 SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
11552 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11553 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11554 int M = Mask[Lane + Elt];
11558 PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
11560 PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
11563 return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
11566 // Check if the ranges are small enough to rotate from either direction.
11567 if (Range2.second < Range1.first)
11568 return RotateAndPermute(V1, V2, Range1.first, 0);
11569 if (Range1.second < Range2.first)
11570 return RotateAndPermute(V2, V1, Range2.first, NumElts);
11574 /// Generic routine to decompose a shuffle and blend into independent
11575 /// blends and permutes.
11577 /// This matches the extremely common pattern for handling combined
11578 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
11579 /// operations. It will try to pick the best arrangement of shuffles and
11581 static SDValue lowerShuffleAsDecomposedShuffleBlend(
11582 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11583 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11584 // Shuffle the input elements into the desired positions in V1 and V2 and
11585 // blend them together.
11586 SmallVector<int, 32> V1Mask(Mask.size(), -1);
11587 SmallVector<int, 32> V2Mask(Mask.size(), -1);
11588 SmallVector<int, 32> BlendMask(Mask.size(), -1);
11589 for (int i = 0, Size = Mask.size(); i < Size; ++i)
11590 if (Mask[i] >= 0 && Mask[i] < Size) {
11591 V1Mask[i] = Mask[i];
11593 } else if (Mask[i] >= Size) {
11594 V2Mask[i] = Mask[i] - Size;
11595 BlendMask[i] = i + Size;
11598 // Try to lower with the simpler initial blend/unpack/rotate strategies unless
11599 // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
11600 // the shuffle may be able to fold with a load or other benefit. However, when
11601 // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
11602 // pre-shuffle first is a better strategy.
11603 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
11604 // Only prefer immediate blends to unpack/rotate.
11605 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11608 if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
11611 if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
11612 DL, VT, V1, V2, Mask, Subtarget, DAG))
11614 // Unpack/rotate failed - try again with variable blends.
11615 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11620 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
11621 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
11622 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11625 /// Try to lower a vector shuffle as a rotation.
11627 /// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
11628 static int matchShuffleAsRotate(SDValue &V1, SDValue &V2, ArrayRef<int> Mask) {
11629 int NumElts = Mask.size();
11631 // We need to detect various ways of spelling a rotation:
11632 // [11, 12, 13, 14, 15, 0, 1, 2]
11633 // [-1, 12, 13, 14, -1, -1, 1, -1]
11634 // [-1, -1, -1, -1, -1, -1, 1, 2]
11635 // [ 3, 4, 5, 6, 7, 8, 9, 10]
11636 // [-1, 4, 5, 6, -1, -1, 9, -1]
11637 // [-1, 4, 5, 6, -1, -1, -1, -1]
11640 for (int i = 0; i < NumElts; ++i) {
11642 assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
11643 "Unexpected mask index.");
11647 // Determine where a rotated vector would have started.
11648 int StartIdx = i - (M % NumElts);
11650 // The identity rotation isn't interesting, stop.
11653 // If we found the tail of a vector the rotation must be the missing
11654 // front. If we found the head of a vector, it must be how much of the
11656 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
11659 Rotation = CandidateRotation;
11660 else if (Rotation != CandidateRotation)
11661 // The rotations don't match, so we can't match this mask.
11664 // Compute which value this mask is pointing at.
11665 SDValue MaskV = M < NumElts ? V1 : V2;
11667 // Compute which of the two target values this index should be assigned
11668 // to. This reflects whether the high elements are remaining or the low
11669 // elements are remaining.
11670 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
11672 // Either set up this value if we've not encountered it before, or check
11673 // that it remains consistent.
11676 else if (TargetV != MaskV)
11677 // This may be a rotation, but it pulls from the inputs in some
11678 // unsupported interleaving.
11682 // Check that we successfully analyzed the mask, and normalize the results.
11683 assert(Rotation != 0 && "Failed to locate a viable rotation!");
11684 assert((Lo || Hi) && "Failed to find a rotated input vector!");
11696 /// Try to lower a vector shuffle as a byte rotation.
11698 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
11699 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
11700 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
11701 /// try to generically lower a vector shuffle through such a pattern. It
11702 /// does not check for the profitability of lowering either as PALIGNR or
11703 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
11704 /// This matches shuffle vectors that look like:
11706 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
11708 /// Essentially it concatenates V1 and V2, shifts right by some number of
11709 /// elements, and takes the low elements as the result. Note that while this is
11710 /// specified as a *right shift* because x86 is little-endian, it is a *left
11711 /// rotate* of the vector lanes.
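/// For example (illustrative): the v8i16 mask above is a rotation by 3
/// elements, which scales to a byte rotation (PALIGNR immediate) of 6.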
11712 static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
11713 ArrayRef<int> Mask) {
11714 // Don't accept any shuffles with zero elements.
11715 if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
11718 // PALIGNR works on 128-bit lanes.
11719 SmallVector<int, 16> RepeatedMask;
11720 if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
11723 int Rotation = matchShuffleAsRotate(V1, V2, RepeatedMask);
11727 // PALIGNR rotates bytes, so we need to scale the
11728 // rotation based on how many bytes are in the vector lane.
11729 int NumElts = RepeatedMask.size();
11730 int Scale = 16 / NumElts;
11731 return Rotation * Scale;
11734 static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
11735 SDValue V2, ArrayRef<int> Mask,
11736 const X86Subtarget &Subtarget,
11737 SelectionDAG &DAG) {
11738 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11740 SDValue Lo = V1, Hi = V2;
11741 int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
11742 if (ByteRotation <= 0)
11745 // Cast the inputs to i8 vector of correct length to match PALIGNR or
11747 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11748 Lo = DAG.getBitcast(ByteVT, Lo);
11749 Hi = DAG.getBitcast(ByteVT, Hi);
11751 // SSSE3 targets can use the palignr instruction.
11752 if (Subtarget.hasSSSE3()) {
11753 assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
11754 "512-bit PALIGNR requires BWI instructions");
11755 return DAG.getBitcast(
11756 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
11757 DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
11760 assert(VT.is128BitVector() &&
11761 "Rotate-based lowering only supports 128-bit lowering!");
11762 assert(Mask.size() <= 16 &&
11763 "Can shuffle at most 16 bytes in a 128-bit vector!");
11764 assert(ByteVT == MVT::v16i8 &&
11765 "SSE2 rotate lowering only needed for v16i8!");
11767 // Default SSE2 implementation
11768 int LoByteShift = 16 - ByteRotation;
11769 int HiByteShift = ByteRotation;
11772 DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
11773 DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
11775 DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
11776 DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
11777 return DAG.getBitcast(VT,
11778 DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
11781 /// Try to lower a vector shuffle as a dword/qword rotation.
11783 /// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
11784 /// rotation of the concatenation of two vectors; this routine will
11785 /// try to generically lower a vector shuffle through such a pattern.
11787 /// Essentially it concatenates V1 and V2, shifts right by some number of
11788 /// elements, and takes the low elements as the result. Note that while this is
11789 /// specified as a *right shift* because x86 is little-endian, it is a *left
11790 /// rotate* of the vector lanes.
11791 static SDValue lowerShuffleAsRotate(const SDLoc &DL, MVT VT, SDValue V1,
11792 SDValue V2, ArrayRef<int> Mask,
11793 const X86Subtarget &Subtarget,
11794 SelectionDAG &DAG) {
11795 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
11796 "Only 32-bit and 64-bit elements are supported!");
11798 // 128/256-bit vectors are only supported with VLX.
11799 assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
11800 && "VLX required for 128/256-bit vectors");
11802 SDValue Lo = V1, Hi = V2;
11803 int Rotation = matchShuffleAsRotate(Lo, Hi, Mask);
11807 return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
11808 DAG.getTargetConstant(Rotation, DL, MVT::i8));
11811 /// Try to lower a vector shuffle as a byte shift sequence.
11812 static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
11813 SDValue V2, ArrayRef<int> Mask,
11814 const APInt &Zeroable,
11815 const X86Subtarget &Subtarget,
11816 SelectionDAG &DAG) {
11817 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11818 assert(VT.is128BitVector() && "Only 128-bit vectors supported");
11820 // We need a shuffle that has zeros at one/both ends and a sequential
11821 // shuffle from one source within.
11822 unsigned ZeroLo = Zeroable.countTrailingOnes();
11823 unsigned ZeroHi = Zeroable.countLeadingOnes();
11824 if (!ZeroLo && !ZeroHi)
11827 unsigned NumElts = Mask.size();
11828 unsigned Len = NumElts - (ZeroLo + ZeroHi);
11829 if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
11832 unsigned Scale = VT.getScalarSizeInBits() / 8;
11833 ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
11834 if (!isUndefOrInRange(StubMask, 0, NumElts) &&
11835 !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
11838 SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11839 Res = DAG.getBitcast(MVT::v16i8, Res);
11841 // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
11842 // inner sequential set of elements, possibly offset:
11843 // 01234567 --> zzzzzz01 --> 1zzzzzzz
11844 // 01234567 --> 4567zzzz --> zzzzz456
11845 // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
11847 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11848 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11849 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11850 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11851 DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
11852 } else if (ZeroHi == 0) {
11853 unsigned Shift = Mask[ZeroLo] % NumElts;
11854 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11855 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11856 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11857 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11858 } else if (!Subtarget.hasSSSE3()) {
11859 // If we don't have PSHUFB then it's worth avoiding an AND constant mask
11860 // by performing 3 byte shifts. Shuffle combining can kick in above that.
11861 // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
11862 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11863 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11864 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11865 Shift += Mask[ZeroLo] % NumElts;
11866 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11867 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11868 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11869 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11873 return DAG.getBitcast(VT, Res);
11876 /// Try to lower a vector shuffle as a bit shift (shifts in zeros).
11878 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
11879 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
11880 /// matches elements from one of the input vectors shuffled to the left or
11881 /// right with zeroable elements 'shifted in'. It handles both the strictly
11882 /// bit-wise element shifts and the byte shift across an entire 128-bit double
11883 /// quad word lane.
11885 /// PSLL : (little-endian) left bit shift.
11886 /// [ zz, 0, zz, 2 ]
11887 /// [ -1, 4, zz, -1 ]
11888 /// PSRL : (little-endian) right bit shift.
11889 /// [ 1, zz, 3, zz]
11890 /// [ -1, -1, 7, zz]
11891 /// PSLLDQ : (little-endian) left byte shift
11892 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
11893 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
11894 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
11895 /// PSRLDQ : (little-endian) right byte shift
11896 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
11897 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
11898 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
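/// For example (illustrative): the v4i32 mask [ zz, 0, zz, 2 ] matches as a
/// VSHLI by 32 bits of the vector reinterpreted as v2i64.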
11899 static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
11900 unsigned ScalarSizeInBits, ArrayRef<int> Mask,
11901 int MaskOffset, const APInt &Zeroable,
11902 const X86Subtarget &Subtarget) {
11903 int Size = Mask.size();
11904 unsigned SizeInBits = Size * ScalarSizeInBits;
11906 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
11907 for (int i = 0; i < Size; i += Scale)
11908 for (int j = 0; j < Shift; ++j)
11909 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
11915 auto MatchShift = [&](int Shift, int Scale, bool Left) {
11916 for (int i = 0; i != Size; i += Scale) {
11917 unsigned Pos = Left ? i + Shift : i;
11918 unsigned Low = Left ? i : i + Shift;
11919 unsigned Len = Scale - Shift;
11920 if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
11924 int ShiftEltBits = ScalarSizeInBits * Scale;
11925 bool ByteShift = ShiftEltBits > 64;
11926 Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
11927 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
11928 int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
11930 // Normalize the scale for byte shifts to still produce an i64 element
11932 Scale = ByteShift ? Scale / 2 : Scale;
11934 // We need to round trip through the appropriate type for the shift.
11935 MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
11936 ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
11937 : MVT::getVectorVT(ShiftSVT, Size / Scale);
11938 return (int)ShiftAmt;
11941 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
11942 // keep doubling the size of the integer elements up to that. We can
11943 // then shift the elements of the integer vector by whole multiples of
11944 // their width within the elements of the larger integer vector. Test each
11945 // multiple to see if we can find a match with the moved element indices
11946 // and that the shifted in elements are all zeroable.
11947 unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
11948 for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
11949 for (int Shift = 1; Shift != Scale; ++Shift)
11950 for (bool Left : {true, false})
11951 if (CheckZeros(Shift, Scale, Left)) {
11952 int ShiftAmt = MatchShift(Shift, Scale, Left);
11961 static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
11962 SDValue V2, ArrayRef<int> Mask,
11963 const APInt &Zeroable,
11964 const X86Subtarget &Subtarget,
11965 SelectionDAG &DAG) {
11966 int Size = Mask.size();
11967 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11973 // Try to match shuffle against V1 shift.
11974 int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11975 Mask, 0, Zeroable, Subtarget);
11977 // If V1 failed, try to match shuffle against V2 shift.
11978 if (ShiftAmt < 0) {
11979 ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11980 Mask, Size, Zeroable, Subtarget);
11987 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
11988 "Illegal integer vector type");
11989 V = DAG.getBitcast(ShiftVT, V);
11990 V = DAG.getNode(Opcode, DL, ShiftVT, V,
11991 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
11992 return DAG.getBitcast(VT, V);
11995 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
11996 // Remainder of lower half result is zero and upper half is all undef.
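// For example (illustrative): the v8i16 mask <2, 3, zz, zz, -1, -1, -1, -1>
// extracts Len = 2 elements starting at Idx = 2, i.e. BitLen = 32 and
// BitIdx = 32.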
11997 static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
11998 ArrayRef<int> Mask, uint64_t &BitLen,
11999 uint64_t &BitIdx, const APInt &Zeroable) {
12000 int Size = Mask.size();
12001 int HalfSize = Size / 2;
12002 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
12003 assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
12005 // Upper half must be undefined.
12006 if (!isUndefUpperHalf(Mask))
12009 // Determine the extraction length from the part of the
12010 // lower half that isn't zeroable.
12011 int Len = HalfSize;
12012 for (; Len > 0; --Len)
12013 if (!Zeroable[Len - 1])
12015 assert(Len > 0 && "Zeroable shuffle mask");
12017 // Attempt to match first Len sequential elements from the lower half.
12020 for (int i = 0; i != Len; ++i) {
12022 if (M == SM_SentinelUndef)
12024 SDValue &V = (M < Size ? V1 : V2);
12027 // The extracted elements must start at a valid index and all mask
12028 // elements must be in the lower half.
12029 if (i > M || M >= HalfSize)
12032 if (Idx < 0 || (Src == V && Idx == (M - i))) {
12040 if (!Src || Idx < 0)
12043 assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
12044 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
12045 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
12050 // INSERTQ: Extract lowest Len elements from lower half of second source and
12051 // insert over first source, starting at Idx.
12052 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
12053 static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
12054 ArrayRef<int> Mask, uint64_t &BitLen,
12055 uint64_t &BitIdx) {
12056 int Size = Mask.size();
12057 int HalfSize = Size / 2;
12058 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
12060 // Upper half must be undefined.
12061 if (!isUndefUpperHalf(Mask))
12064 for (int Idx = 0; Idx != HalfSize; ++Idx) {
12067 // Attempt to match first source from mask before insertion point.
12068 if (isUndefInRange(Mask, 0, Idx)) {
12070 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
12072 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
12078 // Extend the extraction length looking to match both the insertion of
12079 // the second source and the remaining elements of the first.
12080 for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
12082 int Len = Hi - Idx;
12084 // Match insertion.
12085 if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
12087 } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
12093 // Match the remaining elements of the lower half.
12094 if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
12096 } else if ((!Base || (Base == V1)) &&
12097 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
12099 } else if ((!Base || (Base == V2)) &&
12100 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
12107 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
12108 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
12118 /// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
12119 static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
12120 SDValue V2, ArrayRef<int> Mask,
12121 const APInt &Zeroable, SelectionDAG &DAG) {
12122 uint64_t BitLen, BitIdx;
12123 if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
12124 return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
12125 DAG.getTargetConstant(BitLen, DL, MVT::i8),
12126 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
12128 if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
12129 return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
12130 V2 ? V2 : DAG.getUNDEF(VT),
12131 DAG.getTargetConstant(BitLen, DL, MVT::i8),
12132 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
12137 /// Lower a vector shuffle as a zero or any extension.
12139 /// Given a specific number of elements, element bit width, and extension
12140 /// stride, produce either a zero or any extension based on the available
12141 /// features of the subtarget. The extended elements are consecutive and
12142 /// can start from an offsetted element index in the input; to
12143 /// avoid excess shuffling the offset must either be in the bottom lane
12144 /// or at the start of a higher lane. All extended elements must be from
12146 static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
12147 const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
12148 ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12149 assert(Scale > 1 && "Need a scale to extend.");
12150 int EltBits = VT.getScalarSizeInBits();
12151 int NumElements = VT.getVectorNumElements();
12152 int NumEltsPerLane = 128 / EltBits;
12153 int OffsetLane = Offset / NumEltsPerLane;
12154 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
12155 "Only 8, 16, and 32 bit elements can be extended.");
12156 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
12157 assert(0 <= Offset && "Extension offset must be positive.");
12158 assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
12159 "Extension offset must be in the first lane or start an upper lane.");
12161 // Check that an index is in same lane as the base offset.
12162 auto SafeOffset = [&](int Idx) {
12163 return OffsetLane == (Idx / NumEltsPerLane);
12166 // Shift along an input so that the offset base moves to the first element.
12167 auto ShuffleOffset = [&](SDValue V) {
12171 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
12172 for (int i = 0; i * Scale < NumElements; ++i) {
12173 int SrcIdx = i + Offset;
12174 ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
12176 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
12179 // Found a valid a/zext mask! Try various lowering strategies based on the
12180 // input type and available ISA extensions.
12181 if (Subtarget.hasSSE41()) {
12182 // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
12183 // PUNPCK will catch this in a later shuffle match.
12184 if (Offset && Scale == 2 && VT.is128BitVector())
12186 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
12187 NumElements / Scale);
12188 InputV = ShuffleOffset(InputV);
12189 InputV = getExtendInVec(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND, DL,
12190 ExtVT, InputV, DAG);
12191 return DAG.getBitcast(VT, InputV);
12194 assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
12196 // For any extends we can cheat for larger element sizes and use shuffle
12197 // instructions that can fold with a load and/or copy.
12198 if (AnyExt && EltBits == 32) {
12199 int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
12201 return DAG.getBitcast(
12202 VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
12203 DAG.getBitcast(MVT::v4i32, InputV),
12204 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
12206 if (AnyExt && EltBits == 16 && Scale > 2) {
12207 int PSHUFDMask[4] = {Offset / 2, -1,
12208 SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
12209 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
12210 DAG.getBitcast(MVT::v4i32, InputV),
12211 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
12212 int PSHUFWMask[4] = {1, -1, -1, -1};
12213 unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
12214 return DAG.getBitcast(
12215 VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
12216 DAG.getBitcast(MVT::v8i16, InputV),
12217 getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
12220 // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
12222 if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
12223 assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
12224 assert(VT.is128BitVector() && "Unexpected vector width!");
12226 int LoIdx = Offset * EltBits;
12227 SDValue Lo = DAG.getBitcast(
12228 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12229 DAG.getTargetConstant(EltBits, DL, MVT::i8),
12230 DAG.getTargetConstant(LoIdx, DL, MVT::i8)));
12232 if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
12233 return DAG.getBitcast(VT, Lo);
12235 int HiIdx = (Offset + 1) * EltBits;
12236 SDValue Hi = DAG.getBitcast(
12237 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12238 DAG.getTargetConstant(EltBits, DL, MVT::i8),
12239 DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
12240 return DAG.getBitcast(VT,
12241 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
12244 // If this would require more than 2 unpack instructions to expand, use
12245 // pshufb when available. We can only use more than 2 unpack instructions
12246 // when zero extending i8 elements which also makes it easier to use pshufb.
12247 if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
12248 assert(NumElements == 16 && "Unexpected byte vector width!");
12249 SDValue PSHUFBMask[16];
12250 for (int i = 0; i < 16; ++i) {
12251 int Idx = Offset + (i / Scale);
12252 if ((i % Scale == 0 && SafeOffset(Idx))) {
12253 PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
12257 AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
12259 InputV = DAG.getBitcast(MVT::v16i8, InputV);
12260 return DAG.getBitcast(
12261 VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
12262 DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
12265 // If we are extending from an offset, ensure we start on a boundary that
12266 // we can unpack from.
12267 int AlignToUnpack = Offset % (NumElements / Scale);
12268 if (AlignToUnpack) {
12269 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
12270 for (int i = AlignToUnpack; i < NumElements; ++i)
12271 ShMask[i - AlignToUnpack] = i;
12272 InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
12273 Offset -= AlignToUnpack;
12276 // Otherwise emit a sequence of unpacks.
12278 unsigned UnpackLoHi = X86ISD::UNPCKL;
12279 if (Offset >= (NumElements / 2)) {
12280 UnpackLoHi = X86ISD::UNPCKH;
12281 Offset -= (NumElements / 2);
12284 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
12285 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
12286 : getZeroVector(InputVT, Subtarget, DAG, DL);
12287 InputV = DAG.getBitcast(InputVT, InputV);
12288 InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
12292 } while (Scale > 1);
12293 return DAG.getBitcast(VT, InputV);
12296 /// Try to lower a vector shuffle as a zero extension on any microarch.
12298 /// This routine will try to do everything in its power to cleverly lower
12299 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
12300 /// check for the profitability of this lowering, it tries to aggressively
12301 /// match this pattern. It will use all of the micro-architectural details it
12302 /// can to emit an efficient lowering. It handles both blends with all-zero
12303 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
12304 /// masking out later).
12306 /// The reason we have dedicated lowering for zext-style shuffles is that they
12307 /// are both incredibly common and often quite performance sensitive.
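/// For example (illustrative): a v16i8 shuffle whose even elements are
/// <0, 1, ..., 7> from V1 and whose odd elements are zeroable is lowered as a
/// zero-extension of the low 8 bytes of V1 to 16-bit elements.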
12308 static SDValue lowerShuffleAsZeroOrAnyExtend(
12309 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12310 const APInt &Zeroable, const X86Subtarget &Subtarget,
12311 SelectionDAG &DAG) {
12312 int Bits = VT.getSizeInBits();
12313 int NumLanes = Bits / 128;
12314 int NumElements = VT.getVectorNumElements();
12315 int NumEltsPerLane = NumElements / NumLanes;
12316 assert(VT.getScalarSizeInBits() <= 32 &&
12317 "Exceeds 32-bit integer zero extension limit");
12318 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
12320 // Define a helper function to check a particular ext-scale and lower to it if
12322 auto Lower = [&](int Scale) -> SDValue {
12323 SDValue InputV;
12324 bool AnyExt = true;
12325 int Offset = 0;
12326 int Matches = 0;
12327 for (int i = 0; i < NumElements; ++i) {
12328 int M = Mask[i];
12329 if (M < 0)
12330 continue; // Valid anywhere but doesn't tell us anything.
12331 if (i % Scale != 0) {
12332 // Each of the extended elements needs to be zeroable.
12333 if (!Zeroable[i])
12334 return SDValue();
12336 // We no longer are in the anyext case.
12337 AnyExt = false;
12338 continue;
12339 }
12341 // Each of the base elements needs to be consecutive indices into the
12342 // same input vector.
12343 SDValue V = M < NumElements ? V1 : V2;
12344 M = M % NumElements;
12345 if (!InputV) {
12346 InputV = V;
12347 Offset = M - (i / Scale);
12348 } else if (InputV != V)
12349 return SDValue(); // Flip-flopping inputs.
12351 // Offset must start in the lowest 128-bit lane or at the start of an
12352 // upper lane.
12353 // FIXME: Is it ever worth allowing a negative base offset?
12354 if (!((0 <= Offset && Offset < NumEltsPerLane) ||
12355 (Offset % NumEltsPerLane) == 0))
12356 return SDValue();
12358 // If we are offsetting, all referenced entries must come from the same
12359 // half.
12360 if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
12361 return SDValue();
12363 if ((M % NumElements) != (Offset + (i / Scale)))
12364 return SDValue(); // Non-consecutive strided elements.
12365 Matches++;
12366 }
12368 // If we fail to find an input, we have a zero-shuffle which should always
12369 // have already been handled.
12370 // FIXME: Maybe handle this here in case during blending we end up with one?
12371 if (!InputV)
12372 return SDValue();
12374 // If we are offsetting, don't extend if we only match a single input, we
12375 // can always do better by using a basic PSHUF or PUNPCK.
12376 if (Offset != 0 && Matches < 2)
12379 return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
12380 InputV, Mask, Subtarget, DAG);
12383 // The widest scale possible for extending is to a 64-bit integer.
12384 assert(Bits % 64 == 0 &&
12385 "The number of bits in a vector must be divisible by 64 on x86!");
12386 int NumExtElements = Bits / 64;
12388 // Each iteration, try extending the elements half as much, but into twice as
12389 // many elements.
12390 for (; NumExtElements < NumElements; NumExtElements *= 2) {
12391 assert(NumElements % NumExtElements == 0 &&
12392 "The input vector size must be divisible by the extended size.");
12393 if (SDValue V = Lower(NumElements / NumExtElements))
12394 return V;
12395 }
12397 // General extends failed, but 128-bit vectors may be able to use MOVQ.
12401 // Returns one of the source operands if the shuffle can be reduced to a
12402 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
12403 auto CanZExtLowHalf = [&]() {
12404 for (int i = NumElements / 2; i != NumElements; ++i)
12405 if (!Zeroable[i])
12406 return SDValue();
12407 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
12408 return V1;
12409 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
12410 return V2;
12411 return SDValue();
12412 };
12414 if (SDValue V = CanZExtLowHalf()) {
12415 V = DAG.getBitcast(MVT::v2i64, V);
12416 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
12417 return DAG.getBitcast(VT, V);
12420 // No viable ext lowering found.
12421 return SDValue();
12422 }
12424 /// Try to get a scalar value for a specific element of a vector.
12426 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
12427 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
12428 SelectionDAG &DAG) {
12429 MVT VT = V.getSimpleValueType();
12430 MVT EltVT = VT.getVectorElementType();
12431 V = peekThroughBitcasts(V);
12433 // If the bitcasts shift the element size, we can't extract an equivalent
12434 // element from it.
12435 MVT NewVT = V.getSimpleValueType();
12436 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
12437 return SDValue();
12439 if (V.getOpcode() == ISD::BUILD_VECTOR ||
12440 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
12441 // Ensure the scalar operand is the same size as the destination.
12442 // FIXME: Add support for scalar truncation where possible.
12443 SDValue S = V.getOperand(Idx);
12444 if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
12445 return DAG.getBitcast(EltVT, S);
12451 /// Helper to test for a load that can be folded with x86 shuffles.
12453 /// This is particularly important because the set of instructions varies
12454 /// significantly based on whether the operand is a load or not.
12455 static bool isShuffleFoldableLoad(SDValue V) {
12456 V = peekThroughBitcasts(V);
12457 return ISD::isNON_EXTLoad(V.getNode());
12460 /// Try to lower insertion of a single element into a zero vector.
12462 /// This is a common pattern that we have especially efficient patterns to lower
12463 /// across all subtarget feature sets.
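/// A representative case is a v4i32 mask <4, Z, Z, Z> (Z again marking
/// zeroable lanes): the low element of V2 dropped into lane 0 of an otherwise
/// zero vector, which maps onto the VZEXT_MOVL (movd/movq/movss style)
/// patterns handled below.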
12464 static SDValue lowerShuffleAsElementInsertion(
12465 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12466 const APInt &Zeroable, const X86Subtarget &Subtarget,
12467 SelectionDAG &DAG) {
12468 MVT ExtVT = VT;
12469 MVT EltVT = VT.getVectorElementType();
12471 int V2Index =
12472 find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
12473 Mask.begin();
12474 bool IsV1Zeroable = true;
12475 for (int i = 0, Size = Mask.size(); i < Size; ++i)
12476 if (i != V2Index && !Zeroable[i]) {
12477 IsV1Zeroable = false;
12481 // Check for a single input from a SCALAR_TO_VECTOR node.
12482 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
12483 // all the smarts here sunk into that routine. However, the current
12484 // lowering of BUILD_VECTOR makes that nearly impossible until the old
12485 // vector shuffle lowering is dead.
12486 SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
12487 DAG);
12488 if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
12489 // We need to zext the scalar if it is smaller than an i32.
12490 V2S = DAG.getBitcast(EltVT, V2S);
12491 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
12492 // Using zext to expand a narrow element won't work for non-zero
12493 // insertions.
12494 if (!IsV1Zeroable)
12495 return SDValue();
12497 // Zero-extend directly to i32.
12498 ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
12499 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
12501 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12502 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
12503 EltVT == MVT::i16) {
12504 // Either not inserting from the low element of the input or the input
12505 // element size is too small to use VZEXT_MOVL to clear the high bits.
12506 return SDValue();
12507 }
12509 if (!IsV1Zeroable) {
12510 // If V1 can't be treated as a zero vector we have fewer options to lower
12511 // this. We can't support integer vectors or non-zero targets cheaply, and
12512 // the V1 elements can't be permuted in any way.
12513 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
12514 if (!VT.isFloatingPoint() || V2Index != 0)
12516 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
12517 V1Mask[V2Index] = -1;
12518 if (!isNoopShuffleMask(V1Mask))
12520 if (!VT.is128BitVector())
12523 // Otherwise, use MOVSD or MOVSS.
12524 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
12525 "Only two types of floating point element types to handle!");
12526 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
12527 ExtVT, V1, V2);
12528 }
12530 // This lowering only works for the low element with floating point vectors.
12531 if (VT.isFloatingPoint() && V2Index != 0)
12534 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
12536 V2 = DAG.getBitcast(VT, V2);
12538 if (V2Index != 0) {
12539 // If we have 4 or fewer lanes we can cheaply shuffle the element into
12540 // the desired position. Otherwise it is more efficient to do a vector
12541 // shift left. We know that we can do a vector shift left because all
12542 // the inputs are zero.
12543 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
12544 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
12545 V2Shuffle[V2Index] = 0;
12546 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
12547 } else {
12548 V2 = DAG.getBitcast(MVT::v16i8, V2);
12549 V2 = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
12550 DAG.getTargetConstant(
12551 V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
12552 V2 = DAG.getBitcast(VT, V2);
12553 }
12554 }
12556 return V2;
12557 }
12558 /// Try to lower broadcast of a single - truncated - integer element,
12559 /// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
12561 /// This assumes we have AVX2.
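/// For example, broadcasting v4i32 lanes from element 0 of a v2i64
/// scalar_to_vector amounts to truncating the i64 scalar to i32 and emitting
/// a VBROADCAST of the truncated value, which is what this routine builds.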
12562 static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
12564 const X86Subtarget &Subtarget,
12565 SelectionDAG &DAG) {
12566 assert(Subtarget.hasAVX2() &&
12567 "We can only lower integer broadcasts with AVX2!");
12569 EVT EltVT = VT.getVectorElementType();
12570 EVT V0VT = V0.getValueType();
12572 assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
12573 assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
12575 EVT V0EltVT = V0VT.getVectorElementType();
12576 if (!V0EltVT.isInteger())
12579 const unsigned EltSize = EltVT.getSizeInBits();
12580 const unsigned V0EltSize = V0EltVT.getSizeInBits();
12582 // This is only a truncation if the original element type is larger.
12583 if (V0EltSize <= EltSize)
12586 assert(((V0EltSize % EltSize) == 0) &&
12587 "Scalar type sizes must all be powers of 2 on x86!");
12589 const unsigned V0Opc = V0.getOpcode();
12590 const unsigned Scale = V0EltSize / EltSize;
12591 const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
12593 if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
12594 V0Opc != ISD::BUILD_VECTOR)
12597 SDValue Scalar = V0.getOperand(V0BroadcastIdx);
12599 // If we're extracting non-least-significant bits, shift so we can truncate.
12600 // Hopefully, we can fold away the trunc/srl/load into the broadcast.
12601 // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
12602 // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
12603 if (const int OffsetIdx = BroadcastIdx % Scale)
12604 Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
12605 DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
12607 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
12608 DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
12611 /// Test whether this can be lowered with a single SHUFPS instruction.
12613 /// This is used to disable more specialized lowerings when the shufps lowering
12614 /// will happen to be efficient.
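/// For example, <0, 3, 4, 6> needs only one SHUFPS (low half from V1, high
/// half from V2), whereas <0, 4, 1, 5> mixes both inputs in the low half and
/// cannot be a single SHUFPS.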
12615 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
12616 // This routine only handles 128-bit shufps.
12617 assert(Mask.size() == 4 && "Unsupported mask size!");
12618 assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
12619 assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
12620 assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
12621 assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
12623 // To lower with a single SHUFPS we need to have the low half and high half
12624 // each requiring a single input.
12625 if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
12627 if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
12633 /// If we are extracting two 128-bit halves of a vector and shuffling the
12634 /// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
12635 /// multi-shuffle lowering.
12636 static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
12637 SDValue N1, ArrayRef<int> Mask,
12638 SelectionDAG &DAG) {
12639 EVT VT = N0.getValueType();
12640 assert((VT.is128BitVector() &&
12641 (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
12642 "VPERM* family of shuffles requires 32-bit or 64-bit elements");
12644 // Check that both sources are extracts of the same source vector.
12645 if (!N0.hasOneUse() || !N1.hasOneUse() ||
12646 N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12647 N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12648 N0.getOperand(0) != N1.getOperand(0))
12651 SDValue WideVec = N0.getOperand(0);
12652 EVT WideVT = WideVec.getValueType();
12653 if (!WideVT.is256BitVector() || !isa<ConstantSDNode>(N0.getOperand(1)) ||
12654 !isa<ConstantSDNode>(N1.getOperand(1)))
12657 // Match extracts of each half of the wide source vector. Commute the shuffle
12658 // if the extract of the low half is N1.
12659 unsigned NumElts = VT.getVectorNumElements();
12660 SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
12661 const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
12662 const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
12663 if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
12664 ShuffleVectorSDNode::commuteMask(NewMask);
12665 else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
12668 // Final bailout: if the mask is simple, we are better off using an extract
12669 // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
12670 // because that avoids a constant load from memory.
12671 if (NumElts == 4 &&
12672 (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask)))
12675 // Extend the shuffle mask with undef elements.
12676 NewMask.append(NumElts, -1);
12678 // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
12679 SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
12681 // This is free: ymm -> xmm.
12682 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
12683 DAG.getIntPtrConstant(0, DL));
12686 /// Try to lower broadcast of a single element.
12688 /// For convenience, this code also bundles all of the subtarget feature set
12689 /// filtering. While a little annoying to re-dispatch on type here, there isn't
12690 /// a convenient way to factor it out.
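/// For example, with AVX a v4f32 splat of element 0 (mask <0, 0, 0, 0>)
/// becomes a single VBROADCASTSS, folding the scalar load where possible.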
12691 static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
12692 SDValue V2, ArrayRef<int> Mask,
12693 const X86Subtarget &Subtarget,
12694 SelectionDAG &DAG) {
12695 if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
12696 (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
12697 (Subtarget.hasAVX2() && VT.isInteger())))
12700 // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
12701 // we can only broadcast from a register with AVX2.
12702 unsigned NumElts = Mask.size();
12703 unsigned NumEltBits = VT.getScalarSizeInBits();
12704 unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
12706 : X86ISD::VBROADCAST;
12707 bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
12709 // Check that the mask is a broadcast.
12710 int BroadcastIdx = -1;
12711 for (int i = 0; i != (int)NumElts; ++i) {
12712 SmallVector<int, 8> BroadcastMask(NumElts, i);
12713 if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
12714 BroadcastIdx = i;
12715 break;
12716 }
12717 }
12719 if (BroadcastIdx < 0)
12720 return SDValue();
12721 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
12722 "a sorted mask where the broadcast "
12725 // Go up the chain of (vector) values to find a scalar load that we can
12726 // combine with the broadcast.
12727 int BitOffset = BroadcastIdx * NumEltBits;
12728 SDValue V = V1;
12729 for (;;) {
12730 switch (V.getOpcode()) {
12731 case ISD::BITCAST: {
12732 V = V.getOperand(0);
12733 continue;
12734 }
12735 case ISD::CONCAT_VECTORS: {
12736 int OpBitWidth = V.getOperand(0).getValueSizeInBits();
12737 int OpIdx = BitOffset / OpBitWidth;
12738 V = V.getOperand(OpIdx);
12739 BitOffset %= OpBitWidth;
12740 continue;
12741 }
12742 case ISD::INSERT_SUBVECTOR: {
12743 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
12744 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
12745 if (!ConstantIdx)
12746 break;
12748 int EltBitWidth = VOuter.getScalarValueSizeInBits();
12749 int Idx = (int)ConstantIdx->getZExtValue();
12750 int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
12751 int BeginOffset = Idx * EltBitWidth;
12752 int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
12753 if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
12754 BitOffset -= BeginOffset;
12755 V = VInner;
12756 } else {
12757 V = VOuter;
12758 }
12759 continue;
12760 }
12761 }
12762 break;
12763 }
12764 assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
12765 BroadcastIdx = BitOffset / NumEltBits;
12767 // Do we need to bitcast the source to retrieve the original broadcast index?
12768 bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
12770 // Check if this is a broadcast of a scalar. We special case lowering
12771 // for scalars so that we can more effectively fold with loads.
12772 // If the original value has a larger element type than the shuffle, the
12773 // broadcast element is in essence truncated. Make that explicit to ease
12775 if (BitCastSrc && VT.isInteger())
12776 if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
12777 DL, VT, V, BroadcastIdx, Subtarget, DAG))
12778 return TruncBroadcast;
12780 MVT BroadcastVT = VT;
12782 // Also check the simpler case, where we can directly reuse the scalar.
12783 if (!BitCastSrc &&
12784 ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
12785 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
12786 V = V.getOperand(BroadcastIdx);
12788 // If we can't broadcast from a register, check that the input is a load.
12789 if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
12791 } else if (MayFoldLoad(V) && cast<LoadSDNode>(V)->isSimple()) {
12792 // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12793 if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
12794 BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
12795 Opcode = (BroadcastVT.is128BitVector() && !Subtarget.hasAVX2())
12796 ? X86ISD::MOVDDUP
12797 : Opcode;
12798 }
12800 // If we are broadcasting a load that is only used by the shuffle
12801 // then we can reduce the vector load to the broadcasted scalar load.
12802 LoadSDNode *Ld = cast<LoadSDNode>(V);
12803 SDValue BaseAddr = Ld->getOperand(1);
12804 EVT SVT = BroadcastVT.getScalarType();
12805 unsigned Offset = BroadcastIdx * SVT.getStoreSize();
12806 assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
12807 SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
12808 V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
12809 DAG.getMachineFunction().getMachineMemOperand(
12810 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12811 DAG.makeEquivalentMemoryOrdering(Ld, V);
12812 } else if (!BroadcastFromReg) {
12813 // We can't broadcast from a vector register.
12815 } else if (BitOffset != 0) {
12816 // We can only broadcast from the zero-element of a vector register,
12817 // but it can be advantageous to broadcast from the zero-element of a
12818 // subvector.
12819 if (!VT.is256BitVector() && !VT.is512BitVector())
12820 return SDValue();
12822 // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
12823 if (VT == MVT::v4f64 || VT == MVT::v4i64)
12826 // Only broadcast the zero-element of a 128-bit subvector.
12827 if ((BitOffset % 128) != 0)
12830 assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
12831 "Unexpected bit-offset");
12832 assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
12833 "Unexpected vector size");
12834 unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
12835 V = extract128BitVector(V, ExtractIdx, DAG, DL);
12838 if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
12839 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
12840 DAG.getBitcast(MVT::f64, V));
12842 // Bitcast back to the same scalar type as BroadcastVT.
12843 if (V.getValueType().getScalarType() != BroadcastVT.getScalarType()) {
12844 assert(NumEltBits == BroadcastVT.getScalarSizeInBits() &&
12845 "Unexpected vector element size");
12847 if (V.getValueType().isVector()) {
12848 unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
12849 ExtVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
12850 } else {
12851 ExtVT = BroadcastVT.getScalarType();
12852 }
12853 V = DAG.getBitcast(ExtVT, V);
12856 // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12857 if (!Subtarget.is64Bit() && V.getValueType() == MVT::i64) {
12858 V = DAG.getBitcast(MVT::f64, V);
12859 unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
12860 BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
12863 // We only support broadcasting from 128-bit vectors to minimize the
12864 // number of patterns we need to deal with in isel. So extract down to
12865 // 128-bits, removing as many bitcasts as possible.
12866 if (V.getValueSizeInBits() > 128) {
12867 MVT ExtVT = V.getSimpleValueType().getScalarType();
12868 ExtVT = MVT::getVectorVT(ExtVT, 128 / ExtVT.getScalarSizeInBits());
12869 V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
12870 V = DAG.getBitcast(ExtVT, V);
12873 return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
12876 // Check for whether we can use INSERTPS to perform the shuffle. We only use
12877 // INSERTPS when the V1 elements are already in the correct locations
12878 // because otherwise we can just always use two SHUFPS instructions which
12879 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
12880 // perform INSERTPS if a single V1 element is out of place and all V2
12881 // elements are zeroable.
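// The INSERTPS immediate encodes the source element in bits [7:6], the
// destination lane in bits [5:4], and a zero mask in bits [3:0], which is
// exactly how InsertPSMask is assembled below.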
12882 static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
12883 unsigned &InsertPSMask,
12884 const APInt &Zeroable,
12885 ArrayRef<int> Mask, SelectionDAG &DAG) {
12886 assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
12887 assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
12888 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12890 // Attempt to match INSERTPS with one element from VA or VB being
12891 // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
12893 auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
12894 ArrayRef<int> CandidateMask) {
12895 unsigned ZMask = 0;
12896 int VADstIndex = -1;
12897 int VBDstIndex = -1;
12898 bool VAUsedInPlace = false;
12900 for (int i = 0; i < 4; ++i) {
12901 // Synthesize a zero mask from the zeroable elements (includes undefs).
12902 if (Zeroable[i]) {
12903 ZMask |= 1 << i;
12904 continue;
12905 }
12907 // Flag if we use any VA inputs in place.
12908 if (i == CandidateMask[i]) {
12909 VAUsedInPlace = true;
12913 // We can only insert a single non-zeroable element.
12914 if (VADstIndex >= 0 || VBDstIndex >= 0)
12917 if (CandidateMask[i] < 4) {
12918 // VA input out of place for insertion.
12919 VADstIndex = i;
12920 } else {
12921 // VB input for insertion.
12922 VBDstIndex = i;
12923 }
12924 }
12926 // Don't bother if we have no (non-zeroable) element for insertion.
12927 if (VADstIndex < 0 && VBDstIndex < 0)
12930 // Determine element insertion src/dst indices. The src index is from the
12931 // start of the inserted vector, not the start of the concatenated vector.
12932 unsigned VBSrcIndex = 0;
12933 if (VADstIndex >= 0) {
12934 // If we have a VA input out of place, we use VA as the V2 element
12935 // insertion and don't use the original V2 at all.
12936 VBSrcIndex = CandidateMask[VADstIndex];
12937 VBDstIndex = VADstIndex;
12938 VB = VA;
12939 } else {
12940 VBSrcIndex = CandidateMask[VBDstIndex] - 4;
12941 }
12943 // If no V1 inputs are used in place, then the result is created only from
12944 // the zero mask and the V2 insertion - so remove V1 dependency.
12945 if (!VAUsedInPlace)
12946 VA = DAG.getUNDEF(MVT::v4f32);
12948 // Update V1, V2 and InsertPSMask accordingly.
12949 V1 = VA;
12950 V2 = VB;
12952 // Insert the V2 element into the desired position.
12953 InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
12954 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
12955 return true;
12956 };
12958 if (matchAsInsertPS(V1, V2, Mask))
12959 return true;
12961 // Commute and try again.
12962 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
12963 ShuffleVectorSDNode::commuteMask(CommutedMask);
12964 if (matchAsInsertPS(V2, V1, CommutedMask))
12965 return true;
12967 return false;
12968 }
12970 static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
12971 ArrayRef<int> Mask, const APInt &Zeroable,
12972 SelectionDAG &DAG) {
12973 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12974 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12976 // Attempt to match the insertps pattern.
12977 unsigned InsertPSMask;
12978 if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
12981 // Insert the V2 element into the desired position.
12982 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
12983 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
12986 /// Try to lower a shuffle as a permute of the inputs followed by an
12987 /// UNPCK instruction.
12989 /// This specifically targets cases where we end up with alternating between
12990 /// the two inputs, and so can permute them into something that feeds a single
12991 /// UNPCK instruction. Note that this routine only targets integer vectors
12992 /// because for floating point vectors we have a generalized SHUFPS lowering
12993 /// strategy that handles everything that doesn't *exactly* match an unpack,
12994 /// making this clever lowering unnecessary.
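/// For example, the v4i32 mask <0, 4, 2, 6> is not an unpack by itself, but
/// after permuting each input with <0, 2, -1, -1> it becomes the plain UNPCKL
/// pattern <0, 4, 1, 5>.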
12995 static SDValue lowerShuffleAsPermuteAndUnpack(
12996 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12997 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12998 assert(!VT.isFloatingPoint() &&
12999 "This routine only supports integer vectors.");
13000 assert(VT.is128BitVector() &&
13001 "This routine only works on 128-bit vectors.");
13002 assert(!V2.isUndef() &&
13003 "This routine should only be used when blending two inputs.");
13004 assert(Mask.size() >= 2 && "Single element masks are invalid.");
13006 int Size = Mask.size();
13008 int NumLoInputs =
13009 count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
13010 int NumHiInputs =
13011 count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
13013 bool UnpackLo = NumLoInputs >= NumHiInputs;
13015 auto TryUnpack = [&](int ScalarSize, int Scale) {
13016 SmallVector<int, 16> V1Mask((unsigned)Size, -1);
13017 SmallVector<int, 16> V2Mask((unsigned)Size, -1);
13019 for (int i = 0; i < Size; ++i) {
13023 // Each element of the unpack contains Scale elements from this mask.
13024 int UnpackIdx = i / Scale;
13026 // We only handle the case where V1 feeds the first slots of the unpack.
13027 // We rely on canonicalization to ensure this is the case.
13028 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
13031 // Setup the mask for this input. The indexing is tricky as we have to
13032 // handle the unpack stride.
13033 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
13034 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
13035 Mask[i] % Size;
13036 }
13038 // If we will have to shuffle both inputs to use the unpack, check whether
13039 // we can just unpack first and shuffle the result. If so, skip this unpack.
13040 if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
13041 !isNoopShuffleMask(V2Mask))
13044 // Shuffle the inputs into place.
13045 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
13046 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
13048 // Cast the inputs to the type we will use to unpack them.
13049 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
13050 V1 = DAG.getBitcast(UnpackVT, V1);
13051 V2 = DAG.getBitcast(UnpackVT, V2);
13053 // Unpack the inputs and cast the result back to the desired type.
13054 return DAG.getBitcast(
13055 VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
13056 UnpackVT, V1, V2));
13059 // We try each unpack from the largest to the smallest to try and find one
13060 // that fits this mask.
13061 int OrigScalarSize = VT.getScalarSizeInBits();
13062 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
13063 if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
13064 return Unpack;
13066 // If we're shuffling with a zero vector then we're better off not doing
13067 // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
13068 if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
13069 ISD::isBuildVectorAllZeros(V2.getNode()))
13072 // If none of the unpack-rooted lowerings worked (or were profitable) try an
13073 // initial unpack.
13074 if (NumLoInputs == 0 || NumHiInputs == 0) {
13075 assert((NumLoInputs > 0 || NumHiInputs > 0) &&
13076 "We have to have *some* inputs!");
13077 int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
13079 // FIXME: We could consider the total complexity of the permute of each
13080 // possible unpacking. Or at the least we should consider how many
13081 // half-crossings are created.
13082 // FIXME: We could consider commuting the unpacks.
13084 SmallVector<int, 32> PermMask((unsigned)Size, -1);
13085 for (int i = 0; i < Size; ++i) {
13089 assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
13091 PermMask[i] =
13092 2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
13094 return DAG.getVectorShuffle(
13095 VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
13096 DL, VT, V1, V2),
13097 DAG.getUNDEF(VT), PermMask);
13103 /// Handle lowering of 2-lane 64-bit floating point shuffles.
13105 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
13106 /// support for floating point shuffles but not integer shuffles. These
13107 /// instructions will incur a domain crossing penalty on some chips though so
13108 /// it is better to avoid lowering through this for integer vectors where
13109 /// possible.
13110 static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13111 const APInt &Zeroable, SDValue V1, SDValue V2,
13112 const X86Subtarget &Subtarget,
13113 SelectionDAG &DAG) {
13114 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
13115 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
13116 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
13118 if (V2.isUndef()) {
13119 // Check for being able to broadcast a single element.
13120 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
13121 Mask, Subtarget, DAG))
13124 // Straight shuffle of a single input vector. Simulate this by using the
13125 // single input as both of the "inputs" to this instruction.
13126 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
13128 if (Subtarget.hasAVX()) {
13129 // If we have AVX, we can use VPERMILPS which will allow folding a load
13130 // into the shuffle.
13131 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
13132 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13135 return DAG.getNode(
13136 X86ISD::SHUFP, DL, MVT::v2f64,
13137 Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
13138 Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
13139 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13141 assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
13142 assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
13143 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
13144 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
13146 if (Subtarget.hasAVX2())
13147 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13150 // When loading a scalar and then shuffling it into a vector we can often do
13151 // the insertion cheaply.
13152 if (SDValue Insertion = lowerShuffleAsElementInsertion(
13153 DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
13155 // Try inverting the insertion since for v2 masks it is easy to do and we
13156 // can't reliably sort the mask one way or the other.
13157 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
13158 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
13159 if (SDValue Insertion = lowerShuffleAsElementInsertion(
13160 DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
13163 // Try to use one of the special instruction patterns to handle two common
13164 // blend patterns if a zero-blend above didn't work.
13165 if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
13166 isShuffleEquivalent(V1, V2, Mask, {1, 3}))
13167 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
13168 // We can either use a special instruction to load over the low double or
13169 // to move just the low double.
13170 return DAG.getNode(
13171 X86ISD::MOVSD, DL, MVT::v2f64, V2,
13172 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
13174 if (Subtarget.hasSSE41())
13175 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
13176 Zeroable, Subtarget, DAG))
13179 // Use dedicated unpack instructions for masks that match their pattern.
13180 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
13183 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
13184 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
13185 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13188 /// Handle lowering of 2-lane 64-bit integer shuffles.
13190 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
13191 /// the integer unit to minimize domain crossing penalties. However, for blends
13192 /// it falls back to the floating point shuffle operation with appropriate bit
13193 /// casting.
13194 static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13195 const APInt &Zeroable, SDValue V1, SDValue V2,
13196 const X86Subtarget &Subtarget,
13197 SelectionDAG &DAG) {
13198 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
13199 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
13200 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
13202 if (V2.isUndef()) {
13203 // Check for being able to broadcast a single element.
13204 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
13205 Mask, Subtarget, DAG))
13208 // Straight shuffle of a single input vector. For everything from SSE2
13209 // onward this has a single fast instruction with no scary immediates.
13210 // We have to map the mask as it is actually a v4i32 shuffle instruction.
13211 V1 = DAG.getBitcast(MVT::v4i32, V1);
13212 int WidenedMask[4] = {
13213 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
13214 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
13215 return DAG.getBitcast(
13216 MVT::v2i64,
13217 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13218 getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
13220 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
13221 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
13222 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
13223 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
13225 if (Subtarget.hasAVX2())
13226 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13229 // Try to use shift instructions.
13230 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
13231 Zeroable, Subtarget, DAG))
13234 // When loading a scalar and then shuffling it into a vector we can often do
13235 // the insertion cheaply.
13236 if (SDValue Insertion = lowerShuffleAsElementInsertion(
13237 DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
13239 // Try inverting the insertion since for v2 masks it is easy to do and we
13240 // can't reliably sort the mask one way or the other.
13241 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
13242 if (SDValue Insertion = lowerShuffleAsElementInsertion(
13243 DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
13246 // We have different paths for blend lowering, but they all must use the
13247 // *exact* same predicate.
13248 bool IsBlendSupported = Subtarget.hasSSE41();
13249 if (IsBlendSupported)
13250 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
13251 Zeroable, Subtarget, DAG))
13254 // Use dedicated unpack instructions for masks that match their pattern.
13255 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
13258 // Try to use byte rotation instructions.
13259 // Its more profitable for pre-SSSE3 to use shuffles/unpacks.
13260 if (Subtarget.hasSSSE3()) {
13261 if (Subtarget.hasVLX())
13262 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v2i64, V1, V2, Mask,
13263 Subtarget, DAG))
13264 return Rotate;
13266 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
13267 Subtarget, DAG))
13268 return Rotate;
13269 }
13271 // If we have direct support for blends, we should lower by decomposing into
13272 // a permute. That will be faster than the domain cross.
13273 if (IsBlendSupported)
13274 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2, Mask,
13275 Subtarget, DAG);
13277 // We implement this with SHUFPD which is pretty lame because it will likely
13278 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
13279 // However, all the alternatives are still more cycles and newer chips don't
13280 // have this problem. It would be really nice if x86 had better shuffles here.
13281 V1 = DAG.getBitcast(MVT::v2f64, V1);
13282 V2 = DAG.getBitcast(MVT::v2f64, V2);
13283 return DAG.getBitcast(MVT::v2i64,
13284 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
13287 /// Lower a vector shuffle using the SHUFPS instruction.
13289 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
13290 /// It makes no assumptions about whether this is the *best* lowering, it simply
13291 /// uses it.
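/// SHUFPS takes the two low result elements from its first operand and the
/// two high result elements from its second operand, each selected by a 2-bit
/// field of the immediate; LowV/HighV below are chosen to satisfy that.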
13292 static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
13293 ArrayRef<int> Mask, SDValue V1,
13294 SDValue V2, SelectionDAG &DAG) {
13295 SDValue LowV = V1, HighV = V2;
13296 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
13298 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13300 if (NumV2Elements == 1) {
13301 int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
13303 // Compute the index adjacent to V2Index and in the same half by toggling
13304 // the low bit.
13305 int V2AdjIndex = V2Index ^ 1;
13307 if (Mask[V2AdjIndex] < 0) {
13308 // Handles all the cases where we have a single V2 element and an undef.
13309 // This will only ever happen in the high lanes because we commute the
13310 // vector otherwise.
13311 if (V2Index < 2)
13312 std::swap(LowV, HighV);
13313 NewMask[V2Index] -= 4;
13314 } else {
13315 // Handle the case where the V2 element ends up adjacent to a V1 element.
13316 // To make this work, blend them together as the first step.
13317 int V1Index = V2AdjIndex;
13318 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
13319 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
13320 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13322 // Now proceed to reconstruct the final blend as we have the necessary
13323 // high or low half formed.
13324 if (V2Index < 2) {
13325 LowV = V2;
13326 HighV = V1;
13327 } else {
13328 HighV = V2;
13329 }
13330 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
13331 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
13333 } else if (NumV2Elements == 2) {
13334 if (Mask[0] < 4 && Mask[1] < 4) {
13335 // Handle the easy case where we have V1 in the low lanes and V2 in the
13336 // high lanes.
13337 NewMask[2] -= 4;
13338 NewMask[3] -= 4;
13339 } else if (Mask[2] < 4 && Mask[3] < 4) {
13340 // We also handle the reversed case because this utility may get called
13341 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
13342 // arrange things in the right direction.
13343 NewMask[0] -= 4;
13344 NewMask[1] -= 4;
13345 HighV = V1;
13346 LowV = V2;
13347 } else {
13348 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
13349 // trying to place elements directly, just blend them and set up the final
13350 // shuffle to place them.
13352 // The first two blend mask elements are for V1, the second two are for
13353 // V2.
13354 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
13355 Mask[2] < 4 ? Mask[2] : Mask[3],
13356 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
13357 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
13358 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
13359 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13361 // Now we do a normal shuffle of V1 by giving V1 as both operands to
13362 // the SHUFPS.
13363 LowV = HighV = V1;
13364 NewMask[0] = Mask[0] < 4 ? 0 : 2;
13365 NewMask[1] = Mask[0] < 4 ? 2 : 0;
13366 NewMask[2] = Mask[2] < 4 ? 1 : 3;
13367 NewMask[3] = Mask[2] < 4 ? 3 : 1;
13370 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
13371 getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
13374 /// Lower 4-lane 32-bit floating point shuffles.
13376 /// Uses instructions exclusively from the floating point unit to minimize
13377 /// domain crossing penalties, as these are sufficient to implement all v4f32
13378 /// shuffles.
13379 static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13380 const APInt &Zeroable, SDValue V1, SDValue V2,
13381 const X86Subtarget &Subtarget,
13382 SelectionDAG &DAG) {
13383 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13384 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13385 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13387 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13389 if (NumV2Elements == 0) {
13390 // Check for being able to broadcast a single element.
13391 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
13392 Mask, Subtarget, DAG))
13395 // Use even/odd duplicate instructions for masks that match their pattern.
13396 if (Subtarget.hasSSE3()) {
13397 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
13398 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
13399 if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
13400 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
13403 if (Subtarget.hasAVX()) {
13404 // If we have AVX, we can use VPERMILPS which will allow folding a load
13405 // into the shuffle.
13406 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
13407 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13410 // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
13411 // in SSE1 because otherwise they are widened to v2f64 and never get here.
13412 if (!Subtarget.hasSSE2()) {
13413 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
13414 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
13415 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
13416 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
13419 // Otherwise, use a straight shuffle of a single input vector. We pass the
13420 // input vector to both operands to simulate this with a SHUFPS.
13421 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
13422 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13425 if (Subtarget.hasAVX2())
13426 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13429 // There are special ways we can lower some single-element blends. However, we
13430 // have custom ways we can lower more complex single-element blends below that
13431 // we defer to if both this and BLENDPS fail to match, so restrict this to
13432 // when the V2 input is targeting element 0 of the mask -- that is the fast
13433 // case here.
13434 if (NumV2Elements == 1 && Mask[0] >= 4)
13435 if (SDValue V = lowerShuffleAsElementInsertion(
13436 DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13439 if (Subtarget.hasSSE41()) {
13440 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
13441 Zeroable, Subtarget, DAG))
13444 // Use INSERTPS if we can complete the shuffle efficiently.
13445 if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
13448 if (!isSingleSHUFPSMask(Mask))
13449 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
13450 V2, Mask, DAG))
13451 return BlendPerm;
13452 }
13454 // Use low/high mov instructions. These are only valid in SSE1 because
13455 // otherwise they are widened to v2f64 and never get here.
13456 if (!Subtarget.hasSSE2()) {
13457 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
13458 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
13459 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
13460 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
13463 // Use dedicated unpack instructions for masks that match their pattern.
13464 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
13467 // Otherwise fall back to a SHUFPS lowering strategy.
13468 return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
13471 /// Lower 4-lane i32 vector shuffles.
13473 /// We try to handle these with integer-domain shuffles where we can, but for
13474 /// blends we use the floating point domain blend instructions.
13475 static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13476 const APInt &Zeroable, SDValue V1, SDValue V2,
13477 const X86Subtarget &Subtarget,
13478 SelectionDAG &DAG) {
13479 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13480 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13481 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13483 // Whenever we can lower this as a zext, that instruction is strictly faster
13484 // than any alternative. It also allows us to fold memory operands into the
13485 // shuffle in many cases.
13486 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
13487 Zeroable, Subtarget, DAG))
13490 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13492 if (NumV2Elements == 0) {
13493 // Try to use broadcast unless the mask only has one non-undef element.
13494 if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
13495 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
13496 Mask, Subtarget, DAG))
13500 // Straight shuffle of a single input vector. For everything from SSE2
13501 // onward this has a single fast instruction with no scary immediates.
13502 // We coerce the shuffle pattern to be compatible with UNPCK instructions
13503 // but we aren't actually going to use the UNPCK instruction because doing
13504 // so prevents folding a load into this instruction or making a copy.
13505 const int UnpackLoMask[] = {0, 0, 1, 1};
13506 const int UnpackHiMask[] = {2, 2, 3, 3};
13507 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
13508 Mask = UnpackLoMask;
13509 else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
13510 Mask = UnpackHiMask;
13512 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13513 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13516 if (Subtarget.hasAVX2())
13517 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13520 // Try to use shift instructions.
13521 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
13522 Zeroable, Subtarget, DAG))
13525 // There are special ways we can lower some single-element blends.
13526 if (NumV2Elements == 1)
13527 if (SDValue V = lowerShuffleAsElementInsertion(
13528 DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13531 // We have different paths for blend lowering, but they all must use the
13532 // *exact* same predicate.
13533 bool IsBlendSupported = Subtarget.hasSSE41();
13534 if (IsBlendSupported)
13535 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
13536 Zeroable, Subtarget, DAG))
13539 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
13540 Zeroable, Subtarget, DAG))
13543 // Use dedicated unpack instructions for masks that match their pattern.
13544 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
13547 // Try to use byte rotation instructions.
13548 // Its more profitable for pre-SSSE3 to use shuffles/unpacks.
13549 if (Subtarget.hasSSSE3()) {
13550 if (Subtarget.hasVLX())
13551 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i32, V1, V2, Mask,
13552 Subtarget, DAG))
13553 return Rotate;
13555 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
13556 Subtarget, DAG))
13557 return Rotate;
13558 }
13560 // Assume that a single SHUFPS is faster than an alternative sequence of
13561 // multiple instructions (even if the CPU has a domain penalty).
13562 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
13563 if (!isSingleSHUFPSMask(Mask)) {
13564 // If we have direct support for blends, we should lower by decomposing into
13565 // a permute. That will be faster than the domain cross.
13566 if (IsBlendSupported)
13567 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2, Mask,
13568 Subtarget, DAG);
13570 // Try to lower by permuting the inputs into an unpack instruction.
13571 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
13572 Mask, Subtarget, DAG))
13576 // We implement this with SHUFPS because it can blend from two vectors.
13577 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
13578 // up the inputs, bypassing domain shift penalties that we would incur if we
13579 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
13580 // relevant.
13581 SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
13582 SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
13583 SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
13584 return DAG.getBitcast(MVT::v4i32, ShufPS);
13587 /// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
13588 /// shuffle lowering, and the most complex part.
13590 /// The lowering strategy is to try to form pairs of input lanes which are
13591 /// targeted at the same half of the final vector, and then use a dword shuffle
13592 /// to place them onto the right half, and finally unpack the paired lanes into
13593 /// their final position.
13595 /// The exact breakdown of how to form these dword pairs and align them on the
13596 /// correct sides is really tricky. See the comments within the function for
13597 /// more of the details.
13599 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
13600 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
13601 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
13602 /// vector, form the analogous 128-bit 8-element Mask.
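/// For example, a v16i16 shuffle that applies <0, 2, 1, 3, 4, 6, 5, 7> within
/// each 128-bit lane is lowered by passing that single 8-element mask here.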
13603 static SDValue lowerV8I16GeneralSingleInputShuffle(
13604 const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
13605 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13606 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
13607 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
13609 assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
13610 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
13611 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
13613 // Attempt to directly match PSHUFLW or PSHUFHW.
13614 if (isUndefOrInRange(LoMask, 0, 4) &&
13615 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
13616 return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13617 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13619 if (isUndefOrInRange(HiMask, 4, 8) &&
13620 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
13621 for (int i = 0; i != 4; ++i)
13622 HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
13623 return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13624 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13627 SmallVector<int, 4> LoInputs;
13628 copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
13629 array_pod_sort(LoInputs.begin(), LoInputs.end());
13630 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
13631 SmallVector<int, 4> HiInputs;
13632 copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
13633 array_pod_sort(HiInputs.begin(), HiInputs.end());
13634 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
13635 int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
13636 int NumHToL = LoInputs.size() - NumLToL;
13637 int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
13638 int NumHToH = HiInputs.size() - NumLToH;
13639 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
13640 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
13641 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
13642 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
13644 // If we are shuffling values from one half - check how many different DWORD
13645 // pairs we need to create. If only 1 or 2 then we can perform this as a
13646 // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
13647 auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
13648 ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
13649 V = DAG.getNode(ShufWOp, DL, VT, V,
13650 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13651 V = DAG.getBitcast(PSHUFDVT, V);
13652 V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
13653 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
13654 return DAG.getBitcast(VT, V);
13657 if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
13658 int PSHUFDMask[4] = { -1, -1, -1, -1 };
13659 SmallVector<std::pair<int, int>, 4> DWordPairs;
13660 int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
13662 // Collect the different DWORD pairs.
13663 for (int DWord = 0; DWord != 4; ++DWord) {
13664 int M0 = Mask[2 * DWord + 0];
13665 int M1 = Mask[2 * DWord + 1];
13666 M0 = (M0 >= 0 ? M0 % 4 : M0);
13667 M1 = (M1 >= 0 ? M1 % 4 : M1);
13668 if (M0 < 0 && M1 < 0)
13671 bool Match = false;
13672 for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
13673 auto &DWordPair = DWordPairs[j];
13674 if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
13675 (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
13676 DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
13677 DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
13678 PSHUFDMask[DWord] = DOffset + j;
13679 Match = true;
13680 break;
13681 }
13682 }
13683 if (!Match) {
13684 PSHUFDMask[DWord] = DOffset + DWordPairs.size();
13685 DWordPairs.push_back(std::make_pair(M0, M1));
13689 if (DWordPairs.size() <= 2) {
13690 DWordPairs.resize(2, std::make_pair(-1, -1));
13691 int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
13692 DWordPairs[1].first, DWordPairs[1].second};
13693 if ((NumHToL + NumHToH) == 0)
13694 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
13695 if ((NumLToL + NumLToH) == 0)
13696 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
13700 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
13701 // such inputs we can swap two of the dwords across the half mark and end up
13702 // with <=2 inputs to each half in each half. Once there, we can fall through
13703 // to the generic code below. For example:
13705 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13706 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
13708 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
13709 // and an existing 2-into-2 on the other half. In this case we may have to
13710 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
13711 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
13712 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
13713 // because any other situation (including a 3-into-1 or 1-into-3 in the other
13714 // half than the one we target for fixing) will be fixed when we re-enter this
13715 // path. We will also combine away any sequence of PSHUFD instructions that
13716 // result into a single instruction. Here is an example of the tricky case:
13718 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13719 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
13721 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
13723 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
13724 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
13726 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
13727 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
13729 // The result is fine to be handled by the generic logic.
13730 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
13731 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
13732 int AOffset, int BOffset) {
13733 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
13734 "Must call this with A having 3 or 1 inputs from the A half.");
13735 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
13736 "Must call this with B having 1 or 3 inputs from the B half.");
13737 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
13738 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
13740 bool ThreeAInputs = AToAInputs.size() == 3;
13742 // Compute the index of dword with only one word among the three inputs in
13743 // a half by taking the sum of the half with three inputs and subtracting
13744 // the sum of the actual three inputs. The difference is the remaining
13745 // input dword.
13746 int ADWord = 0, BDWord = 0;
13747 int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
13748 int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
13749 int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
13750 ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
13751 int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
13752 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
13753 int TripleNonInputIdx =
13754 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
13755 TripleDWord = TripleNonInputIdx / 2;
13757 // We use xor with one to compute the adjacent DWord to whichever one the
13758 // OneInput is in.
13759 OneInputDWord = (OneInput / 2) ^ 1;
13761 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
13762 // and BToA inputs. If there is also such a problem with the BToB and AToB
13763 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
13764 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
13765 // is essential that we don't *create* a 3<-1 as then we might oscillate.
13766 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
13767 // Compute how many inputs will be flipped by swapping these DWords. We need
13769 // to balance this to ensure we don't form a 3-1 shuffle in the other half.
13771 int NumFlippedAToBInputs =
13772 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
13773 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
13774 int NumFlippedBToBInputs =
13775 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
13776 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
13777 if ((NumFlippedAToBInputs == 1 &&
13778 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
13779 (NumFlippedBToBInputs == 1 &&
13780 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
13781 // We choose whether to fix the A half or B half based on whether that
13782 // half has zero flipped inputs. At zero, we may not be able to fix it
13783 // with that half. We also bias towards fixing the B half because that
13784 // will more commonly be the high half, and we have to bias one way.
13785 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
13786 ArrayRef<int> Inputs) {
13787 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
13788 bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
13789 // Determine whether the free index is in the flipped dword or the
13790 // unflipped dword based on where the pinned index is. We use this bit
13791 // in an xor to conditionally select the adjacent dword.
13792 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
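// Worked example (editorial note): if the pinned index already lives in DWord
// (PinnedIdx / 2 == DWord), the xor picks the adjacent dword, giving
// FixFreeIdx = 2 * (DWord ^ 1); otherwise it stays at 2 * DWord, the first
// word of the flipped dword itself.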
13793 bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13794 if (IsFixIdxInput == IsFixFreeIdxInput)
13795 FixFreeIdx += 1;
13796 IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13797 assert(IsFixIdxInput != IsFixFreeIdxInput &&
13798 "We need to be changing the number of flipped inputs!");
13799 int PSHUFHalfMask[] = {0, 1, 2, 3};
13800 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
13801 V = DAG.getNode(
13802 FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
13803 MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
13804 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13806 for (int &M : Mask)
13807 if (M >= 0 && M == FixIdx)
13808 M = FixFreeIdx;
13809 else if (M >= 0 && M == FixFreeIdx)
13810 M = FixIdx;
13811 };
13812 if (NumFlippedBToBInputs != 0) {
13813 int BPinnedIdx =
13814 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
13815 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
13816 } else {
13817 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
13818 int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
13819 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
13820 }
13821 }
13822 }
13824 int PSHUFDMask[] = {0, 1, 2, 3};
13825 PSHUFDMask[ADWord] = BDWord;
13826 PSHUFDMask[BDWord] = ADWord;
13827 V = DAG.getBitcast(
13828 VT,
13829 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13830 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13832 // Adjust the mask to match the new locations of A and B.
13833 for (int &M : Mask)
13834 if (M >= 0 && M/2 == ADWord)
13835 M = 2 * BDWord + M % 2;
13836 else if (M >= 0 && M/2 == BDWord)
13837 M = 2 * ADWord + M % 2;
13839 // Recurse back into this routine to re-compute state now that this isn't
13840 // a 3 and 1 problem.
13841 return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
13842 };
13843 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
13844 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
13845 if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
13846 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
13848 // At this point there are at most two inputs to the low and high halves from
13849 // each half. That means the inputs can always be grouped into dwords and
13850 // those dwords can then be moved to the correct half with a dword shuffle.
13851 // We use at most one low and one high word shuffle to collect these paired
13852 // inputs into dwords, and finally a dword shuffle to place them.
13853 int PSHUFLMask[4] = {-1, -1, -1, -1};
13854 int PSHUFHMask[4] = {-1, -1, -1, -1};
13855 int PSHUFDMask[4] = {-1, -1, -1, -1};
13857 // First fix the masks for all the inputs that are staying in their
13858 // original halves. This will then dictate the targets of the cross-half
13860 auto fixInPlaceInputs =
13861 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
13862 MutableArrayRef<int> SourceHalfMask,
13863 MutableArrayRef<int> HalfMask, int HalfOffset) {
13864 if (InPlaceInputs.empty())
13865 return;
13866 if (InPlaceInputs.size() == 1) {
13867 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13868 InPlaceInputs[0] - HalfOffset;
13869 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
13870 return;
13871 }
13872 if (IncomingInputs.empty()) {
13873 // Just fix all of the in place inputs.
13874 for (int Input : InPlaceInputs) {
13875 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
13876 PSHUFDMask[Input / 2] = Input / 2;
13877 }
13878 return;
13879 }
13881 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
13882 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13883 InPlaceInputs[0] - HalfOffset;
13884 // Put the second input next to the first so that they are packed into
13885 // a dword. We find the adjacent index by toggling the low bit.
13886 int AdjIndex = InPlaceInputs[0] ^ 1;
13887 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
13888 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
13889 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
13890 };
13891 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
13892 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
13894 // Now gather the cross-half inputs and place them into a free dword of
13895 // their target half.
13896 // FIXME: This operation could almost certainly be simplified dramatically to
13897 // look more like the 3-1 fixing operation.
13898 auto moveInputsToRightHalf = [&PSHUFDMask](
13899 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
13900 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
13901 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
13902 int DestOffset) {
13903 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
13904 return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
13905 };
13906 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
13907 int Word) {
13908 int LowWord = Word & ~1;
13909 int HighWord = Word | 1;
13910 return isWordClobbered(SourceHalfMask, LowWord) ||
13911 isWordClobbered(SourceHalfMask, HighWord);
13912 };
13914 if (IncomingInputs.empty())
13915 return;
13917 if (ExistingInputs.empty()) {
13918 // Map any dwords with inputs from them into the right half.
13919 for (int Input : IncomingInputs) {
13920 // If the source half mask maps over the inputs, turn those into
13921 // swaps and use the swapped lane.
13922 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
13923 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
13924 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
13925 Input - SourceOffset;
13926 // We have to swap the uses in our half mask in one sweep.
13927 for (int &M : HalfMask)
13928 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
13929 M = Input;
13930 else if (M == Input)
13931 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13932 } else {
13933 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
13934 Input - SourceOffset &&
13935 "Previous placement doesn't match!");
13936 }
13937 // Note that this correctly re-maps both when we do a swap and when
13938 // we observe the other side of the swap above. We rely on that to
13939 // avoid swapping the members of the input list directly.
13940 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13941 }
13943 // Map the input's dword into the correct half.
13944 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
13945 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
13946 else
13947 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
13948 Input / 2 &&
13949 "Previous placement doesn't match!");
13950 }
13952 // And just directly shift any other-half mask elements to be same-half
13953 // as we will have mirrored the dword containing the element into the
13954 // same position within that half.
13955 for (int &M : HalfMask)
13956 if (M >= SourceOffset && M < SourceOffset + 4) {
13957 M = M - SourceOffset + DestOffset;
13958 assert(M >= 0 && "This should never wrap below zero!");
13959 }
13960 return;
13961 }
13963 // Ensure we have the input in a viable dword of its current half. This
13964 // is particularly tricky because the original position may be clobbered
13965 // by inputs being moved and *staying* in that half.
13966 if (IncomingInputs.size() == 1) {
13967 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13968 int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
13969 SourceOffset;
13970 SourceHalfMask[InputFixed - SourceOffset] =
13971 IncomingInputs[0] - SourceOffset;
13972 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
13973 InputFixed);
13974 IncomingInputs[0] = InputFixed;
13975 }
13976 } else if (IncomingInputs.size() == 2) {
13977 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
13978 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13979 // We have two non-adjacent or clobbered inputs we need to extract from
13980 // the source half. To do this, we need to map them into some adjacent
13981 // dword slot in the source mask.
13982 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
13983 IncomingInputs[1] - SourceOffset};
13985 // If there is a free slot in the source half mask adjacent to one of
13986 // the inputs, place the other input in it. We use (Index XOR 1) to
13987 // compute an adjacent index.
13988 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
13989 SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
13990 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
13991 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13992 InputsFixed[1] = InputsFixed[0] ^ 1;
13993 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
13994 SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
13995 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
13996 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
13997 InputsFixed[0] = InputsFixed[1] ^ 1;
13998 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
13999 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
14000 // The two inputs are in the same DWord but it is clobbered and the
14001 // adjacent DWord isn't used at all. Move both inputs to the free
14002 // slot.
14003 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
14004 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
14005 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
14006 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
14007 } else {
14008 // The only way we hit this point is if there is no clobbering
14009 // (because there are no off-half inputs to this half) and there is no
14010 // free slot adjacent to one of the inputs. In this case, we have to
14011 // swap an input with a non-input.
14012 for (int i = 0; i < 4; ++i)
14013 assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
14014 "We can't handle any clobbers here!");
14015 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
14016 "Cannot have adjacent inputs here!");
14018 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
14019 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
14021 // We also have to update the final source mask in this case because
14022 // it may need to undo the above swap.
14023 for (int &M : FinalSourceHalfMask)
14024 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
14025 M = InputsFixed[1] + SourceOffset;
14026 else if (M == InputsFixed[1] + SourceOffset)
14027 M = (InputsFixed[0] ^ 1) + SourceOffset;
14029 InputsFixed[1] = InputsFixed[0] ^ 1;
14030 }
14032 // Point everything at the fixed inputs.
14033 for (int &M : HalfMask)
14034 if (M == IncomingInputs[0])
14035 M = InputsFixed[0] + SourceOffset;
14036 else if (M == IncomingInputs[1])
14037 M = InputsFixed[1] + SourceOffset;
14039 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
14040 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
14041 }
14042 } else {
14043 llvm_unreachable("Unhandled input size!");
14044 }
14046 // Now hoist the DWord down to the right half.
14047 int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
14048 assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
14049 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
14050 for (int &M : HalfMask)
14051 for (int Input : IncomingInputs)
14052 if (M == Input)
14053 M = FreeDWord * 2 + Input % 2;
14054 };
14055 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
14056 /*SourceOffset*/ 4, /*DestOffset*/ 0);
14057 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
14058 /*SourceOffset*/ 0, /*DestOffset*/ 4);
14060 // Now enact all the shuffles we've computed to move the inputs into their
14061 // target half.
14062 if (!isNoopShuffleMask(PSHUFLMask))
14063 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14064 getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
14065 if (!isNoopShuffleMask(PSHUFHMask))
14066 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14067 getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
14068 if (!isNoopShuffleMask(PSHUFDMask))
14069 V = DAG.getBitcast(
14070 VT,
14071 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
14072 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
14074 // At this point, each half should contain all its inputs, and we can then
14075 // just shuffle them into their final position.
14076 assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
14077 "Failed to lift all the high half inputs to the low mask!");
14078 assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
14079 "Failed to lift all the low half inputs to the high mask!");
14081 // Do a half shuffle for the low mask.
14082 if (!isNoopShuffleMask(LoMask))
14083 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14084 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
14086 // Do a half shuffle with the high mask after shifting its values down.
14087 for (int &M : HiMask)
14088 if (M >= 4)
14089 M -= 4;
14090 if (!isNoopShuffleMask(HiMask))
14091 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14092 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
14094 return V;
14095 }
14097 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
14098 /// blend if only one input is used.
14099 static SDValue lowerShuffleAsBlendOfPSHUFBs(
14100 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14101 const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
14102 assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
14103 "Lane crossing shuffle masks not supported");
14105 int NumBytes = VT.getSizeInBits() / 8;
14106 int Size = Mask.size();
14107 int Scale = NumBytes / Size;
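// For example (illustrative): lowering a v8i16 shuffle here gives NumBytes =
// 16, Size = 8 and Scale = 2, so every mask element expands into two adjacent
// byte selectors below.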
14109 SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
14110 SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
14112 V1InUse = false;
14113 V2InUse = false;
14114 for (int i = 0; i < NumBytes; ++i) {
14115 int M = Mask[i / Scale];
14116 if (M < 0)
14117 continue;
14119 const int ZeroMask = 0x80;
14120 int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
14121 int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
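// E.g. (illustrative) with Scale = 2 and M = 9 in a v8i16 blend: the two bytes
// covering that element get V2Idx = (9 - 8) * 2 + 0/1 = 2 and 3, while V1Idx
// stays 0x80 so PSHUFB zeroes the corresponding V1 bytes.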
14122 if (Zeroable[i / Scale])
14123 V1Idx = V2Idx = ZeroMask;
14125 V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
14126 V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
14127 V1InUse |= (ZeroMask != V1Idx);
14128 V2InUse |= (ZeroMask != V2Idx);
14129 }
14131 MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
14132 if (V1InUse)
14133 V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
14134 DAG.getBuildVector(ShufVT, DL, V1Mask));
14135 if (V2InUse)
14136 V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
14137 DAG.getBuildVector(ShufVT, DL, V2Mask));
14139 // If we need shuffled inputs from both, blend the two.
14140 SDValue V;
14141 if (V1InUse && V2InUse)
14142 V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
14143 else
14144 V = V1InUse ? V1 : V2;
14146 // Cast the result back to the correct type.
14147 return DAG.getBitcast(VT, V);
14148 }
14150 /// Generic lowering of 8-lane i16 shuffles.
14152 /// This handles both single-input shuffles and combined shuffle/blends with
14153 /// two inputs. The single input shuffles are immediately delegated to
14154 /// a dedicated lowering routine.
14156 /// The blends are lowered in one of three fundamental ways. If there are few
14157 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
14158 /// of the input is significantly cheaper when lowered as an interleaving of
14159 /// the two inputs, try to interleave them. Otherwise, blend the low and high
14160 /// halves of the inputs separately (making them have relatively few inputs)
14161 /// and then concatenate them.
14162 static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14163 const APInt &Zeroable, SDValue V1, SDValue V2,
14164 const X86Subtarget &Subtarget,
14165 SelectionDAG &DAG) {
14166 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
14167 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
14168 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
14170 // Whenever we can lower this as a zext, that instruction is strictly faster
14171 // than any alternative.
14172 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
14173 Zeroable, Subtarget, DAG))
14174 return ZExt;
14176 int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
14178 if (NumV2Inputs == 0) {
14179 // Try to use shift instructions.
14180 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
14181 Zeroable, Subtarget, DAG))
14182 return Shift;
14184 // Check for being able to broadcast a single element.
14185 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
14186 Mask, Subtarget, DAG))
14187 return Broadcast;
14189 // Use dedicated unpack instructions for masks that match their pattern.
14190 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14191 return V;
14193 // Use dedicated pack instructions for masks that match their pattern.
14194 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14195 Subtarget))
14196 return V;
14198 // Try to use byte rotation instructions.
14199 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
14200 Subtarget, DAG))
14201 return Rotate;
14203 // Make a copy of the mask so it can be modified.
14204 SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
14205 return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
14206 Subtarget, DAG);
14207 }
14209 assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
14210 "All single-input shuffles should be canonicalized to be V1-input "
14211 "shuffles.");
14213 // Try to use shift instructions.
14214 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
14215 Zeroable, Subtarget, DAG))
14216 return Shift;
14218 // See if we can use SSE4A Extraction / Insertion.
14219 if (Subtarget.hasSSE4A())
14220 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
14221 Zeroable, DAG))
14222 return V;
14224 // There are special ways we can lower some single-element blends.
14225 if (NumV2Inputs == 1)
14226 if (SDValue V = lowerShuffleAsElementInsertion(
14227 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
14228 return V;
14230 // We have different paths for blend lowering, but they all must use the
14231 // *exact* same predicate.
14232 bool IsBlendSupported = Subtarget.hasSSE41();
14233 if (IsBlendSupported)
14234 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
14235 Zeroable, Subtarget, DAG))
14236 return Blend;
14238 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
14239 Zeroable, Subtarget, DAG))
14240 return Masked;
14242 // Use dedicated unpack instructions for masks that match their pattern.
14243 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14244 return V;
14246 // Use dedicated pack instructions for masks that match their pattern.
14247 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14248 Subtarget))
14249 return V;
14251 // Try to use byte rotation instructions.
14252 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
14253 Subtarget, DAG))
14254 return Rotate;
14256 if (SDValue BitBlend =
14257 lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
14258 return BitBlend;
14260 // Try to use byte shift instructions to mask.
14261 if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
14262 Zeroable, Subtarget, DAG))
14263 return V;
14265 // Try to lower by permuting the inputs into an unpack instruction.
14266 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
14267 Mask, Subtarget, DAG))
14268 return Unpack;
14270 // If we can't directly blend but can use PSHUFB, that will be better as it
14271 // can both shuffle and set up the inefficient blend.
14272 if (!IsBlendSupported && Subtarget.hasSSSE3()) {
14273 bool V1InUse, V2InUse;
14274 return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
14275 Zeroable, DAG, V1InUse, V2InUse);
14276 }
14278 // We can always bit-blend if we have to so the fallback strategy is to
14279 // decompose into single-input permutes and blends.
14280 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
14281 Mask, Subtarget, DAG);
14282 }
14284 /// Check whether a compaction lowering can be done by dropping even
14285 /// elements and compute how many times even elements must be dropped.
14287 /// This handles shuffles which take every Nth element where N is a power of
14288 /// two. Example shuffle masks:
14290 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
14291 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
14292 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
14293 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
14294 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
14295 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
14297 /// Any of these lanes can of course be undef.
14299 /// This routine only supports N <= 3.
14300 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
14301 /// for similar issues.
14303 /// \returns N above, or the number of times even elements must be dropped if
14304 /// there is such a number. Otherwise returns zero.
14305 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
14306 bool IsSingleInput) {
14307 // The modulus for the shuffle vector entries is based on whether this is
14308 // a single input or not.
14309 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
14310 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
14311 "We should only be called with masks with a power-of-2 size!");
14313 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
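// Example (editorial): a two-input v16i8 shuffle has ShuffleModulus = 32 and
// ModMask = 31, so for N = 1 each defined mask element must equal (2 * i) % 32,
// i.e. the even elements of the concatenated inputs.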
14315 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
14316 // and 2^3 simultaneously. This is because we may have ambiguity with
14317 // partially undef inputs.
14318 bool ViableForN[3] = {true, true, true};
14320 for (int i = 0, e = Mask.size(); i < e; ++i) {
14321 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
14322 // want.
14323 if (Mask[i] < 0)
14324 continue;
14326 bool IsAnyViable = false;
14327 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
14328 if (ViableForN[j]) {
14329 uint64_t N = j + 1;
14331 // The shuffle mask must be equal to (i * 2^N) % M.
14332 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
14333 IsAnyViable = true;
14334 else
14335 ViableForN[j] = false;
14336 }
14337 // Early exit if we exhaust the possible powers of two.
14338 if (!IsAnyViable)
14339 break;
14340 }
14342 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
14343 if (ViableForN[j])
14344 return j + 1;
14346 // Return 0 as there is no viable power of two.
14347 return 0;
14348 }
14350 static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
14351 ArrayRef<int> Mask, SDValue V1,
14352 SDValue V2, SelectionDAG &DAG) {
14353 MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
14354 MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
14356 SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
14357 if (V2.isUndef())
14358 return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
14360 return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
14361 }
14363 /// Generic lowering of v16i8 shuffles.
14365 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
14366 /// detect any complexity reducing interleaving. If that doesn't help, it uses
14367 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
14368 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
14369 /// back together.
14370 static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14371 const APInt &Zeroable, SDValue V1, SDValue V2,
14372 const X86Subtarget &Subtarget,
14373 SelectionDAG &DAG) {
14374 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14375 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14376 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
14378 // Try to use shift instructions.
14379 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
14380 Zeroable, Subtarget, DAG))
14381 return Shift;
14383 // Try to use byte rotation instructions.
14384 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
14385 Subtarget, DAG))
14386 return Rotate;
14388 // Use dedicated pack instructions for masks that match their pattern.
14389 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
14390 Subtarget))
14391 return V;
14393 // Try to use a zext lowering.
14394 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
14395 Zeroable, Subtarget, DAG))
14396 return ZExt;
14398 // See if we can use SSE4A Extraction / Insertion.
14399 if (Subtarget.hasSSE4A())
14400 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
14401 Zeroable, DAG))
14402 return V;
14404 int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
14406 // For single-input shuffles, there are some nicer lowering tricks we can use.
14407 if (NumV2Elements == 0) {
14408 // Check for being able to broadcast a single element.
14409 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
14410 Mask, Subtarget, DAG))
14411 return Broadcast;
14413 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14414 return V;
14416 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
14417 // Notably, this handles splat and partial-splat shuffles more efficiently.
14418 // However, it only makes sense if the pre-duplication shuffle simplifies
14419 // things significantly. Currently, this means we need to be able to
14420 // express the pre-duplication shuffle as an i16 shuffle.
14422 // FIXME: We should check for other patterns which can be widened into an
14423 // i16 shuffle as well.
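// For example (illustrative), a mask such as
// <0,0,1,1,2,2,3,3,0,0,1,1,2,2,3,3> duplicates each source byte into an
// adjacent pair, so the pre-duplication shuffle can be expressed on i16
// elements.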
14424 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
14425 for (int i = 0; i < 16; i += 2)
14426 if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
14427 return false;
14429 return true;
14430 };
14431 auto tryToWidenViaDuplication = [&]() -> SDValue {
14432 if (!canWidenViaDuplication(Mask))
14433 return SDValue();
14434 SmallVector<int, 4> LoInputs;
14435 copy_if(Mask, std::back_inserter(LoInputs),
14436 [](int M) { return M >= 0 && M < 8; });
14437 array_pod_sort(LoInputs.begin(), LoInputs.end());
14438 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
14439 LoInputs.end());
14440 SmallVector<int, 4> HiInputs;
14441 copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
14442 array_pod_sort(HiInputs.begin(), HiInputs.end());
14443 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
14444 HiInputs.end());
14446 bool TargetLo = LoInputs.size() >= HiInputs.size();
14447 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
14448 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
14450 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
14451 SmallDenseMap<int, int, 8> LaneMap;
14452 for (int I : InPlaceInputs) {
14453 PreDupI16Shuffle[I/2] = I/2;
14454 LaneMap[I] = I;
14455 }
14456 int j = TargetLo ? 0 : 4, je = j + 4;
14457 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
14458 // Check if j is already a shuffle of this input. This happens when
14459 // there are two adjacent bytes after we move the low one.
14460 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
14461 // If we haven't yet mapped the input, search for a slot into which
14462 // we can map it.
14463 while (j < je && PreDupI16Shuffle[j] >= 0)
14464 ++j;
14466 if (j == je)
14467 // We can't place the inputs into a single half with a simple i16 shuffle,
14468 // so bail.
14469 return SDValue();
14470 // Map this input with the i16 shuffle.
14471 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
14472 }
14474 // Update the lane map based on the mapping we ended up with.
14475 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
14476 }
14477 V1 = DAG.getBitcast(
14478 MVT::v16i8,
14479 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14480 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
14482 // Unpack the bytes to form the i16s that will be shuffled into place.
14483 bool EvenInUse = false, OddInUse = false;
14484 for (int i = 0; i < 16; i += 2) {
14485 EvenInUse |= (Mask[i + 0] >= 0);
14486 OddInUse |= (Mask[i + 1] >= 0);
14487 if (EvenInUse && OddInUse)
14488 break;
14489 }
14490 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
14491 MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
14492 OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));
14494 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14495 for (int i = 0; i < 16; ++i)
14496 if (Mask[i] >= 0) {
14497 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
14498 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
14499 if (PostDupI16Shuffle[i / 2] < 0)
14500 PostDupI16Shuffle[i / 2] = MappedMask;
14501 else
14502 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
14503 "Conflicting entries in the original shuffle!");
14504 }
14505 return DAG.getBitcast(
14506 MVT::v16i8,
14507 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14508 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
14509 };
14510 if (SDValue V = tryToWidenViaDuplication())
14511 return V;
14512 }
14514 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
14515 Zeroable, Subtarget, DAG))
14516 return Masked;
14518 // Use dedicated unpack instructions for masks that match their pattern.
14519 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14520 return V;
14522 // Try to use byte shift instructions to mask.
14523 if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
14524 Zeroable, Subtarget, DAG))
14525 return V;
14527 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
14528 // with PSHUFB. It is important to do this before we attempt to generate any
14529 // blends but after all of the single-input lowerings. If the single input
14530 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
14531 // want to preserve that and we can DAG combine any longer sequences into
14532 // a PSHUFB in the end. But once we start blending from multiple inputs,
14533 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
14534 // and there are *very* few patterns that would actually be faster than the
14535 // PSHUFB approach because of its ability to zero lanes.
14537 // FIXME: The only exceptions to the above are blends which are exact
14538 // interleavings with direct instructions supporting them. We currently don't
14539 // handle those well here.
14540 if (Subtarget.hasSSSE3()) {
14541 bool V1InUse = false;
14542 bool V2InUse = false;
14544 SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
14545 DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
14547 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
14548 // do so. This avoids using them to handle blends-with-zero which is
14549 // important as a single pshufb is significantly faster for that.
14550 if (V1InUse && V2InUse) {
14551 if (Subtarget.hasSSE41())
14552 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
14553 Zeroable, Subtarget, DAG))
14554 return Blend;
14556 // We can use an unpack to do the blending rather than an or in some
14557 // cases. Even though the or may be (very minorly) more efficient, we
14558 // preference this lowering because there are common cases where part of
14559 // the complexity of the shuffles goes away when we do the final blend as
14560 // an unpack.
14561 // FIXME: It might be worth trying to detect if the unpack-feeding
14562 // shuffles will both be pshufb, in which case we shouldn't bother with
14563 // this.
14564 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
14565 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14566 return Unpack;
14568 // If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
14569 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
14570 return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);
14572 // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
14573 // PALIGNR will be cheaper than the second PSHUFB+OR.
14574 if (SDValue V = lowerShuffleAsByteRotateAndPermute(
14575 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14576 return V;
14577 }
14579 return PSHUFB;
14580 }
14582 // There are special ways we can lower some single-element blends.
14583 if (NumV2Elements == 1)
14584 if (SDValue V = lowerShuffleAsElementInsertion(
14585 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14586 return V;
14588 if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
14589 return Blend;
14591 // Check whether a compaction lowering can be done. This handles shuffles
14592 // which take every Nth element for some even N. See the helper function for
14593 // details.
14595 // We special case these as they can be particularly efficiently handled with
14596 // the PACKUSWB instruction on x86 and they show up in common patterns of
14597 // rearranging bytes to truncate wide elements.
14598 bool IsSingleInput = V2.isUndef();
14599 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
14600 // NumEvenDrops is the power of two stride of the elements. Another way of
14601 // thinking about it is that we need to drop the even elements this many
14602 // times to get the original input.
14604 // First we need to zero all the dropped bytes.
14605 assert(NumEvenDrops <= 3 &&
14606 "No support for dropping even elements more than 3 times.");
14607 SmallVector<SDValue, 16> ByteClearOps(16, DAG.getConstant(0, DL, MVT::i8));
14608 for (unsigned i = 0; i != 16; i += 1 << NumEvenDrops)
14609 ByteClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i8);
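// E.g. (illustrative) NumEvenDrops == 1 sets 0xFF on bytes 0, 2, 4, ... so the
// AND keeps only the low byte of every i16 element, which PACKUS below then
// compacts back into consecutive bytes.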
14610 SDValue ByteClearMask = DAG.getBuildVector(MVT::v16i8, DL, ByteClearOps);
14611 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
14612 if (!IsSingleInput)
14613 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
14615 // Now pack things back together.
14616 V1 = DAG.getBitcast(MVT::v8i16, V1);
14617 V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
14618 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
14619 for (int i = 1; i < NumEvenDrops; ++i) {
14620 Result = DAG.getBitcast(MVT::v8i16, Result);
14621 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
14622 }
14624 return Result;
14625 }
14627 // Handle multi-input cases by blending single-input shuffles.
14628 if (NumV2Elements > 0)
14629 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2, Mask,
14630 Subtarget, DAG);
14632 // The fallback path for single-input shuffles widens this into two v8i16
14633 // vectors with unpacks, shuffles those, and then pulls them back together
14634 // with a pack.
14636 SDValue V = V1;
14637 std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14638 std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14639 for (int i = 0; i < 16; ++i)
14640 if (Mask[i] >= 0)
14641 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
14643 SDValue VLoHalf, VHiHalf;
14644 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
14645 // them out and avoid using UNPCK{L,H} to extract the elements of V as
14646 // i16s.
14647 if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
14648 none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
14649 // Use a mask to drop the high bytes.
14650 VLoHalf = DAG.getBitcast(MVT::v8i16, V);
14651 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
14652 DAG.getConstant(0x00FF, DL, MVT::v8i16));
14654 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
14655 VHiHalf = DAG.getUNDEF(MVT::v8i16);
14657 // Squash the masks to point directly into VLoHalf.
14658 for (int &M : LoBlendMask)
14659 if (M >= 0)
14660 M /= 2;
14661 for (int &M : HiBlendMask)
14662 if (M >= 0)
14663 M /= 2;
14664 } else {
14666 // VHiHalf so that we can blend them as i16s.
14667 SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
14669 VLoHalf = DAG.getBitcast(
14670 MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
14671 VHiHalf = DAG.getBitcast(
14672 MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
14673 }
14675 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
14676 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
14678 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
14679 }
14681 /// Dispatching routine to lower various 128-bit x86 vector shuffles.
14683 /// This routine breaks down the specific type of 128-bit shuffle and
14684 /// dispatches to the lowering routines accordingly.
14685 static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
14686 MVT VT, SDValue V1, SDValue V2,
14687 const APInt &Zeroable,
14688 const X86Subtarget &Subtarget,
14689 SelectionDAG &DAG) {
14690 switch (VT.SimpleTy) {
14691 case MVT::v2i64:
14692 return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14693 case MVT::v2f64:
14694 return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14695 case MVT::v4i32:
14696 return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14697 case MVT::v4f32:
14698 return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14699 case MVT::v8i16:
14700 return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14701 case MVT::v16i8:
14702 return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14704 default:
14705 llvm_unreachable("Unimplemented!");
14706 }
14707 }
14709 /// Generic routine to split vector shuffle into half-sized shuffles.
14711 /// This routine just extracts two subvectors, shuffles them independently, and
14712 /// then concatenates them back together. This should work effectively with all
14713 /// AVX vector shuffle types.
14714 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
14715 SDValue V2, ArrayRef<int> Mask,
14716 SelectionDAG &DAG) {
14717 assert(VT.getSizeInBits() >= 256 &&
14718 "Only for 256-bit or wider vector shuffles!");
14719 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
14720 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
14722 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
14723 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
14725 int NumElements = VT.getVectorNumElements();
14726 int SplitNumElements = NumElements / 2;
14727 MVT ScalarVT = VT.getVectorElementType();
14728 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
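// Illustrative example: splitting a v32i8 shuffle yields ScalarVT = i8,
// SplitNumElements = 16 and SplitVT = v16i8 for each half.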
14730 // Rather than splitting build-vectors, just build two narrower build
14731 // vectors. This helps shuffling with splats and zeros.
14732 auto SplitVector = [&](SDValue V) {
14733 V = peekThroughBitcasts(V);
14735 MVT OrigVT = V.getSimpleValueType();
14736 int OrigNumElements = OrigVT.getVectorNumElements();
14737 int OrigSplitNumElements = OrigNumElements / 2;
14738 MVT OrigScalarVT = OrigVT.getVectorElementType();
14739 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
14741 SDValue LoV, HiV;
14743 auto *BV = dyn_cast<BuildVectorSDNode>(V);
14744 if (!BV) {
14745 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14746 DAG.getIntPtrConstant(0, DL));
14747 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14748 DAG.getIntPtrConstant(OrigSplitNumElements, DL));
14749 } else {
14751 SmallVector<SDValue, 16> LoOps, HiOps;
14752 for (int i = 0; i < OrigSplitNumElements; ++i) {
14753 LoOps.push_back(BV->getOperand(i));
14754 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
14755 }
14756 LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
14757 HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
14758 }
14759 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
14760 DAG.getBitcast(SplitVT, HiV));
14761 };
14763 SDValue LoV1, HiV1, LoV2, HiV2;
14764 std::tie(LoV1, HiV1) = SplitVector(V1);
14765 std::tie(LoV2, HiV2) = SplitVector(V2);
14767 // Now create two 4-way blends of these half-width vectors.
14768 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
14769 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
14770 SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
14771 SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
14772 SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
14773 for (int i = 0; i < SplitNumElements; ++i) {
14774 int M = HalfMask[i];
14775 if (M >= NumElements) {
14776 if (M >= NumElements + SplitNumElements)
14777 UseHiV2 = true;
14778 else
14779 UseLoV2 = true;
14780 V2BlendMask[i] = M - NumElements;
14781 BlendMask[i] = SplitNumElements + i;
14782 } else if (M >= 0) {
14783 if (M >= SplitNumElements)
14784 UseHiV1 = true;
14785 else
14786 UseLoV1 = true;
14787 V1BlendMask[i] = M;
14788 BlendMask[i] = i;
14789 }
14790 }
14792 // Because the lowering happens after all combining takes place, we need to
14793 // manually combine these blend masks as much as possible so that we create
14794 // a minimal number of high-level vector shuffle nodes.
14796 // First try just blending the halves of V1 or V2.
14797 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
14798 return DAG.getUNDEF(SplitVT);
14799 if (!UseLoV2 && !UseHiV2)
14800 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14801 if (!UseLoV1 && !UseHiV1)
14802 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14804 SDValue V1Blend, V2Blend;
14805 if (UseLoV1 && UseHiV1) {
14806 V1Blend =
14807 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14808 } else {
14809 // We only use half of V1 so map the usage down into the final blend mask.
14810 V1Blend = UseLoV1 ? LoV1 : HiV1;
14811 for (int i = 0; i < SplitNumElements; ++i)
14812 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
14813 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
14814 }
14815 if (UseLoV2 && UseHiV2) {
14816 V2Blend =
14817 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14818 } else {
14819 // We only use half of V2 so map the usage down into the final blend mask.
14820 V2Blend = UseLoV2 ? LoV2 : HiV2;
14821 for (int i = 0; i < SplitNumElements; ++i)
14822 if (BlendMask[i] >= SplitNumElements)
14823 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
14824 }
14825 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
14826 };
14827 SDValue Lo = HalfBlend(LoMask);
14828 SDValue Hi = HalfBlend(HiMask);
14829 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
14830 }
14832 /// Either split a vector in halves or decompose the shuffles and the
14833 /// blend.
14835 /// This is provided as a good fallback for many lowerings of non-single-input
14836 /// shuffles with more than one 128-bit lane. In those cases, we want to select
14837 /// between splitting the shuffle into 128-bit components and stitching those
14838 /// back together vs. extracting the single-input shuffles and blending those
14839 /// results.
14840 static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
14841 SDValue V2, ArrayRef<int> Mask,
14842 const X86Subtarget &Subtarget,
14843 SelectionDAG &DAG) {
14844 assert(!V2.isUndef() && "This routine must not be used to lower single-input "
14845 "shuffles as it could then recurse on itself.");
14846 int Size = Mask.size();
14848 // If this can be modeled as a broadcast of two elements followed by a blend,
14849 // prefer that lowering. This is especially important because broadcasts can
14850 // often fold with memory operands.
14851 auto DoBothBroadcast = [&] {
14852 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
14853 for (int M : Mask)
14854 if (M >= Size) {
14855 if (V2BroadcastIdx < 0)
14856 V2BroadcastIdx = M - Size;
14857 else if (M - Size != V2BroadcastIdx)
14858 return false;
14859 } else if (M >= 0) {
14860 if (V1BroadcastIdx < 0)
14861 V1BroadcastIdx = M;
14862 else if (M != V1BroadcastIdx)
14863 return false;
14864 }
14865 return true;
14866 };
14867 if (DoBothBroadcast())
14868 return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
14869 Subtarget, DAG);
14871 // If the inputs all stem from a single 128-bit lane of each input, then we
14872 // split them rather than blending because the split will decompose to
14873 // unusually few instructions.
14874 int LaneCount = VT.getSizeInBits() / 128;
14875 int LaneSize = Size / LaneCount;
14876 SmallBitVector LaneInputs[2];
14877 LaneInputs[0].resize(LaneCount, false);
14878 LaneInputs[1].resize(LaneCount, false);
14879 for (int i = 0; i < Size; ++i)
14880 if (Mask[i] >= 0)
14881 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
14882 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
14883 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14885 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
14886 // that the decomposed single-input shuffles don't end up here.
14887 return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, Subtarget,
14888 DAG);
14889 }
14891 // Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
14892 // TODO: Extend to support v8f32 (+ 512-bit shuffles).
14893 static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
14894 SDValue V1, SDValue V2,
14895 ArrayRef<int> Mask,
14896 SelectionDAG &DAG) {
14897 assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");
14899 int LHSMask[4] = {-1, -1, -1, -1};
14900 int RHSMask[4] = {-1, -1, -1, -1};
14901 unsigned SHUFPMask = 0;
14903 // As SHUFPD uses a single LHS/RHS element per lane, we can always
14904 // perform the shuffle once the lanes have been shuffled in place.
14905 for (int i = 0; i != 4; ++i) {
14906 int M = Mask[i];
14907 if (M < 0)
14908 continue;
14909 int LaneBase = i & ~1;
14910 auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
14911 LaneMask[LaneBase + (M & 1)] = M;
14912 SHUFPMask |= (M & 1) << i;
14913 }
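// Worked example (editorial): for Mask = <0, 5, 2, 7> this builds
// LHSMask = <0, -1, 2, -1>, RHSMask = <-1, 5, -1, 7> and SHUFPMask = 0b1010,
// so the final SHUFPD picks the requested element from LHS in the even result
// slots and from RHS in the odd ones.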
14915 SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
14916 SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
14917 return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
14918 DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
14919 }
14921 /// Lower a vector shuffle crossing multiple 128-bit lanes as
14922 /// a lane permutation followed by a per-lane permutation.
14924 /// This is mainly for cases where we can have non-repeating permutes
14925 /// in each lane.
14927 /// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
14928 /// we should investigate merging them.
14929 static SDValue lowerShuffleAsLanePermuteAndPermute(
14930 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14931 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14932 int NumElts = VT.getVectorNumElements();
14933 int NumLanes = VT.getSizeInBits() / 128;
14934 int NumEltsPerLane = NumElts / NumLanes;
14936 SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
14937 SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);
14939 for (int i = 0; i != NumElts; ++i) {
14940 int M = Mask[i];
14941 if (M < 0)
14942 continue;
14944 // Ensure that each lane comes from a single source lane.
14945 int SrcLane = M / NumEltsPerLane;
14946 int DstLane = i / NumEltsPerLane;
14947 if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane))
14948 return SDValue();
14949 SrcLaneMask[DstLane] = SrcLane;
14951 PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
14952 }
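// Illustrative example: a v8i32 mask <7,6,5,4,1,0,3,2> yields SrcLaneMask =
// <1,0>, so LaneMask becomes <4,5,6,7,0,1,2,3> (swap the 128-bit lanes) and
// PermMask becomes <3,2,1,0,5,4,7,6> (reverse within each lane).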
14954 // Make sure we set all elements of the lane mask, to avoid undef propagation.
14955 SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
14956 for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
14957 int SrcLane = SrcLaneMask[DstLane];
14958 if (0 <= SrcLane)
14959 for (int j = 0; j != NumEltsPerLane; ++j) {
14960 LaneMask[(DstLane * NumEltsPerLane) + j] =
14961 (SrcLane * NumEltsPerLane) + j;
14962 }
14963 }
14965 // If we're only shuffling a single lowest lane and the rest are identity
14966 // then don't bother.
14967 // TODO - isShuffleMaskInputInPlace could be extended to something like this.
14968 int NumIdentityLanes = 0;
14969 bool OnlyShuffleLowestLane = true;
14970 for (int i = 0; i != NumLanes; ++i) {
14971 if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane,
14972 i * NumEltsPerLane))
14973 NumIdentityLanes++;
14974 else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes)
14975 OnlyShuffleLowestLane = false;
14976 }
14977 if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
14978 return SDValue();
14980 SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask);
14981 return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask);
14982 }
14984 /// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
14985 /// source with a lane permutation.
14987 /// This lowering strategy results in four instructions in the worst case for a
14988 /// single-input cross lane shuffle which is lower than any other fully general
14989 /// cross-lane shuffle strategy I'm aware of. Special cases for each particular
14990 /// shuffle pattern should be handled prior to trying this lowering.
14991 static SDValue lowerShuffleAsLanePermuteAndShuffle(
14992 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14993 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14994 // FIXME: This should probably be generalized for 512-bit vectors as well.
14995 assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
14996 int Size = Mask.size();
14997 int LaneSize = Size / 2;
14999 // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
15000 // Only do this if the elements aren't all from the lower lane,
15001 // otherwise we're (probably) better off doing a split.
15002 if (VT == MVT::v4f64 &&
15003 !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
15004 if (SDValue V =
15005 lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG))
15006 return V;
15008 // If there are only inputs from one 128-bit lane, splitting will in fact be
15009 // less expensive. The flags track whether the given lane contains an element
15010 // that crosses to another lane.
15011 if (!Subtarget.hasAVX2()) {
15012 bool LaneCrossing[2] = {false, false};
15013 for (int i = 0; i < Size; ++i)
15014 if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
15015 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
15016 if (!LaneCrossing[0] || !LaneCrossing[1])
15017 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
15018 } else {
15019 bool LaneUsed[2] = {false, false};
15020 for (int i = 0; i < Size; ++i)
15021 if (Mask[i] >= 0)
15022 LaneUsed[(Mask[i] % Size) / LaneSize] = true;
15023 if (!LaneUsed[0] || !LaneUsed[1])
15024 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
15025 }
15027 // TODO - we could support shuffling V2 in the Flipped input.
15028 assert(V2.isUndef() &&
15029 "This last part of this routine only works on single input shuffles");
15031 SmallVector<int, 32> InLaneMask(Mask.begin(), Mask.end());
15032 for (int i = 0; i < Size; ++i) {
15033 int &M = InLaneMask[i];
15034 if (M < 0)
15035 continue;
15036 if (((M % Size) / LaneSize) != (i / LaneSize))
15037 M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
15038 }
15039 assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
15040 "In-lane shuffle mask expected");
15042 // Flip the lanes, and shuffle the results which should now be in-lane.
15043 MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
15044 SDValue Flipped = DAG.getBitcast(PVT, V1);
15045 Flipped =
15046 DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
15047 Flipped = DAG.getBitcast(VT, Flipped);
15048 return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
15049 }
15051 /// Handle lowering 2-lane 128-bit shuffles.
15052 static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
15053 SDValue V2, ArrayRef<int> Mask,
15054 const APInt &Zeroable,
15055 const X86Subtarget &Subtarget,
15056 SelectionDAG &DAG) {
15057 // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
15058 if (Subtarget.hasAVX2() && V2.isUndef())
15059 return SDValue();
15061 bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
15063 SmallVector<int, 4> WidenedMask;
15064 if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
15065 return SDValue();
15067 bool IsLowZero = (Zeroable & 0x3) == 0x3;
15068 bool IsHighZero = (Zeroable & 0xc) == 0xc;
15070 // Try to use an insert into a zero vector.
15071 if (WidenedMask[0] == 0 && IsHighZero) {
15072 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
15073 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
15074 DAG.getIntPtrConstant(0, DL));
15075 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
15076 getZeroVector(VT, Subtarget, DAG, DL), LoV,
15077 DAG.getIntPtrConstant(0, DL));
15080 // TODO: If minimizing size and one of the inputs is a zero vector and the
15081 // the zero vector has only one use, we could use a VPERM2X128 to save the
15082 // instruction bytes needed to explicitly generate the zero vector.
15084 // Blends are faster and handle all the non-lane-crossing cases.
15085 if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
15086 Subtarget, DAG))
15087 return Blend;
15089 // If either input operand is a zero vector, use VPERM2X128 because its mask
15090 // allows us to replace the zero input with an implicit zero.
15091 if (!IsLowZero && !IsHighZero) {
15092 // Check for patterns which can be matched with a single insert of a 128-bit
15093 // subvector.
15094 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
15095 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
15097 // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
15098 // this will likely become vinsertf128 which can't fold a 256-bit memop.
15099 if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
15100 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
15101 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
15102 OnlyUsesV1 ? V1 : V2,
15103 DAG.getIntPtrConstant(0, DL));
15104 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
15105 DAG.getIntPtrConstant(2, DL));
15106 }
15107 }
15109 // Try to use SHUF128 if possible.
15110 if (Subtarget.hasVLX()) {
15111 if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
15112 unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
15113 ((WidenedMask[1] % 2) << 1);
15114 return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
15115 DAG.getTargetConstant(PermMask, DL, MVT::i8));
15116 }
15117 }
15118 }
15120 // Otherwise form a 128-bit permutation. After accounting for undefs,
15121 // convert the 64-bit shuffle mask selection values into 128-bit
15122 // selection bits by dividing the indexes by 2 and shifting into positions
15123 // defined by a vperm2*128 instruction's immediate control byte.
15125 // The immediate permute control byte looks like this:
15126 // [1:0] - select 128 bits from sources for low half of destination
15127 // [2] - ignore
15128 // [3] - zero low half of destination
15129 // [5:4] - select 128 bits from sources for high half of destination
15130 // [6] - ignore
15131 // [7] - zero high half of destination
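// Example (editorial note): WidenedMask = <1, 2> produces PermMask = 0x21,
// which routes V1's high 128 bits into the low half of the result and V2's
// low 128 bits into the high half.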
15133 assert((WidenedMask[0] >= 0 || IsLowZero) &&
15134 (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
15136 unsigned PermMask = 0;
15137 PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
15138 PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
15140 // Check the immediate mask and replace unused sources with undef.
15141 if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
15142 V1 = DAG.getUNDEF(VT);
15143 if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
15144 V2 = DAG.getUNDEF(VT);
15146 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
15147 DAG.getTargetConstant(PermMask, DL, MVT::i8));
15148 }
15150 /// Lower a vector shuffle by first fixing the 128-bit lanes and then
15151 /// shuffling each lane.
15153 /// This attempts to create a repeated lane shuffle where each lane uses one
15154 /// or two of the lanes of the inputs. The lanes of the input vectors are
15155 /// shuffled in one or two independent shuffles to get the lanes into the
15156 /// position needed by the final shuffle.
15157 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
15158 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15159 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15160 assert(!V2.isUndef() && "This is only useful with multiple inputs.");
15162 if (is128BitLaneRepeatedShuffleMask(VT, Mask))
15163 return SDValue();
15165 int NumElts = Mask.size();
15166 int NumLanes = VT.getSizeInBits() / 128;
15167 int NumLaneElts = 128 / VT.getScalarSizeInBits();
15168 SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
15169 SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
15171 // First pass will try to fill in the RepeatMask from lanes that need two
15172 // sources.
15173 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15174 int Srcs[2] = {-1, -1};
15175 SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
15176 for (int i = 0; i != NumLaneElts; ++i) {
15177 int M = Mask[(Lane * NumLaneElts) + i];
15178 if (M < 0)
15179 continue;
15180 // Determine which of the possible input lanes (NumLanes from each source)
15181 // this element comes from. Assign that as one of the sources for this
15182 // lane. We can assign up to 2 sources for this lane. If we run out
15183 // sources we can't do anything.
15184 int LaneSrc = M / NumLaneElts;
15185 int Src;
15186 if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
15187 Src = 0;
15188 else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
15189 Src = 1;
15190 else
15191 return SDValue();
15193 Srcs[Src] = LaneSrc;
15194 InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
15195 }
15197 // If this lane has two sources, see if it fits with the repeat mask so far.
15198 if (Srcs[1] < 0)
15199 continue;
15201 LaneSrcs[Lane][0] = Srcs[0];
15202 LaneSrcs[Lane][1] = Srcs[1];
15204 auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
15205 assert(M1.size() == M2.size() && "Unexpected mask size");
15206 for (int i = 0, e = M1.size(); i != e; ++i)
15207 if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
15208 return false;
15209 return true;
15210 };
15212 auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
15213 assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
15214 for (int i = 0, e = MergedMask.size(); i != e; ++i) {
15215 int M = Mask[i];
15216 if (M < 0)
15217 continue;
15218 assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
15219 "Unexpected mask element");
15220 MergedMask[i] = M;
15221 }
15222 };
15224 if (MatchMasks(InLaneMask, RepeatMask)) {
15225 // Merge this lane mask into the final repeat mask.
15226 MergeMasks(InLaneMask, RepeatMask);
15230 // Didn't find a match. Swap the operands and try again.
15231 std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
15232 ShuffleVectorSDNode::commuteMask(InLaneMask);
15234 if (MatchMasks(InLaneMask, RepeatMask)) {
15235 // Merge this lane mask into the final repeat mask.
15236 MergeMasks(InLaneMask, RepeatMask);
15240 // Couldn't find a match with the operands in either order.
15244 // Now handle any lanes with only one source.
15245 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15246 // If this lane has already been processed, skip it.
15247 if (LaneSrcs[Lane][0] >= 0)
15250 for (int i = 0; i != NumLaneElts; ++i) {
15251 int M = Mask[(Lane * NumLaneElts) + i];
15255 // If RepeatMask isn't defined yet we can define it ourselves.
15256 if (RepeatMask[i] < 0)
15257 RepeatMask[i] = M % NumLaneElts;
15259 if (RepeatMask[i] < NumElts) {
15260 if (RepeatMask[i] != M % NumLaneElts)
15262 LaneSrcs[Lane][0] = M / NumLaneElts;
15264 if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
15266 LaneSrcs[Lane][1] = M / NumLaneElts;
15270 if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
15274 SmallVector<int, 16> NewMask(NumElts, -1);
15275 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15276 int Src = LaneSrcs[Lane][0];
15277 for (int i = 0; i != NumLaneElts; ++i) {
15280 M = Src * NumLaneElts + i;
15281 NewMask[Lane * NumLaneElts + i] = M;
15284 SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15285 // Ensure we didn't get back the shuffle we started with.
15286 // FIXME: This is a hack to make up for some splat handling code in
15287 // getVectorShuffle.
15288 if (isa<ShuffleVectorSDNode>(NewV1) &&
15289 cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
15292 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15293 int Src = LaneSrcs[Lane][1];
15294 for (int i = 0; i != NumLaneElts; ++i) {
15297 M = Src * NumLaneElts + i;
15298 NewMask[Lane * NumLaneElts + i] = M;
15301 SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15302 // Ensure we didn't get back the shuffle we started with.
15303 // FIXME: This is a hack to make up for some splat handling code in
15304 // getVectorShuffle.
15305 if (isa<ShuffleVectorSDNode>(NewV2) &&
15306 cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
15309 for (int i = 0; i != NumElts; ++i) {
15310 NewMask[i] = RepeatMask[i % NumLaneElts];
15311 if (NewMask[i] < 0)
15314 NewMask[i] += (i / NumLaneElts) * NumLaneElts;
15316 return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
15319 /// If the input shuffle mask results in a vector that is undefined in all upper
15320 /// or lower half elements and that mask accesses only 2 halves of the
15321 /// shuffle's operands, return true. A mask of half the width with mask indexes
15322 /// adjusted to access the extracted halves of the original shuffle operands is
15323 /// returned in HalfMask. HalfIdx1 and HalfIdx2 return which of the four input
15324 /// half vectors (0 = lower V1, 1 = upper V1, 2 = lower V2, 3 = upper V2) is accessed.
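/// For example (illustrative), given two v8 operands and the mask
/// <u, u, u, u, 8, 9, 4, 5>, the lower half of the result is undef and this
/// returns HalfMask = <0, 1, 4, 5>, HalfIdx1 = 2 (lower half of V2) and
/// HalfIdx2 = 1 (upper half of V1).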
15326 getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
15327 int &HalfIdx1, int &HalfIdx2) {
15328 assert((Mask.size() == HalfMask.size() * 2) &&
15329 "Expected input mask to be twice as long as output");
15331 // Exactly one half of the result must be undef to allow narrowing.
15332 bool UndefLower = isUndefLowerHalf(Mask);
15333 bool UndefUpper = isUndefUpperHalf(Mask);
15334 if (UndefLower == UndefUpper)
15337 unsigned HalfNumElts = HalfMask.size();
15338 unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
15341 for (unsigned i = 0; i != HalfNumElts; ++i) {
15342 int M = Mask[i + MaskIndexOffset];
15348 // Determine which of the 4 half vectors this element is from.
15349 // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
15350 int HalfIdx = M / HalfNumElts;
15352 // Determine the element index into its half vector source.
15353 int HalfElt = M % HalfNumElts;
15355 // We can shuffle with up to 2 half vectors, set the new 'half'
15356 // shuffle mask accordingly.
15357 if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
15358 HalfMask[i] = HalfElt;
15359 HalfIdx1 = HalfIdx;
15362 if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
15363 HalfMask[i] = HalfElt + HalfNumElts;
15364 HalfIdx2 = HalfIdx;
15368 // Too many half vectors referenced.
15375 /// Given the output values from getHalfShuffleMask(), create a half width
15376 /// shuffle of extracted vectors followed by an insert back to full width.
15377 static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
15378 ArrayRef<int> HalfMask, int HalfIdx1,
15379 int HalfIdx2, bool UndefLower,
15380 SelectionDAG &DAG, bool UseConcat = false) {
15381 assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
15382 assert(V1.getValueType().isSimple() && "Expecting only simple types");
15384 MVT VT = V1.getSimpleValueType();
15385 MVT HalfVT = VT.getHalfNumVectorElementsVT();
15386 unsigned HalfNumElts = HalfVT.getVectorNumElements();
15388 auto getHalfVector = [&](int HalfIdx) {
15390 return DAG.getUNDEF(HalfVT);
15391 SDValue V = (HalfIdx < 2 ? V1 : V2);
15392 HalfIdx = (HalfIdx % 2) * HalfNumElts;
15393 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
15394 DAG.getIntPtrConstant(HalfIdx, DL));
15397 // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
15398 SDValue Half1 = getHalfVector(HalfIdx1);
15399 SDValue Half2 = getHalfVector(HalfIdx2);
15400 SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
15403 SDValue Op1 = DAG.getUNDEF(HalfVT);
15405 std::swap(Op0, Op1);
15406 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
15409 unsigned Offset = UndefLower ? HalfNumElts : 0;
15410 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
15411 DAG.getIntPtrConstant(Offset, DL));
15414 /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
15415 /// This allows for fast cases such as subvector extraction/insertion
15416 /// or shuffling smaller vector types which can lower more efficiently.
15417 static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
15418 SDValue V2, ArrayRef<int> Mask,
15419 const X86Subtarget &Subtarget,
15420 SelectionDAG &DAG) {
15421 assert((VT.is256BitVector() || VT.is512BitVector()) &&
15422 "Expected 256-bit or 512-bit vector");
15424 bool UndefLower = isUndefLowerHalf(Mask);
15425 if (!UndefLower && !isUndefUpperHalf(Mask))
15428 assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
15429 "Completely undef shuffle mask should have been simplified already");
15431 // Upper half is undef and lower half is whole upper subvector.
15432 // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
15433 MVT HalfVT = VT.getHalfNumVectorElementsVT();
15434 unsigned HalfNumElts = HalfVT.getVectorNumElements();
15436 isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
15437 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15438 DAG.getIntPtrConstant(HalfNumElts, DL));
15439 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15440 DAG.getIntPtrConstant(0, DL));
15443 // Lower half is undef and upper half is whole lower subvector.
15444 // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
15446 isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
15447 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15448 DAG.getIntPtrConstant(0, DL));
15449 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15450 DAG.getIntPtrConstant(HalfNumElts, DL));
15453 int HalfIdx1, HalfIdx2;
15454 SmallVector<int, 8> HalfMask(HalfNumElts);
15455 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
15458 assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
15460 // Only shuffle the halves of the inputs when useful.
15461 unsigned NumLowerHalves =
15462 (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
15463 unsigned NumUpperHalves =
15464 (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
15465 assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
15467 // Determine the larger pattern of undef/halves, then decide if it's worth
15468 // splitting the shuffle based on subtarget capabilities and types.
15469 unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
15471 // XXXXuuuu: no insert is needed.
15472 // Always extract lowers when setting lower - these are all free subreg ops.
15473 if (NumUpperHalves == 0)
15474 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15477 if (NumUpperHalves == 1) {
15478 // AVX2 has efficient 32/64-bit element cross-lane shuffles.
15479 if (Subtarget.hasAVX2()) {
15480 // extract128 + vunpckhps/vshufps is better than vblend + vpermps.
15481 if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
15482 !is128BitUnpackShuffleMask(HalfMask) &&
15483 (!isSingleSHUFPSMask(HalfMask) ||
15484 Subtarget.hasFastVariableShuffle()))
15486 // If this is a unary shuffle (assume that the 2nd operand is
15487 // canonicalized to undef), then we can use vpermpd. Otherwise, we
15488 // are better off extracting the upper half of 1 operand and using a narrow shuffle.
15490 if (EltWidth == 64 && V2.isUndef())
15493 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15494 if (Subtarget.hasAVX512() && VT.is512BitVector())
15496 // Extract + narrow shuffle is better than the wide alternative.
15497 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15501 // Don't extract both uppers, instead shuffle and then extract.
15502 assert(NumUpperHalves == 2 && "Half vector count went wrong");
15506 // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
15507 if (NumUpperHalves == 0) {
15508 // AVX2 has efficient 64-bit element cross-lane shuffles.
15509 // TODO: Refine to account for unary shuffle, splat, and other masks?
15510 if (Subtarget.hasAVX2() && EltWidth == 64)
15512 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15513 if (Subtarget.hasAVX512() && VT.is512BitVector())
15515 // Narrow shuffle + insert is better than the wide alternative.
15516 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15520 // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
15524 /// Test whether the specified input (0 or 1) is in-place blended by the given mask.
15527 /// This returns true if the elements from a particular input are already in the
15528 /// slots required by the given mask and require no permutation.
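/// For example (illustrative), with 4-element operands the mask <0, 5, 2, 7>
/// leaves both inputs in place: elements 0 and 2 of V1 and elements 1 and 3
/// of V2 already sit in their destination slots.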
15529 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
15530 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
15531 int Size = Mask.size();
15532 for (int i = 0; i < Size; ++i)
15533 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
15539 /// Handle case where shuffle sources are coming from the same 128-bit lane and
15540 /// every lane can be represented as the same repeating mask - allowing us to
15541 /// shuffle the sources with the repeating shuffle and then permute the result
15542 /// to the destination lanes.
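/// For example (illustrative), the v8f32 mask <5, 4, 7, 6, 1, 0, 3, 2> can be
/// lowered as the in-lane repeated shuffle <1, 0, 3, 2> followed by a permute
/// that swaps the two 128-bit lanes of that result.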
15543 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
15544 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15545 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15546 int NumElts = VT.getVectorNumElements();
15547 int NumLanes = VT.getSizeInBits() / 128;
15548 int NumLaneElts = NumElts / NumLanes;
15550 // On AVX2 we may be able to just shuffle the lowest elements and then
15551 // broadcast the result.
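// For example (illustrative), the v8i32 mask <1, 0, 1, 0, 1, 0, 1, 0> can be
// lowered by shuffling the low two elements to <1, 0> and then broadcasting
// that 64-bit pair across the whole vector.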
15552 if (Subtarget.hasAVX2()) {
15553 for (unsigned BroadcastSize : {16, 32, 64}) {
15554 if (BroadcastSize <= VT.getScalarSizeInBits())
15556 int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
15558 // Attempt to match a repeating pattern every NumBroadcastElts,
15559 // accounting for UNDEFs, that only references the lowest 128-bit
15560 // lane of the inputs.
15561 auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
15562 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15563 for (int j = 0; j != NumBroadcastElts; ++j) {
15564 int M = Mask[i + j];
15567 int &R = RepeatMask[j];
15568 if (0 != ((M % NumElts) / NumLaneElts))
15570 if (0 <= R && R != M)
15577 SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
15578 if (!FindRepeatingBroadcastMask(RepeatMask))
15581 // Shuffle the (lowest) repeated elements in place for broadcast.
15582 SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
15584 // Shuffle the actual broadcast.
15585 SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
15586 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15587 for (int j = 0; j != NumBroadcastElts; ++j)
15588 BroadcastMask[i + j] = j;
15589 return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
15594 // Bail if the shuffle mask doesn't cross 128-bit lanes.
15595 if (!is128BitLaneCrossingShuffleMask(VT, Mask))
15598 // Bail if we already have a repeated lane shuffle mask.
15599 SmallVector<int, 8> RepeatedShuffleMask;
15600 if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
15603 // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
15604 // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
15605 int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
15606 int NumSubLanes = NumLanes * SubLaneScale;
15607 int NumSubLaneElts = NumLaneElts / SubLaneScale;
15609 // Check that all the sources are coming from the same lane and see if we can
15610 // form a repeating shuffle mask (local to each sub-lane). At the same time,
15611 // determine the source sub-lane for each destination sub-lane.
15612 int TopSrcSubLane = -1;
15613 SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
15614 SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
15615 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
15616 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
15618 for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
15619 // Extract the sub-lane mask, check that it all comes from the same lane
15620 // and normalize the mask entries to come from the first lane.
15622 SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
15623 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15624 int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
15627 int Lane = (M % NumElts) / NumLaneElts;
15628 if ((0 <= SrcLane) && (SrcLane != Lane))
15631 int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
15632 SubLaneMask[Elt] = LocalM;
15635 // Whole sub-lane is UNDEF.
15639 // Attempt to match against the candidate repeated sub-lane masks.
15640 for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
15641 auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
15642 for (int i = 0; i != NumSubLaneElts; ++i) {
15643 if (M1[i] < 0 || M2[i] < 0)
15645 if (M1[i] != M2[i])
15651 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
15652 if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
15655 // Merge the sub-lane mask into the matching repeated sub-lane mask.
15656 for (int i = 0; i != NumSubLaneElts; ++i) {
15657 int M = SubLaneMask[i];
15660 assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
15661 "Unexpected mask element");
15662 RepeatedSubLaneMask[i] = M;
15665 // Track the top most source sub-lane - by setting the remaining to UNDEF
15666 // we can greatly simplify shuffle matching.
15667 int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
15668 TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
15669 Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
15673 // Bail if we failed to find a matching repeated sub-lane mask.
15674 if (Dst2SrcSubLanes[DstSubLane] < 0)
15677 assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
15678 "Unexpected source lane");
15680 // Create a repeating shuffle mask for the entire vector.
15681 SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
15682 for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
15683 int Lane = SubLane / SubLaneScale;
15684 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
15685 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15686 int M = RepeatedSubLaneMask[Elt];
15689 int Idx = (SubLane * NumSubLaneElts) + Elt;
15690 RepeatedMask[Idx] = M + (Lane * NumLaneElts);
15693 SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
15695 // Shuffle each source sub-lane to its destination.
15696 SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
15697 for (int i = 0; i != NumElts; i += NumSubLaneElts) {
15698 int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
15699 if (SrcSubLane < 0)
15701 for (int j = 0; j != NumSubLaneElts; ++j)
15702 SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
15705 return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
15709 static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
15710 bool &ForceV1Zero, bool &ForceV2Zero,
15711 unsigned &ShuffleImm, ArrayRef<int> Mask,
15712 const APInt &Zeroable) {
15713 int NumElts = VT.getVectorNumElements();
15714 assert(VT.getScalarSizeInBits() == 64 &&
15715 (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
15716 "Unexpected data type for VSHUFPD");
15717 assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
15718 "Illegal shuffle mask");
15720 bool ZeroLane[2] = { true, true };
15721 for (int i = 0; i < NumElts; ++i)
15722 ZeroLane[i & 1] &= Zeroable[i];
15724 // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ..
15725 // Mask for V4F64: 0/1, 4/5, 2/3, 6/7.
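// For example (illustrative), the v4f64 mask <0, 5, 2, 7> matches directly:
// the allowed Val/Val+1 pairs are {0,1}, {4,5}, {2,3}, {6,7} and ShuffleImm
// becomes 0b1010 = 0xA, i.e. shufpd $0xA giving <V1[0], V2[1], V1[2], V2[3]>.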
15727 bool ShufpdMask = true;
15728 bool CommutableMask = true;
15729 for (int i = 0; i < NumElts; ++i) {
15730 if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
15734 int Val = (i & 6) + NumElts * (i & 1);
15735 int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
15736 if (Mask[i] < Val || Mask[i] > Val + 1)
15737 ShufpdMask = false;
15738 if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
15739 CommutableMask = false;
15740 ShuffleImm |= (Mask[i] % 2) << i;
15743 if (!ShufpdMask && !CommutableMask)
15746 if (!ShufpdMask && CommutableMask)
15749 ForceV1Zero = ZeroLane[0];
15750 ForceV2Zero = ZeroLane[1];
15754 static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
15755 SDValue V2, ArrayRef<int> Mask,
15756 const APInt &Zeroable,
15757 const X86Subtarget &Subtarget,
15758 SelectionDAG &DAG) {
15759 assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
15760 "Unexpected data type for VSHUFPD");
15762 unsigned Immediate = 0;
15763 bool ForceV1Zero = false, ForceV2Zero = false;
15764 if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
15768 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
15770 V1 = getZeroVector(VT, Subtarget, DAG, DL);
15772 V2 = getZeroVector(VT, Subtarget, DAG, DL);
15774 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15775 DAG.getTargetConstant(Immediate, DL, MVT::i8));
15778 // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
15779 // by zeroable elements in the remaining 24 elements. Turn this into two
15780 // vmovqb instructions shuffled together.
15781 static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
15782 SDValue V1, SDValue V2,
15783 ArrayRef<int> Mask,
15784 const APInt &Zeroable,
15785 SelectionDAG &DAG) {
15786 assert(VT == MVT::v32i8 && "Unexpected type!");
15788 // The first 8 indices should be every 8th element.
15789 if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
15792 // Remaining elements need to be zeroable.
15793 if (Zeroable.countLeadingOnes() < (Mask.size() - 8))
15796 V1 = DAG.getBitcast(MVT::v4i64, V1);
15797 V2 = DAG.getBitcast(MVT::v4i64, V2);
15799 V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
15800 V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
15802 // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
15803 // the upper bits of the result using an unpckldq.
15804 SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
15805 { 0, 1, 2, 3, 16, 17, 18, 19,
15806 4, 5, 6, 7, 20, 21, 22, 23 });
15807 // Insert the unpckldq into a zero vector to widen to v32i8.
15808 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
15809 DAG.getConstant(0, DL, MVT::v32i8), Unpack,
15810 DAG.getIntPtrConstant(0, DL));
15814 /// Handle lowering of 4-lane 64-bit floating point shuffles.
15816 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
15817 /// isn't available.
15818 static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15819 const APInt &Zeroable, SDValue V1, SDValue V2,
15820 const X86Subtarget &Subtarget,
15821 SelectionDAG &DAG) {
15822 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15823 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15824 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15826 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
15830 if (V2.isUndef()) {
15831 // Check for being able to broadcast a single element.
15832 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
15833 Mask, Subtarget, DAG))
15836 // Use low duplicate instructions for masks that match their pattern.
15837 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
15838 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
15840 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
15841 // Non-half-crossing single input shuffles can be lowered with an
15842 // interleaved permutation.
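// For example (illustrative), the mask <1, 0, 3, 2> sets bits 0 and 2 only,
// giving VPERMILPMask = 0b0101, i.e. vpermilpd $5.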
15843 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
15844 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
15845 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
15846 DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
15849 // With AVX2 we have direct support for this permutation.
15850 if (Subtarget.hasAVX2())
15851 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
15852 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15854 // Try to create an in-lane repeating shuffle mask and then shuffle the
15855 // results into the target lanes.
15856 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15857 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15860 // Try to permute the lanes and then use a per-lane permute.
15861 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
15862 Mask, DAG, Subtarget))
15865 // Otherwise, fall back.
15866 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
15870 // Use dedicated unpack instructions for masks that match their pattern.
15871 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
15874 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
15875 Zeroable, Subtarget, DAG))
15878 // Check if the blend happens to exactly fit that of SHUFPD.
15879 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
15880 Zeroable, Subtarget, DAG))
15883 // If we have lane crossing shuffles AND they don't all come from the lower
15884 // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
15885 // TODO: Handle BUILD_VECTOR sources which getVectorShuffle currently
15886 // canonicalize to a blend of splat which isn't necessary for this combine.
15887 if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
15888 !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
15889 (V1.getOpcode() != ISD::BUILD_VECTOR) &&
15890 (V2.getOpcode() != ISD::BUILD_VECTOR))
15891 if (SDValue Op = lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2,
15895 // If we have one input in place, then we can permute the other input and
15896 // blend the result.
15897 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
15898 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15901 // Try to create an in-lane repeating shuffle mask and then shuffle the
15902 // results into the target lanes.
15903 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15904 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15907 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15908 // shuffle. However, if we have AVX2 and either input is already in place,
15909 // we will be able to shuffle the other input even across lanes in a single
15910 // instruction, so skip this pattern.
15911 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
15912 isShuffleMaskInputInPlace(1, Mask))))
15913 if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
15914 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15917 // If we have VLX support, we can use VEXPAND.
15918 if (Subtarget.hasVLX())
15919 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
15923 // If we have AVX2 then we always want to lower with a blend because at v4 we
15924 // can fully permute the elements.
15925 if (Subtarget.hasAVX2())
15926 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15929 // Otherwise fall back on generic lowering.
15930 return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
15934 /// Handle lowering of 4-lane 64-bit integer shuffles.
15936 /// This routine is only called when we have AVX2 and thus a reasonable
15937 /// instruction set for v4i64 shuffling.
15938 static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15939 const APInt &Zeroable, SDValue V1, SDValue V2,
15940 const X86Subtarget &Subtarget,
15941 SelectionDAG &DAG) {
15942 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15943 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15944 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15945 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
15947 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15951 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
15952 Zeroable, Subtarget, DAG))
15955 // Check for being able to broadcast a single element.
15956 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
15960 if (V2.isUndef()) {
15961 // When the shuffle is mirrored between the 128-bit lanes of the vector, we
15962 // can use lower latency instructions that will operate on both lanes.
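// For example (illustrative), the v4i64 mask <1, 0, 3, 2> repeats as <1, 0>
// per 128-bit lane, which scales to the v8i32 PSHUFD mask <2, 3, 0, 1>, i.e.
// pshufd $0x4E on the bitcast input.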
15963 SmallVector<int, 2> RepeatedMask;
15964 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
15965 SmallVector<int, 4> PSHUFDMask;
15966 scaleShuffleMask<int>(2, RepeatedMask, PSHUFDMask);
15967 return DAG.getBitcast(
15969 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
15970 DAG.getBitcast(MVT::v8i32, V1),
15971 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
15974 // AVX2 provides a direct instruction for permuting a single input across lanes.
15976 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
15977 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15980 // Try to use shift instructions.
15981 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
15982 Zeroable, Subtarget, DAG))
15985 // If we have VLX support, we can use VALIGN or VEXPAND.
15986 if (Subtarget.hasVLX()) {
15987 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i64, V1, V2, Mask,
15991 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
15996 // Try to use PALIGNR.
15997 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
16001 // Use dedicated unpack instructions for masks that match their pattern.
16002 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
16005 // If we have one input in place, then we can permute the other input and
16006 // blend the result.
16007 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
16008 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
16011 // Try to create an in-lane repeating shuffle mask and then shuffle the
16012 // results into the target lanes.
16013 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16014 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
16017 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16018 // shuffle. However, if we have AVX2 and either input is already in place,
16019 // we will be able to shuffle the other input even across lanes in a single
16020 // instruction, so skip this pattern.
16021 if (!isShuffleMaskInputInPlace(0, Mask) &&
16022 !isShuffleMaskInputInPlace(1, Mask))
16023 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16024 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
16027 // Otherwise fall back on generic blend lowering.
16028 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
16032 /// Handle lowering of 8-lane 32-bit floating point shuffles.
16034 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
16035 /// isn't available.
16036 static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16037 const APInt &Zeroable, SDValue V1, SDValue V2,
16038 const X86Subtarget &Subtarget,
16039 SelectionDAG &DAG) {
16040 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
16041 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
16042 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16044 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
16045 Zeroable, Subtarget, DAG))
16048 // Check for being able to broadcast a single element.
16049 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
16053 // If the shuffle mask is repeated in each 128-bit lane, we have many more
16054 // options to efficiently lower the shuffle.
16055 SmallVector<int, 4> RepeatedMask;
16056 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
16057 assert(RepeatedMask.size() == 4 &&
16058 "Repeated masks must be half the mask width!");
16060 // Use even/odd duplicate instructions for masks that match their pattern.
16061 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
16062 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
16063 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
16064 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
16067 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
16068 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16070 // Use dedicated unpack instructions for masks that match their pattern.
16071 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
16074 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
16075 // have already handled any direct blends.
16076 return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
16079 // Try to create an in-lane repeating shuffle mask and then shuffle the
16080 // results into the target lanes.
16081 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16082 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
16085 // If we have a single input shuffle with different shuffle patterns in the
16086 // two 128-bit lanes use the variable mask to VPERMILPS.
16087 if (V2.isUndef()) {
16088 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16089 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
16090 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
16092 if (Subtarget.hasAVX2())
16093 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
16095 // Otherwise, fall back.
16096 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
16100 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
16102 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16103 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
16106 // If we have VLX support, we can use VEXPAND.
16107 if (Subtarget.hasVLX())
16108 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
16112 // For non-AVX512, if the mask is of 16-bit elements in lane then try to split
16113 // since after the split we get more efficient code using vpunpcklwd and
16114 // vpunpckhwd instructions than with vblend.
16115 if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
16116 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
16120 // If we have AVX2 then we always want to lower with a blend because at v8 we
16121 // can fully permute the elements.
16122 if (Subtarget.hasAVX2())
16123 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2, Mask,
16126 // Otherwise fall back on generic lowering.
16127 return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
16131 /// Handle lowering of 8-lane 32-bit integer shuffles.
16133 /// This routine is only called when we have AVX2 and thus a reasonable
16134 /// instruction set for v8i32 shuffling.
16135 static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16136 const APInt &Zeroable, SDValue V1, SDValue V2,
16137 const X86Subtarget &Subtarget,
16138 SelectionDAG &DAG) {
16139 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16140 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16141 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16142 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
16144 // Whenever we can lower this as a zext, that instruction is strictly faster
16145 // than any alternative. It also allows us to fold memory operands into the
16146 // shuffle in many cases.
16147 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
16148 Zeroable, Subtarget, DAG))
16151 // For non-AVX512, if the mask is of 16-bit elements in lane then try to split
16152 // since after the split we get more efficient code than vblend by using
16153 // vpunpcklwd and vpunpckhwd instructions.
16154 if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
16155 !Subtarget.hasAVX512())
16156 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask,
16160 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
16161 Zeroable, Subtarget, DAG))
16164 // Check for being able to broadcast a single element.
16165 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
16169 // If the shuffle mask is repeated in each 128-bit lane we can use more
16170 // efficient instructions that mirror the shuffles across the two 128-bit lanes.
16172 SmallVector<int, 4> RepeatedMask;
16173 bool Is128BitLaneRepeatedShuffle =
16174 is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
16175 if (Is128BitLaneRepeatedShuffle) {
16176 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16178 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
16179 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16181 // Use dedicated unpack instructions for masks that match their pattern.
16182 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
16186 // Try to use shift instructions.
16187 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
16188 Zeroable, Subtarget, DAG))
16191 // If we have VLX support, we can use VALIGN or EXPAND.
16192 if (Subtarget.hasVLX()) {
16193 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i32, V1, V2, Mask,
16197 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
16202 // Try to use byte rotation instructions.
16203 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
16207 // Try to create an in-lane repeating shuffle mask and then shuffle the
16208 // results into the target lanes.
16209 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16210 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16213 // If the shuffle patterns aren't repeated but it is a single input, directly
16214 // generate a cross-lane VPERMD instruction.
16215 if (V2.isUndef()) {
16216 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16217 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
16220 // Assume that a single SHUFPS is faster than an alternative sequence of
16221 // multiple instructions (even if the CPU has a domain penalty).
16222 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16223 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16224 SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
16225 SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
16226 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
16227 CastV1, CastV2, DAG);
16228 return DAG.getBitcast(MVT::v8i32, ShufPS);
16231 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
16233 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16234 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16237 // Otherwise fall back on generic blend lowering.
16238 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2, Mask,
16242 /// Handle lowering of 16-lane 16-bit integer shuffles.
16244 /// This routine is only called when we have AVX2 and thus a reasonable
16245 /// instruction set for v16i16 shuffling.
16246 static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16247 const APInt &Zeroable, SDValue V1, SDValue V2,
16248 const X86Subtarget &Subtarget,
16249 SelectionDAG &DAG) {
16250 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16251 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16252 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16253 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
16255 // Whenever we can lower this as a zext, that instruction is strictly faster
16256 // than any alternative. It also allows us to fold memory operands into the
16257 // shuffle in many cases.
16258 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16259 DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16262 // Check for being able to broadcast a single element.
16263 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
16267 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
16268 Zeroable, Subtarget, DAG))
16271 // Use dedicated unpack instructions for masks that match their pattern.
16272 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
16275 // Use dedicated pack instructions for masks that match their pattern.
16276 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
16280 // Try to use shift instructions.
16281 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
16282 Zeroable, Subtarget, DAG))
16285 // Try to use byte rotation instructions.
16286 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
16290 // Try to create an in-lane repeating shuffle mask and then shuffle the
16291 // results into the target lanes.
16292 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16293 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16296 if (V2.isUndef()) {
16297 // There are no generalized cross-lane shuffle operations available on i16 element types.
16299 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
16300 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16301 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16304 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
16308 SmallVector<int, 8> RepeatedMask;
16309 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
16310 // As this is a single-input shuffle, the repeated mask should be
16311 // a strictly valid v8i16 mask that we can pass through to the v8i16
16312 // lowering to handle even the v16 case.
16313 return lowerV8I16GeneralSingleInputShuffle(
16314 DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
16318 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
16319 Zeroable, Subtarget, DAG))
16322 // AVX512BWVL can lower to VPERMW.
16323 if (Subtarget.hasBWI() && Subtarget.hasVLX())
16324 return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);
16326 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
16328 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16329 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16332 // Try to permute the lanes and then use a per-lane permute.
16333 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16334 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16337 // Otherwise fall back on generic lowering.
16338 return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
16342 /// Handle lowering of 32-lane 8-bit integer shuffles.
16344 /// This routine is only called when we have AVX2 and thus a reasonable
16345 /// instruction set for v32i8 shuffling.
16346 static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16347 const APInt &Zeroable, SDValue V1, SDValue V2,
16348 const X86Subtarget &Subtarget,
16349 SelectionDAG &DAG) {
16350 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16351 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16352 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16353 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
16355 // Whenever we can lower this as a zext, that instruction is strictly faster
16356 // than any alternative. It also allows us to fold memory operands into the
16357 // shuffle in many cases.
16358 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
16359 Zeroable, Subtarget, DAG))
16362 // Check for being able to broadcast a single element.
16363 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
16367 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
16368 Zeroable, Subtarget, DAG))
16371 // Use dedicated unpack instructions for masks that match their pattern.
16372 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
16375 // Use dedicated pack instructions for masks that match their pattern.
16376 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
16380 // Try to use shift instructions.
16381 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
16382 Zeroable, Subtarget, DAG))
16385 // Try to use byte rotation instructions.
16386 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
16390 // Try to create an in-lane repeating shuffle mask and then shuffle the
16391 // results into the target lanes.
16392 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16393 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16396 // There are no generalized cross-lane shuffle operations available on i8 element types.
16398 if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
16399 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16400 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16403 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
16407 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
16408 Zeroable, Subtarget, DAG))
16411 // AVX512VBMIVL can lower to VPERMB.
16412 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
16413 return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);
16416 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
16417 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16418 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16421 // Try to permute the lanes and then use a per-lane permute.
16422 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16423 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16426 // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
16427 // by zeroable elements in the remaining 24 elements. Turn this into two
16428 // vmovqb instructions shuffled together.
16429 if (Subtarget.hasVLX())
16430 if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
16431 Mask, Zeroable, DAG))
16434 // Otherwise fall back on generic lowering.
16435 return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
16439 /// High-level routine to lower various 256-bit x86 vector shuffles.
16441 /// This routine either breaks down the specific type of a 256-bit x86 vector
16442 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
16443 /// together based on the available instructions.
16444 static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
16445 SDValue V1, SDValue V2, const APInt &Zeroable,
16446 const X86Subtarget &Subtarget,
16447 SelectionDAG &DAG) {
16448 // If we have a single input to the zero element, insert that into V1 if we
16449 // can do so cheaply.
16450 int NumElts = VT.getVectorNumElements();
16451 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16453 if (NumV2Elements == 1 && Mask[0] >= NumElts)
16454 if (SDValue Insertion = lowerShuffleAsElementInsertion(
16455 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16458 // Handle special cases where the lower or upper half is UNDEF.
16460 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16463 // There is a really nice hard cut-over between AVX1 and AVX2 that means we
16464 // can check for those subtargets here and avoid much of the subtarget
16465 // querying in the per-vector-type lowering routines. With AVX1 we have
16466 // essentially *zero* ability to manipulate a 256-bit vector with integer
16467 // types. Since we'll use floating point types there eventually, just
16468 // immediately cast everything to a float and operate entirely in that domain.
16469 if (VT.isInteger() && !Subtarget.hasAVX2()) {
16470 int ElementBits = VT.getScalarSizeInBits();
16471 if (ElementBits < 32) {
16472 // No floating point type available, if we can't use the bit operations
16473 // for masking/blending then decompose into 128-bit vectors.
16474 if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
16477 if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
16479 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
16482 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
16483 VT.getVectorNumElements());
16484 V1 = DAG.getBitcast(FpVT, V1);
16485 V2 = DAG.getBitcast(FpVT, V2);
16486 return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
16489 switch (VT.SimpleTy) {
16491 return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16493 return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16495 return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16497 return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16499 return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16501 return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16504 llvm_unreachable("Not a valid 256-bit x86 vector type!");
16508 /// Try to lower a vector shuffle as a 128-bit shuffles.
16509 static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
16510 const APInt &Zeroable, SDValue V1, SDValue V2,
16511 const X86Subtarget &Subtarget,
16512 SelectionDAG &DAG) {
16513 assert(VT.getScalarSizeInBits() == 64 &&
16514 "Unexpected element type size for 128bit shuffle.");
16516 // Handling a 256-bit vector requires VLX, and the function
16517 // lowerV2X128VectorShuffle() is most probably a better solution for it.
16518 assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
16520 // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
16521 SmallVector<int, 4> WidenedMask;
16522 if (!canWidenShuffleElements(Mask, WidenedMask))
16525 // Try to use an insert into a zero vector.
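// For example (illustrative), a v8f64 shuffle taking <0, 1, 2, 3> for the
// low four elements with elements 4-7 zeroable becomes an insertion of V1's
// low 256 bits into an all-zero vector (only 128 bits if elements 2-3 are
// also zeroable).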
16526 if (WidenedMask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
16527 (WidenedMask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
16528 unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
16529 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
16530 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
16531 DAG.getIntPtrConstant(0, DL));
16532 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16533 getZeroVector(VT, Subtarget, DAG, DL), LoV,
16534 DAG.getIntPtrConstant(0, DL));
16537 // Check for patterns which can be matched with a single insert of a 256-bit subvector.
16539 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask,
16540 {0, 1, 2, 3, 0, 1, 2, 3});
16541 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask,
16542 {0, 1, 2, 3, 8, 9, 10, 11})) {
16543 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
16544 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
16545 OnlyUsesV1 ? V1 : V2,
16546 DAG.getIntPtrConstant(0, DL));
16547 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
16548 DAG.getIntPtrConstant(4, DL));
16551 assert(WidenedMask.size() == 4);
16553 // See if this is an insertion of the lower 128-bits of V2 into V1.
16554 bool IsInsert = true;
16556 for (int i = 0; i < 4; ++i) {
16557 assert(WidenedMask[i] >= -1);
16558 if (WidenedMask[i] < 0)
16561 // Make sure all V1 subvectors are in place.
16562 if (WidenedMask[i] < 4) {
16563 if (WidenedMask[i] != i) {
16568 // Make sure we only have a single V2 index and that it's the lowest 128 bits.
16569 if (V2Index >= 0 || WidenedMask[i] != 4) {
16576 if (IsInsert && V2Index >= 0) {
16577 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
16578 SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
16579 DAG.getIntPtrConstant(0, DL));
16580 return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
16583 // Try to lower to vshuf64x2/vshuf32x4.
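// For example (illustrative), a widened mask of <0, 1, 6, 7> takes 128-bit
// lanes 0 and 1 from V1 and lanes 2 and 3 from V2, so Ops = {V1, V2} and
// PermMask = 0 | (1 << 2) | (2 << 4) | (3 << 6) = 0xE4.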
16584 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
16585 unsigned PermMask = 0;
16586 // Ensure elements came from the same Op.
16587 for (int i = 0; i < 4; ++i) {
16588 assert(WidenedMask[i] >= -1);
16589 if (WidenedMask[i] < 0)
16592 SDValue Op = WidenedMask[i] >= 4 ? V2 : V1;
16593 unsigned OpIndex = i / 2;
16594 if (Ops[OpIndex].isUndef())
16596 else if (Ops[OpIndex] != Op)
16599 // Convert the 128-bit shuffle mask selection values into 128-bit selection
16600 // bits defined by a vshuf64x2 instruction's immediate control byte.
16601 PermMask |= (WidenedMask[i] % 4) << (i * 2);
16604 return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
16605 DAG.getTargetConstant(PermMask, DL, MVT::i8));
16608 /// Handle lowering of 8-lane 64-bit floating point shuffles.
16609 static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16610 const APInt &Zeroable, SDValue V1, SDValue V2,
16611 const X86Subtarget &Subtarget,
16612 SelectionDAG &DAG) {
16613 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16614 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16615 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16617 if (V2.isUndef()) {
16618 // Use low duplicate instructions for masks that match their pattern.
16619 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
16620 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
16622 if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
16623 // Non-half-crossing single input shuffles can be lowered with an
16624 // interleaved permutation.
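// For example (illustrative), the mask <1, 0, 3, 2, 5, 4, 7, 6> sets the
// even-numbered bits only, giving VPERMILPMask = 0x55, i.e. vpermilpd $0x55.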
16625 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
16626 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
16627 ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
16628 ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
16629 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
16630 DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
16633 SmallVector<int, 4> RepeatedMask;
16634 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
16635 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
16636 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16639 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
16640 V2, Subtarget, DAG))
16643 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
16646 // Check if the blend happens to exactly fit that of SHUFPD.
16647 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
16648 Zeroable, Subtarget, DAG))
16651 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
16655 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
16656 Zeroable, Subtarget, DAG))
16659 return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
16662 /// Handle lowering of 16-lane 32-bit floating point shuffles.
16663 static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16664 const APInt &Zeroable, SDValue V1, SDValue V2,
16665 const X86Subtarget &Subtarget,
16666 SelectionDAG &DAG) {
16667 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16668 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16669 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16671 // If the shuffle mask is repeated in each 128-bit lane, we have many more
16672 // options to efficiently lower the shuffle.
16673 SmallVector<int, 4> RepeatedMask;
16674 if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
16675 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16677 // Use even/odd duplicate instructions for masks that match their pattern.
16678 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
16679 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
16680 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
16681 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
16684 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
16685 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16687 // Use dedicated unpack instructions for masks that match their pattern.
16688 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
16691 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16692 Zeroable, Subtarget, DAG))
16695 // Otherwise, fall back to a SHUFPS sequence.
16696 return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
16699 // If we have a single input shuffle with different shuffle patterns in the
16700 // 128-bit lanes and don't lane cross, use variable mask VPERMILPS.
16701 if (V2.isUndef() &&
16702 !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
16703 SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
16704 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
16707 // If we have AVX512F support, we can use VEXPAND.
16708 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
16709 V1, V2, DAG, Subtarget))
16712 return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
16715 /// Handle lowering of 8-lane 64-bit integer shuffles.
16716 static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16717 const APInt &Zeroable, SDValue V1, SDValue V2,
16718 const X86Subtarget &Subtarget,
16719 SelectionDAG &DAG) {
16720 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16721 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16722 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16724 if (V2.isUndef()) {
16725 // When the shuffle is mirrored between the 128-bit lanes of the vector, we
16726 // can use lower latency instructions that will operate on all four lanes.
16728 SmallVector<int, 2> Repeated128Mask;
16729 if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
16730 SmallVector<int, 4> PSHUFDMask;
16731 scaleShuffleMask<int>(2, Repeated128Mask, PSHUFDMask);
16732 return DAG.getBitcast(
16734 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
16735 DAG.getBitcast(MVT::v16i32, V1),
16736 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16739 SmallVector<int, 4> Repeated256Mask;
16740 if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
16741 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
16742 getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
16745 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
16746 V2, Subtarget, DAG))
16749 // Try to use shift instructions.
16750 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
16751 Zeroable, Subtarget, DAG))
16754 // Try to use VALIGN.
16755 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i64, V1, V2, Mask,
16759 // Try to use PALIGNR.
16760 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask, Subtarget, DAG)) return Rotate;
16764 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
16766 // If we have AVX512F support, we can use VEXPAND.
16767 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2, DAG, Subtarget)) return V;
16771 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
16772 Zeroable, Subtarget, DAG))
16775 return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
16778 /// Handle lowering of 16-lane 32-bit integer shuffles.
16779 static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16780 const APInt &Zeroable, SDValue V1, SDValue V2,
16781 const X86Subtarget &Subtarget,
16782 SelectionDAG &DAG) {
16783 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16784 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16785 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16787 // Whenever we can lower this as a zext, that instruction is strictly faster
16788 // than any alternative. It also allows us to fold memory operands into the
16789 // shuffle in many cases.
16790 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16791 DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16794 // If the shuffle mask is repeated in each 128-bit lane we can use more
16795 // efficient instructions that mirror the shuffles across the four 128-bit lanes.
16797 SmallVector<int, 4> RepeatedMask;
16798 bool Is128BitLaneRepeatedShuffle =
16799 is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
16800 if (Is128BitLaneRepeatedShuffle) {
16801 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
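// A mask that repeats in every 128-bit lane can be encoded as a single PSHUFD immediate.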
16803 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
16804 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16806 // Use dedicated unpack instructions for masks that match their pattern.
16807 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
16811 // Try to use shift instructions.
16812 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
16813 Zeroable, Subtarget, DAG))
16816 // Try to use VALIGN.
16817 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG)) return Rotate;
16821 // Try to use byte rotation instructions.
16822 if (Subtarget.hasBWI())
16823 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG)) return Rotate;
16827 // Assume that a single SHUFPS is faster than using a permv shuffle.
16828 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16829 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16830 SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
16831 SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
16832 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
16833 CastV1, CastV2, DAG);
16834 return DAG.getBitcast(MVT::v16i32, ShufPS);
16836 // If we have AVX512F support, we can use VEXPAND.
16837 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2, DAG, Subtarget)) return V;
16841 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
16842 Zeroable, Subtarget, DAG))
16844 return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
16847 /// Handle lowering of 32-lane 16-bit integer shuffles.
16848 static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16849 const APInt &Zeroable, SDValue V1, SDValue V2,
16850 const X86Subtarget &Subtarget,
16851 SelectionDAG &DAG) {
16852 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16853 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16854 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16855 assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
16857 // Whenever we can lower this as a zext, that instruction is strictly faster
16858 // than any alternative. It also allows us to fold memory operands into the
16859 // shuffle in many cases.
16860 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16861 DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16864 // Use dedicated unpack instructions for masks that match their pattern.
16865 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
16868 // Try to use shift instructions.
16869 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
16870 Zeroable, Subtarget, DAG))
16873 // Try to use byte rotation instructions.
16874 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask, Subtarget, DAG)) return Rotate;
16878 if (V2.isUndef()) {
16879 SmallVector<int, 8> RepeatedMask;
16880 if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
16881 // As this is a single-input shuffle, the repeated mask should be
16882 // a strictly valid v8i16 mask that we can pass through to the v8i16
16883 // lowering to handle even the v32 case.
16884 return lowerV8I16GeneralSingleInputShuffle(
16885 DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
16889 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
16890 Zeroable, Subtarget, DAG))
16893 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
16894 Zeroable, Subtarget, DAG))
16897 return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
16900 /// Handle lowering of 64-lane 8-bit integer shuffles.
16901 static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16902 const APInt &Zeroable, SDValue V1, SDValue V2,
16903 const X86Subtarget &Subtarget,
16904 SelectionDAG &DAG) {
16905 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16906 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16907 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
16908 assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
16910 // Whenever we can lower this as a zext, that instruction is strictly faster
16911 // than any alternative. It also allows us to fold memory operands into the
16912 // shuffle in many cases.
16913 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16914 DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16917 // Use dedicated unpack instructions for masks that match their pattern.
16918 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
16921 // Use dedicated pack instructions for masks that match their pattern.
16922 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG, Subtarget)) return V;
16926 // Try to use shift instructions.
16927 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
16928 Zeroable, Subtarget, DAG))
16931 // Try to use byte rotation instructions.
16932 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG)) return Rotate;
16936 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
16937 Zeroable, Subtarget, DAG))
16940 // VBMI can use VPERMV/VPERMV3 byte shuffles.
16941 if (Subtarget.hasVBMI())
16942 return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);
16944 // Try to create an in-lane repeating shuffle mask and then shuffle the
16945 // results into the target lanes.
16946 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16947 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16950 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
16951 Zeroable, Subtarget, DAG))
16954 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
16957 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16958 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16961 // FIXME: Implement direct support for this type!
16962 return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
16965 /// High-level routine to lower various 512-bit x86 vector shuffles.
16967 /// This routine either breaks down the specific type of a 512-bit x86 vector
16968 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
16969 /// together based on the available instructions.
16970 static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16971 MVT VT, SDValue V1, SDValue V2,
16972 const APInt &Zeroable,
16973 const X86Subtarget &Subtarget,
16974 SelectionDAG &DAG) {
16975 assert(Subtarget.hasAVX512() &&
16976 "Cannot lower 512-bit vectors w/ basic ISA!");
16978 // If we have a single input to the zero element, insert that into V1 if we
16979 // can do so cheaply.
16980 int NumElts = Mask.size();
16981 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16983 if (NumV2Elements == 1 && Mask[0] >= NumElts)
16984 if (SDValue Insertion = lowerShuffleAsElementInsertion(
16985 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16988 // Handle special cases where the lower or upper half is UNDEF.
16990 if (SDValue V = lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG)) return V;
16993 // Check for being able to broadcast a single element.
16994 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask, Subtarget, DAG)) return Broadcast;
16998 // Dispatch to each element type for lowering. If we don't have support for
16999 // specific element type shuffles at 512 bits, immediately split them and
17000 // lower them. Each lowering routine of a given type is allowed to assume that
17001 // the requisite ISA extensions for that element type are available.
17002 switch (VT.SimpleTy) {
17004 return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17006 return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17008 return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17010 return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17012 return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17014 return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17017 llvm_unreachable("Not a valid 512-bit x86 vector type!");
17021 static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
17022 MVT VT, SDValue V1, SDValue V2,
17023 const X86Subtarget &Subtarget,
17024 SelectionDAG &DAG) {
17025 // Shuffle should be unary.
17030 int NumElts = Mask.size();
17031 for (int i = 0; i != NumElts; ++i) {
17033 assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
17034 "Unexpected mask index.");
17038 // The first non-undef element determines our shift amount.
17039 if (ShiftAmt < 0) {
17041 // Need to be shifting right.
17045 // All non-undef elements must shift by the same amount.
17046 if (ShiftAmt != M - i)
17049 assert(ShiftAmt >= 0 && "All undef?");
17051 // Great, we found a shift right.
MVT WideVT = VT;
17053 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
17054 WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
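// Do the shift on a type that natively supports KSHIFT: insert into an undef wider mask vector,
// shift right, and extract the original width. Positions that receive bits from the widened
// (undef) top correspond to undef mask elements, so the garbage there is harmless.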
17055 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
17056 DAG.getUNDEF(WideVT), V1,
17057 DAG.getIntPtrConstant(0, DL));
17058 Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
17059 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17060 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17061 DAG.getIntPtrConstant(0, DL));
17064 // Determine if this shuffle can be implemented with a KSHIFT instruction.
17065 // Returns the shift amount if possible or -1 if not. This is a simplified
17066 // version of matchShuffleAsShift.
17067 static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
17068 int MaskOffset, const APInt &Zeroable) {
17069 int Size = Mask.size();
17071 auto CheckZeros = [&](int Shift, bool Left) {
17072 for (int j = 0; j < Shift; ++j)
17073 if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
17079 auto MatchShift = [&](int Shift, bool Left) {
17080 unsigned Pos = Left ? Shift : 0;
17081 unsigned Low = Left ? 0 : Shift;
17082 unsigned Len = Size - Shift;
17083 return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
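// Try every shift amount in both directions; a shift matches if the positions it would fill are
// all zeroable and the remaining mask elements form the expected sequential run from the input.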
17086 for (int Shift = 1; Shift != Size; ++Shift)
17087 for (bool Left : {true, false})
17088 if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
17089 Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
17097 // Lower vXi1 vector shuffles.
17098 // There is no dedicated instruction on AVX-512 that shuffles the mask registers.
17099 // The only way to shuffle the bits is to sign-extend the mask to a SIMD
17100 // vector, shuffle that, and then truncate the result back to a mask.
17101 static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
17102 MVT VT, SDValue V1, SDValue V2,
17103 const APInt &Zeroable,
17104 const X86Subtarget &Subtarget,
17105 SelectionDAG &DAG) {
17106 assert(Subtarget.hasAVX512() &&
17107 "Cannot lower 512-bit vectors w/o basic ISA!");
17109 int NumElts = Mask.size();
17111 // Try to recognize shuffles that are just padding a subvector with zeros.
17112 int SubvecElts = 0;
17114 for (int i = 0; i != NumElts; ++i) {
17115 if (Mask[i] >= 0) {
17116 // Grab the source from the first valid mask. All subsequent elements need
17117 // to use this same source.
17119 Src = Mask[i] / NumElts;
17120 if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
17126 assert(SubvecElts != NumElts && "Identity shuffle?");
17128 // Clip to a power of 2.
17129 SubvecElts = PowerOf2Floor(SubvecElts);
17131 // Make sure the number of zeroable bits in the top at least covers the bits
17132 // not covered by the subvector.
17133 if ((int)Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
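// Lower as: extract the low SubvecElts elements from the source and insert them into an
// all-zero vector of the full mask width.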
17134 assert(Src >= 0 && "Expected a source!");
17135 MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
17136 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
17137 Src == 0 ? V1 : V2,
17138 DAG.getIntPtrConstant(0, DL));
17139 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
17140 DAG.getConstant(0, DL, VT),
17141 Extract, DAG.getIntPtrConstant(0, DL));
17144 // Try a simple shift right with undef elements. Later we'll try with zeros.
17145 if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
17149 // Try to match KSHIFTs.
17150 unsigned Offset = 0;
17151 for (SDValue V : { V1, V2 }) {
17153 int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
17154 if (ShiftAmt >= 0) {
MVT WideVT = VT;
17156 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
17157 WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17158 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
17159 DAG.getUNDEF(WideVT), V,
17160 DAG.getIntPtrConstant(0, DL));
17161 // Widened right shifts need two shifts to ensure we shift in zeroes.
17162 if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
17163 int WideElts = WideVT.getVectorNumElements();
17164 // Shift left to put the original vector in the MSBs of the new size.
17165 Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
17166 DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
17167 // Increase the shift amount to account for the left shift.
17168 ShiftAmt += WideElts - NumElts;
17171 Res = DAG.getNode(Opcode, DL, WideVT, Res,
17172 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17173 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17174 DAG.getIntPtrConstant(0, DL));
17176 Offset += NumElts; // Increment for next iteration.
17182 switch (VT.SimpleTy) {
17184 llvm_unreachable("Expected a vector of i1 elements");
17186 ExtVT = MVT::v2i64;
17189 ExtVT = MVT::v4i32;
17192 // Take a 512-bit type so we get more shuffle options on KNL; if we have VLX, use a 256-bit operation instead.
17194 ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
17197 // Take 512-bit type, unless we are avoiding 512-bit types and have the
17198 // 256-bit operation available.
17199 ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
17202 // Take 512-bit type, unless we are avoiding 512-bit types and have the
17203 // 256-bit operation available.
17204 assert(Subtarget.hasBWI() && "Expected AVX512BW support");
17205 ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
17208 // Fall back to scalarization. FIXME: We can do better if the shuffle
17209 // can be partitioned cleanly.
17210 if (!Subtarget.useBWIRegs())
17212 ExtVT = MVT::v64i8;
17216 V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
17217 V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
17219 SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
17220 // Since the i1 elements were sign-extended, we can recover the mask with a signed compare against zero.
17221 int NumElems = VT.getVectorNumElements();
17222 if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
17223 (Subtarget.hasDQI() && (NumElems < 32)))
17224 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
17225 Shuffle, ISD::SETGT);
17227 return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
17230 /// Helper function that returns true if the shuffle mask should be
17231 /// commuted to improve canonicalization.
17232 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
17233 int NumElements = Mask.size();
17235 int NumV1Elements = 0, NumV2Elements = 0;
17239 else if (M < NumElements)
17244 // Commute the shuffle as needed such that more elements come from V1 than
17245 // V2. This allows us to match the shuffle pattern strictly on how many
17246 // elements come from V1 without handling the symmetric cases.
17247 if (NumV2Elements > NumV1Elements)
17250 assert(NumV1Elements > 0 && "No V1 indices");
17252 if (NumV2Elements == 0)
17255 // When the numbers of V1 and V2 elements are the same, try to minimize the
17256 // number of uses of V2 in the low half of the vector. When that is tied,
17257 // ensure that the sum of indices for V1 is equal to or lower than the sum of
17258 // indices for V2. When those are equal, try to ensure that the number of odd
17259 // indices for V1 is lower than the number of odd indices for V2.
17260 if (NumV1Elements == NumV2Elements) {
17261 int LowV1Elements = 0, LowV2Elements = 0;
17262 for (int M : Mask.slice(0, NumElements / 2))
17263 if (M >= NumElements)
17267 if (LowV2Elements > LowV1Elements)
17269 if (LowV2Elements == LowV1Elements) {
17270 int SumV1Indices = 0, SumV2Indices = 0;
17271 for (int i = 0, Size = Mask.size(); i < Size; ++i)
17272 if (Mask[i] >= NumElements)
17274 else if (Mask[i] >= 0)
17276 if (SumV2Indices < SumV1Indices)
17278 if (SumV2Indices == SumV1Indices) {
17279 int NumV1OddIndices = 0, NumV2OddIndices = 0;
17280 for (int i = 0, Size = Mask.size(); i < Size; ++i)
17281 if (Mask[i] >= NumElements)
17282 NumV2OddIndices += i % 2;
17283 else if (Mask[i] >= 0)
17284 NumV1OddIndices += i % 2;
17285 if (NumV2OddIndices < NumV1OddIndices)
17294 /// Top-level lowering for x86 vector shuffles.
17296 /// This handles decomposition, canonicalization, and lowering of all x86
17297 /// vector shuffles. Most of the specific lowering strategies are encapsulated
17298 /// above in helper routines. The canonicalization attempts to widen shuffles
17299 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
17300 /// s.t. only one of the two inputs needs to be tested, etc.
17301 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
17302 SelectionDAG &DAG) {
17303 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
17304 ArrayRef<int> OrigMask = SVOp->getMask();
17305 SDValue V1 = Op.getOperand(0);
17306 SDValue V2 = Op.getOperand(1);
17307 MVT VT = Op.getSimpleValueType();
17308 int NumElements = VT.getVectorNumElements();
17310 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
17312 assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
17313 "Can't lower MMX shuffles");
17315 bool V1IsUndef = V1.isUndef();
17316 bool V2IsUndef = V2.isUndef();
17317 if (V1IsUndef && V2IsUndef)
17318 return DAG.getUNDEF(VT);
17320 // When we create a shuffle node we put the UNDEF node in the second operand,
17321 // but in some cases the first operand may be transformed to UNDEF.
17322 // In this case we should just commute the node.
17324 return DAG.getCommutedVectorShuffle(*SVOp);
17326 // Check for non-undef masks pointing at an undef vector and make the masks
17327 // undef as well. This makes it easier to match the shuffle based solely on the mask.
17330 if (V2IsUndef && any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
17331 SmallVector<int, 8> NewMask(OrigMask.begin(), OrigMask.end());
17332 for (int &M : NewMask)
17333 if (M >= NumElements)
17335 return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17338 // Check for illegal shuffle mask element index values.
17339 int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
17340 (void)MaskUpperLimit;
17341 assert(llvm::all_of(OrigMask,
17342 [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
17343 "Out of bounds shuffle index");
17345 // We actually see shuffles that are entirely re-arrangements of a set of
17346 // zero inputs. This mostly happens while decomposing complex shuffles into
17347 // simple ones. Directly lower these as a buildvector of zeros.
17348 APInt KnownUndef, KnownZero;
17349 computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);
17351 APInt Zeroable = KnownUndef | KnownZero;
17352 if (Zeroable.isAllOnesValue())
17353 return getZeroVector(VT, Subtarget, DAG, DL);
17355 bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
17357 // Try to collapse shuffles into using a vector type with fewer elements but
17358 // wider element types. We cap this to not form integers or floating point
17359 // elements wider than 64 bits, but it might be interesting to form i128
17360 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
17361 SmallVector<int, 16> WidenedMask;
17362 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
17363 canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
17364 // Shuffle mask widening should not interfere with a broadcast opportunity
17365 // by obfuscating the operands with bitcasts.
17366 // TODO: Avoid lowering directly from this top-level function: make this
17367 // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
17368 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask, Subtarget, DAG)) return Broadcast;
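// Build the widened type: the same overall bit width with half as many elements, each twice as wide.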
17372 MVT NewEltVT = VT.isFloatingPoint()
17373 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
17374 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
17375 int NewNumElts = NumElements / 2;
17376 MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
17377 // Make sure that the new vector type is legal. For example, v2f64 isn't legal on SSE1.
17379 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
17381 // Modify the new Mask to take all zeros from the all-zero vector.
17382 // Choose indices that are blend-friendly.
17383 bool UsedZeroVector = false;
17384 assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
17385 "V2's non-undef elements are used?!");
17386 for (int i = 0; i != NewNumElts; ++i)
17387 if (WidenedMask[i] == SM_SentinelZero) {
17388 WidenedMask[i] = i + NewNumElts;
17389 UsedZeroVector = true;
17391 // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
17392 // some elements to be undef.
17393 if (UsedZeroVector)
17394 V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
17396 V1 = DAG.getBitcast(NewVT, V1);
17397 V2 = DAG.getBitcast(NewVT, V2);
17398 return DAG.getBitcast(
17399 VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
17403 // Commute the shuffle if it will improve canonicalization.
17404 SmallVector<int, 64> Mask(OrigMask.begin(), OrigMask.end());
17405 if (canonicalizeShuffleMaskWithCommute(Mask)) {
17406 ShuffleVectorSDNode::commuteMask(Mask);
17410 if (SDValue V = lowerShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
17413 // For each vector width, delegate to a specialized lowering routine.
17414 if (VT.is128BitVector())
17415 return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17417 if (VT.is256BitVector())
17418 return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17420 if (VT.is512BitVector())
17421 return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17424 return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17426 llvm_unreachable("Unimplemented!");
17429 /// Try to lower a VSELECT instruction to a vector shuffle.
17430 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
17431 const X86Subtarget &Subtarget,
17432 SelectionDAG &DAG) {
17433 SDValue Cond = Op.getOperand(0);
17434 SDValue LHS = Op.getOperand(1);
17435 SDValue RHS = Op.getOperand(2);
17436 MVT VT = Op.getSimpleValueType();
17438 // Only non-legal VSELECTs reach this lowering; convert those into generic
17439 // shuffles and reuse the shuffle lowering path for blends.
17440 SmallVector<int, 32> Mask;
17441 if (createShuffleMaskFromVSELECT(Mask, Cond))
17442 return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
17447 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
17448 SDValue Cond = Op.getOperand(0);
17449 SDValue LHS = Op.getOperand(1);
17450 SDValue RHS = Op.getOperand(2);
17452 // A vselect where all conditions and data are constants can be optimized into
17453 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
17454 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
17455 ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
17456 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
17459 // Try to lower this to a blend-style vector shuffle. This can handle all
17460 // constant condition cases.
17461 if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
17464 // If this VSELECT has a vector of i1 as a mask, it will be directly matched
17465 // with patterns on the mask registers on AVX-512.
17466 MVT CondVT = Cond.getSimpleValueType();
17467 unsigned CondEltSize = Cond.getScalarValueSizeInBits();
17468 if (CondEltSize == 1)
17471 // Variable blends are only legal from SSE4.1 onward.
17472 if (!Subtarget.hasSSE41())
17476 MVT VT = Op.getSimpleValueType();
17477 unsigned EltSize = VT.getScalarSizeInBits();
17478 unsigned NumElts = VT.getVectorNumElements();
17480 // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
17481 // into an i1 condition so that we can use the mask-based 512-bit blend instructions.
17483 if (VT.getSizeInBits() == 512) {
17484 // Build a mask by testing the condition against zero.
17485 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
17486 SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
17487 DAG.getConstant(0, dl, CondVT), ISD::SETNE);
17489 // Now return a new VSELECT using the mask.
17490 return DAG.getSelect(dl, VT, Mask, LHS, RHS);
17493 // SEXT/TRUNC cases where the mask doesn't match the destination size.
17494 if (CondEltSize != EltSize) {
17495 // If we don't have a sign splat, rely on the expansion.
17496 if (CondEltSize != DAG.ComputeNumSignBits(Cond))
17499 MVT NewCondSVT = MVT::getIntegerVT(EltSize);
17500 MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
17501 Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
17502 return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
17505 // Only some types will be legal on some subtargets. If we can emit a legal
17506 // VSELECT-matching blend, return Op; if we need to expand, return a null value.
17508 switch (VT.SimpleTy) {
17510 // Most of the vector types have blends past SSE4.1.
17514 // The byte blends for AVX vectors were introduced only in AVX2.
17515 if (Subtarget.hasAVX2())
17521 case MVT::v16i16: {
17522 // Bitcast everything to the vXi8 type and use a vXi8 vselect.
17523 MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
17524 Cond = DAG.getBitcast(CastVT, Cond);
17525 LHS = DAG.getBitcast(CastVT, LHS);
17526 RHS = DAG.getBitcast(CastVT, RHS);
17527 SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
17528 return DAG.getBitcast(VT, Select);
17533 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
17534 MVT VT = Op.getSimpleValueType();
17537 if (!Op.getOperand(0).getSimpleValueType().is128BitVector()) return SDValue();
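// For i8 extracts, PEXTRB places the byte zero-extended in a 32-bit GPR; truncate back to the requested type.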
17540 if (VT.getSizeInBits() == 8) {
17541 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
17542 Op.getOperand(0), Op.getOperand(1));
17543 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17546 if (VT == MVT::f32) {
17547 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
17548 // the result back to an FR32 register. It's only worth matching if the
17549 // result has a single use which is a store or a bitcast to i32. And in
17550 // the case of a store, it's not worth it if the index is a constant 0,
17551 // because a MOVSSmr can be used instead, which is smaller and faster.
17552 if (!Op.hasOneUse())
17554 SDNode *User = *Op.getNode()->use_begin();
17555 if ((User->getOpcode() != ISD::STORE ||
17556 isNullConstant(Op.getOperand(1))) &&
17557 (User->getOpcode() != ISD::BITCAST ||
17558 User->getValueType(0) != MVT::i32))
17560 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17561 DAG.getBitcast(MVT::v4i32, Op.getOperand(0)), Op.getOperand(1));
17563 return DAG.getBitcast(MVT::f32, Extract);
17566 if (VT == MVT::i32 || VT == MVT::i64) {
17567 // ExtractPS/pextrq work with a constant index.
17568 if (isa<ConstantSDNode>(Op.getOperand(1)))
17575 /// Extract one bit from a mask vector, like v16i1 or v8i1.
17576 /// AVX-512 feature.
17577 static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
17578 const X86Subtarget &Subtarget) {
17579 SDValue Vec = Op.getOperand(0);
17581 MVT VecVT = Vec.getSimpleValueType();
17582 SDValue Idx = Op.getOperand(1);
17583 MVT EltVT = Op.getSimpleValueType();
17585 assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
17586 "Unexpected vector type in ExtractBitFromMaskVector");
17588 // A variable index can't be handled in mask registers;
17589 // extend the vector to VR512/128.
17590 if (!isa<ConstantSDNode>(Idx)) {
17591 unsigned NumElts = VecVT.getVectorNumElements();
17592 // Extending v8i1/v16i1 to 512 bits gets better performance on KNL
17593 // than extending to 128/256 bits.
17594 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17595 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17596 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
17597 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
17598 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
17601 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17602 if (IdxVal == 0) // the operation is legal
17605 // Extend to natively supported kshift.
17606 unsigned NumElems = VecVT.getVectorNumElements();
17607 MVT WideVecVT = VecVT;
17608 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17609 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17610 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
17611 DAG.getUNDEF(WideVecVT), Vec,
17612 DAG.getIntPtrConstant(0, dl));
17615 // Use kshiftr instruction to move to the lower element.
17616 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
17617 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17619 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17620 DAG.getIntPtrConstant(0, dl));
17624 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
17625 SelectionDAG &DAG) const {
17627 SDValue Vec = Op.getOperand(0);
17628 MVT VecVT = Vec.getSimpleValueType();
17629 SDValue Idx = Op.getOperand(1);
17631 if (VecVT.getVectorElementType() == MVT::i1)
17632 return ExtractBitFromMaskVector(Op, DAG, Subtarget);
17634 if (!isa<ConstantSDNode>(Idx)) {
17635 // It's more profitable to go through memory (1 cycle throughput)
17636 // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
17637 // The IACA tool was used to get the performance estimates
17638 // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
17640 // example : extractelement <16 x i8> %a, i32 %i
17642 // Block Throughput: 3.00 Cycles
17643 // Throughput Bottleneck: Port5
17645 // | Num Of | Ports pressure in cycles | |
17646 // | Uops | 0 - DV | 5 | 6 | 7 | |
17647 // ---------------------------------------------
17648 // | 1 | | 1.0 | | | CP | vmovd xmm1, edi
17649 // | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
17650 // | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
17651 // Total Num Of Uops: 4
17654 // Block Throughput: 1.00 Cycles
17655 // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
17657 // | | Ports pressure in cycles | |
17658 // |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
17659 // ---------------------------------------------------------
17660 // |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
17661 // |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
17662 // |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
17663 // Total Num Of Uops: 4
17668 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17670 // If this is a 256-bit vector result, first extract the 128-bit vector and
17671 // then extract the element from the 128-bit vector.
17672 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
17673 // Get the 128-bit vector.
17674 Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
17675 MVT EltVT = VecVT.getVectorElementType();
17677 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
17678 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
17680 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
17681 // this can be done with a mask.
17682 IdxVal &= ElemsPerChunk - 1;
17683 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17684 DAG.getIntPtrConstant(IdxVal, dl));
17687 assert(VecVT.is128BitVector() && "Unexpected vector length");
17689 MVT VT = Op.getSimpleValueType();
17691 if (VT.getSizeInBits() == 16) {
17692 // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
17693 // we're going to zero extend the register or fold the store (SSE41 only).
17694 if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
17695 !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
17696 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
17697 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17698 DAG.getBitcast(MVT::v4i32, Vec), Idx));
17700 // Transform it so it matches pextrw, which produces a 32-bit result.
17701 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
17702 Op.getOperand(0), Op.getOperand(1));
17703 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17706 if (Subtarget.hasSSE41())
17707 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
17710 // TODO: We only extract a single element from v16i8; we can probably afford
17711 // to be more aggressive here before using the default approach of spilling to the stack.
17713 if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
17714 // Extract either the lowest i32 or any i16, and extract the sub-byte.
17715 int DWordIdx = IdxVal / 4;
17716 if (DWordIdx == 0) {
17717 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17718 DAG.getBitcast(MVT::v4i32, Vec),
17719 DAG.getIntPtrConstant(DWordIdx, dl));
17720 int ShiftVal = (IdxVal % 4) * 8;
17722 Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
17723 DAG.getConstant(ShiftVal, dl, MVT::i8));
17724 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
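// Otherwise extract the containing i16 element and shift the requested byte down into bits [7:0].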
17727 int WordIdx = IdxVal / 2;
17728 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
17729 DAG.getBitcast(MVT::v8i16, Vec),
17730 DAG.getIntPtrConstant(WordIdx, dl));
17731 int ShiftVal = (IdxVal % 2) * 8;
17733 Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
17734 DAG.getConstant(ShiftVal, dl, MVT::i8));
17735 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17738 if (VT.getSizeInBits() == 32) {
17742 // SHUFPS the element to the lowest double word, then movss.
17743 int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
17744 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17745 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17746 DAG.getIntPtrConstant(0, dl));
17749 if (VT.getSizeInBits() == 64) {
17750 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
17751 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
17752 // to match extract_elt for f64.
17756 // UNPCKHPD the element to the lowest double word, then movsd.
17757 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
17758 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
17759 int Mask[2] = { 1, -1 };
17760 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17761 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17762 DAG.getIntPtrConstant(0, dl));
17768 /// Insert one bit into a mask vector, like v16i1 or v8i1.
17769 /// AVX-512 feature.
17770 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
17771 const X86Subtarget &Subtarget) {
17773 SDValue Vec = Op.getOperand(0);
17774 SDValue Elt = Op.getOperand(1);
17775 SDValue Idx = Op.getOperand(2);
17776 MVT VecVT = Vec.getSimpleValueType();
17778 if (!isa<ConstantSDNode>(Idx)) {
17779 // Non-constant index. Extend source and destination,
17780 // insert element and then truncate the result.
17781 unsigned NumElts = VecVT.getVectorNumElements();
17782 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17783 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17784 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
17785 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
17786 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
17787 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
17790 // Copy into a k-register, extract to v1i1 and insert_subvector.
17791 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
17793 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec, Idx);
17797 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
17798 SelectionDAG &DAG) const {
17799 MVT VT = Op.getSimpleValueType();
17800 MVT EltVT = VT.getVectorElementType();
17801 unsigned NumElts = VT.getVectorNumElements();
17803 if (EltVT == MVT::i1)
17804 return InsertBitToMaskVector(Op, DAG, Subtarget);
17807 SDValue N0 = Op.getOperand(0);
17808 SDValue N1 = Op.getOperand(1);
17809 SDValue N2 = Op.getOperand(2);
17811 auto *N2C = dyn_cast<ConstantSDNode>(N2);
17812 if (!N2C || N2C->getAPIntValue().uge(NumElts))
17814 uint64_t IdxVal = N2C->getZExtValue();
17816 bool IsZeroElt = X86::isZeroNode(N1);
17817 bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
17819 // If we are inserting an element, see if we can do this more efficiently with
17820 // a blend shuffle with a rematerializable vector than with a costly integer insertion.
17822 if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
17823 16 <= EltVT.getSizeInBits()) {
17824 SmallVector<int, 8> BlendMask;
17825 for (unsigned i = 0; i != NumElts; ++i)
17826 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
17827 SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
17828 : getOnesVector(VT, DAG, dl);
17829 return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
17832 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
17833 // into that, and then insert the subvector back into the result.
17834 if (VT.is256BitVector() || VT.is512BitVector()) {
17835 // With a 256-bit vector, we can insert into the zero element efficiently
17836 // using a blend if we have AVX or AVX2 and the right data type.
17837 if (VT.is256BitVector() && IdxVal == 0) {
17838 // TODO: It is worthwhile to cast integer to floating point and back
17839 // and incur a domain crossing penalty if that's what we'll end up
17840 // doing anyway after extracting to a 128-bit vector.
17841 if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
17842 (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
17843 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17844 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
17845 DAG.getTargetConstant(1, dl, MVT::i8));
17849 // Get the desired 128-bit vector chunk.
17850 SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
17852 // Insert the element into the desired chunk.
17853 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
17854 assert(isPowerOf2_32(NumEltsIn128));
17855 // Since NumEltsIn128 is a power of 2 we can use a mask instead of modulo.
17856 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
17858 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
17859 DAG.getIntPtrConstant(IdxIn128, dl));
17861 // Insert the changed part back into the bigger vector
17862 return insert128BitVector(N0, V, IdxVal, DAG, dl);
17864 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
17866 // This will be just movd/movq/movss/movsd.
17867 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode()) &&
17868 (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
17869 EltVT == MVT::i64)) {
17870 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17871 return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
17874 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
17875 // argument. SSE41 required for pinsrb.
17876 if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
17878 if (VT == MVT::v8i16) {
17879 assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
17880 Opc = X86ISD::PINSRW;
17882 assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
17883 assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
17884 Opc = X86ISD::PINSRB;
17887 if (N1.getValueType() != MVT::i32)
17888 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
17889 if (N2.getValueType() != MVT::i32)
17890 N2 = DAG.getIntPtrConstant(IdxVal, dl);
17891 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
17894 if (Subtarget.hasSSE41()) {
17895 if (EltVT == MVT::f32) {
17896 // Bits [7:6] of the constant are the source select. This will always be
17897 // zero here. The DAG Combiner may combine an extract_elt index into
17898 // these bits. For example (insert (extract, 3), 2) could be matched by
17899 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
17900 // Bits [5:4] of the constant are the destination select. This is the
17901 // value of the incoming immediate.
17902 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
17903 // combine either bitwise AND or insert of float 0.0 to set these bits.
17905 bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
17906 if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
17907 // If this is an insertion of 32 bits into the low 32 bits of
17908 // a vector, we prefer to generate a blend with immediate rather
17909 // than an insertps. Blends are simpler operations in hardware and so
17910 // will always have equal or better performance than insertps.
17911 // But if optimizing for size and there's a load folding opportunity,
17912 // generate insertps because blendps does not have a 32-bit memory operand.
17914 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17915 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
17916 DAG.getTargetConstant(1, dl, MVT::i8));
17918 // Create this as a scalar to vector.
17919 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17920 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
17921 DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
17924 // PINSR* works with constant index.
17925 if (EltVT == MVT::i32 || EltVT == MVT::i64)
17932 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
17933 SelectionDAG &DAG) {
17935 MVT OpVT = Op.getSimpleValueType();
17937 // It's always cheaper to replace a xor+movd with xorps, and it simplifies further combines.
17939 if (X86::isZeroNode(Op.getOperand(0)))
17940 return getZeroVector(OpVT, Subtarget, DAG, dl);
17942 // If this is a 256-bit vector result, first insert into a 128-bit
17943 // vector and then insert into the 256-bit vector.
17944 if (!OpVT.is128BitVector()) {
17945 // Insert into a 128-bit vector.
17946 unsigned SizeFactor = OpVT.getSizeInBits() / 128;
17947 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
17948 OpVT.getVectorNumElements() / SizeFactor);
17950 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
17952 // Insert the 128-bit vector.
17953 return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
17955 assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
17956 "Expected an SSE type!");
17958 // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
17959 if (OpVT == MVT::v4i32)
17962 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
17963 return DAG.getBitcast(
17964 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
17967 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
17968 // simple superregister reference or explicit instructions to insert
17969 // the upper bits of a vector.
17970 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17971 SelectionDAG &DAG) {
17972 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
17974 return insert1BitVector(Op, DAG, Subtarget);
17977 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17978 SelectionDAG &DAG) {
17979 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
17980 "Only vXi1 extract_subvectors need custom lowering");
17983 SDValue Vec = Op.getOperand(0);
17984 SDValue Idx = Op.getOperand(1);
17986 if (!isa<ConstantSDNode>(Idx))
17989 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17990 if (IdxVal == 0) // the operation is legal
17993 MVT VecVT = Vec.getSimpleValueType();
17994 unsigned NumElems = VecVT.getVectorNumElements();
17996 // Extend to natively supported kshift.
17997 MVT WideVecVT = VecVT;
17998 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17999 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
18000 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
18001 DAG.getUNDEF(WideVecVT), Vec,
18002 DAG.getIntPtrConstant(0, dl));
18005 // Shift to the LSB.
18006 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
18007 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
18009 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
18010 DAG.getIntPtrConstant(0, dl));
18013 // Returns the appropriate wrapper opcode for a global reference.
18014 unsigned X86TargetLowering::getGlobalWrapperKind(
18015 const GlobalValue *GV, const unsigned char OpFlags) const {
18016 // References to absolute symbols are never PC-relative.
18017 if (GV && GV->isAbsoluteSymbolRef())
18018 return X86ISD::Wrapper;
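// RIP-relative references are only usable under the small and kernel code models, where symbols
// are known to be reachable through the 32-bit RIP-relative displacement.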
18020 CodeModel::Model M = getTargetMachine().getCodeModel();
18021 if (Subtarget.isPICStyleRIPRel() &&
18022 (M == CodeModel::Small || M == CodeModel::Kernel))
18023 return X86ISD::WrapperRIP;
18025 // GOTPCREL references must always use RIP.
18026 if (OpFlags == X86II::MO_GOTPCREL)
18027 return X86ISD::WrapperRIP;
18029 return X86ISD::Wrapper;
18032 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
18033 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
18034 // one of the above mentioned nodes. It has to be wrapped because otherwise
18035 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
18036 // be used to form an addressing mode. These wrapped nodes will be selected into MOV32ri.
18039 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
18040 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
18042 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18043 // global base reg.
18044 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18046 auto PtrVT = getPointerTy(DAG.getDataLayout());
18047 SDValue Result = DAG.getTargetConstantPool(
18048 CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
18050 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
18051 // With PIC, the address is actually $g + Offset.
18054 DAG.getNode(ISD::ADD, DL, PtrVT,
18055 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18061 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
18062 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
18064 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18065 // global base reg.
18066 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18068 auto PtrVT = getPointerTy(DAG.getDataLayout());
18069 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
18071 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
18073 // With PIC, the address is actually $g + Offset.
18076 DAG.getNode(ISD::ADD, DL, PtrVT,
18077 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18082 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
18083 SelectionDAG &DAG) const {
18084 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18088 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
18089 // Create the TargetBlockAddressAddress node.
18090 unsigned char OpFlags =
18091 Subtarget.classifyBlockAddressReference();
18092 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
18093 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
18095 auto PtrVT = getPointerTy(DAG.getDataLayout());
18096 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
18097 Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
18099 // With PIC, the address is actually $g + Offset.
18100 if (isGlobalRelativeToPICBase(OpFlags)) {
18101 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18102 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18108 /// Creates target global address or external symbol nodes for calls or other uses.
18110 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
18111 bool ForCall) const {
18112 // Unpack the global address or external symbol.
18113 const SDLoc &dl = SDLoc(Op);
18114 const GlobalValue *GV = nullptr;
18115 int64_t Offset = 0;
18116 const char *ExternalSym = nullptr;
18117 if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
18118 GV = G->getGlobal();
18119 Offset = G->getOffset();
18121 const auto *ES = cast<ExternalSymbolSDNode>(Op);
18122 ExternalSym = ES->getSymbol();
18125 // Calculate some flags for address lowering.
18126 const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
18127 unsigned char OpFlags;
18129 OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
18131 OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
18132 bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
18133 bool NeedsLoad = isGlobalStubReference(OpFlags);
18135 CodeModel::Model M = DAG.getTarget().getCodeModel();
18136 auto PtrVT = getPointerTy(DAG.getDataLayout());
18140 // Create a target global address if this is a global. If possible, fold the
18141 // offset into the global address reference. Otherwise, ADD it on later.
18142 int64_t GlobalOffset = 0;
18143 if (OpFlags == X86II::MO_NO_FLAG &&
18144 X86::isOffsetSuitableForCodeModel(Offset, M)) {
18145 std::swap(GlobalOffset, Offset);
18147 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
18149 // If this is not a global address, this must be an external symbol.
18150 Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
18153 // If this is a direct call, avoid the wrapper if we don't need to do any
18154 // loads or adds. This allows SDAG ISel to match direct calls.
18155 if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
18158 Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
18160 // With PIC, the address is actually $g + Offset.
18162 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18163 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18166 // For globals that require a load from a stub to get the address, emit the load.
18169 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
18170 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18172 // If there was a non-zero offset that we didn't fold, create an explicit
18173 // addition for it.
18175 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
18176 DAG.getConstant(Offset, dl, PtrVT));
18182 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
18183 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18187 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
18188 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
18189 unsigned char OperandFlags, bool LocalDynamic = false) {
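// Emit the TLSADDR (or TLSBASEADDR for local-dynamic) pseudo, which is later expanded into a
// call to __tls_get_addr, and read the result back out of ReturnReg.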
18190 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18191 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18193 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18194 GA->getValueType(0),
18198 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
18202 SDValue Ops[] = { Chain, TGA, *InFlag };
18203 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18205 SDValue Ops[] = { Chain, TGA };
18206 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18209 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
18210 MFI.setAdjustsStack(true);
18211 MFI.setHasCalls(true);
18213 SDValue Flag = Chain.getValue(1);
18214 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
18217 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
18219 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18222 SDLoc dl(GA); // ? function entry point might be better
18223 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18224 DAG.getNode(X86ISD::GlobalBaseReg,
18225 SDLoc(), PtrVT), InFlag);
18226 InFlag = Chain.getValue(1);
18228 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
18231 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
18233 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18235 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
18236 X86::RAX, X86II::MO_TLSGD);
18239 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
18245 // Get the start address of the TLS block for this module.
18246 X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
18247 .getInfo<X86MachineFunctionInfo>();
18248 MFI->incNumLocalDynamicTLSAccesses();
18252 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
18253 X86II::MO_TLSLD, /*LocalDynamic=*/true);
18256 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18257 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
18258 InFlag = Chain.getValue(1);
18259 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
18260 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
18263 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
18267 unsigned char OperandFlags = X86II::MO_DTPOFF;
18268 unsigned WrapperKind = X86ISD::Wrapper;
18269 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18270 GA->getValueType(0),
18271 GA->getOffset(), OperandFlags);
18272 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18274 // Add x@dtpoff with the base.
18275 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
18278 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
18279 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18280 const EVT PtrVT, TLSModel::Model model,
18281 bool is64Bit, bool isPIC) {
18284 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
18285 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
18286 is64Bit ? 257 : 256));
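// Address space 256 maps to %gs and 257 to %fs, so a load from the null pointer in the
// appropriate one reads the thread pointer at offset 0.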
18288 SDValue ThreadPointer =
18289 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
18290 MachinePointerInfo(Ptr));
18292 unsigned char OperandFlags = 0;
18293 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
// the 64-bit initial-exec model, where the offset is loaded RIP-relative from the GOT.
18295 unsigned WrapperKind = X86ISD::Wrapper;
18296 if (model == TLSModel::LocalExec) {
18297 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
18298 } else if (model == TLSModel::InitialExec) {
18300 OperandFlags = X86II::MO_GOTTPOFF;
18301 WrapperKind = X86ISD::WrapperRIP;
18303 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
18306 llvm_unreachable("Unexpected model");
18309 // emit "addl x@ntpoff,%eax" (local exec)
18310 // or "addl x@indntpoff,%eax" (initial exec)
18311 // or "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
18313 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
18314 GA->getOffset(), OperandFlags);
18315 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18317 if (model == TLSModel::InitialExec) {
18318 if (isPIC && !is64Bit) {
18319 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
18320 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18324 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
18325 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18328 // The address of the thread-local variable is the sum of the thread
18329 // pointer and the offset of the variable.
18330 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
18334 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
18336 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
18338 if (DAG.getTarget().useEmulatedTLS())
18339 return LowerToTLSEmulatedModel(GA, DAG);
18341 const GlobalValue *GV = GA->getGlobal();
18342 auto PtrVT = getPointerTy(DAG.getDataLayout());
18343 bool PositionIndependent = isPositionIndependent();
18345 if (Subtarget.isTargetELF()) {
18346 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
18348 case TLSModel::GeneralDynamic:
18349 if (Subtarget.is64Bit())
18350 return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
18351 return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
18352 case TLSModel::LocalDynamic:
18353 return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
18354 Subtarget.is64Bit());
18355 case TLSModel::InitialExec:
18356 case TLSModel::LocalExec:
18357 return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
18358 PositionIndependent);
18360 llvm_unreachable("Unknown TLS model.");
18363 if (Subtarget.isTargetDarwin()) {
18364 // Darwin only has one model of TLS. Lower to that.
18365 unsigned char OpFlag = 0;
18366 unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
18367 X86ISD::WrapperRIP : X86ISD::Wrapper;
18369 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18370 // global base reg.
18371 bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
18373 OpFlag = X86II::MO_TLVP_PIC_BASE;
18375 OpFlag = X86II::MO_TLVP;
18377 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
18378 GA->getValueType(0),
18379 GA->getOffset(), OpFlag);
18380 SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
18382 // With PIC32, the address is actually $g + Offset.
18384 Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
18385 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
// Lowering the machine ISD node will make sure everything ends up in the
// right location.
18390 SDValue Chain = DAG.getEntryNode();
18391 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18392 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
18393 SDValue Args[] = { Chain, Offset };
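// The TLSCALL pseudo is expanded later into Darwin's TLV access sequence:
// an indirect call through the function pointer stored in the variable's
// TLV descriptor. Wrap it in CALLSEQ_START/END like any other call.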
18394 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
18395 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
18396 DAG.getIntPtrConstant(0, DL, true),
18397 Chain.getValue(1), DL);
18399 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
18400 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18401 MFI.setAdjustsStack(true);
18403 // And our return value (tls address) is in the standard call return value
18405 unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
18406 return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
18409 if (Subtarget.isOSWindows()) {
18410 // Just use the implicit TLS architecture
18411 // Need to generate something similar to:
18412 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
// mov ecx, dword [rel _tls_index] ; Load index (from C runtime)
18415 // mov rcx, qword [rdx+rcx*8]
18416 // mov eax, .tls$:tlsvar
18417 // [rax+rcx] contains the address
18418 // Windows 64bit: gs:0x58
18419 // Windows 32bit: fs:__tls_array
18422 SDValue Chain = DAG.getEntryNode();
18424 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
18425 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
18426 // use its literal value of 0x2C.
18427 Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
18428 ? Type::getInt8PtrTy(*DAG.getContext(),
18430 : Type::getInt32PtrTy(*DAG.getContext(),
18433 SDValue TlsArray = Subtarget.is64Bit()
18434 ? DAG.getIntPtrConstant(0x58, dl)
18435 : (Subtarget.isTargetWindowsGNU()
18436 ? DAG.getIntPtrConstant(0x2C, dl)
18437 : DAG.getExternalSymbol("_tls_array", PtrVT));
18439 SDValue ThreadPointer =
18440 DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
18443 if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
18444 res = ThreadPointer;
18446 // Load the _tls_index variable
18447 SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
18448 if (Subtarget.is64Bit())
18449 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
18450 MachinePointerInfo(), MVT::i32);
18452 IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
18454 auto &DL = DAG.getDataLayout();
18456 DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
18457 IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
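// ThreadPointer is the TEB's ThreadLocalStoragePointer array; scaling the
// module's _tls_index by the pointer size and adding it gives the slot that
// holds the base of this module's TLS block (loaded just below).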
18459 res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
18462 res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
18464 // Get the offset of start of .tls section
18465 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18466 GA->getValueType(0),
18467 GA->getOffset(), X86II::MO_SECREL);
18468 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
18470 // The address of the thread local variable is the add of the thread
18471 // pointer with the offset of the variable.
18472 return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
18475 llvm_unreachable("TLS not implemented for this target.");
18478 /// Lower SRA_PARTS and friends, which return two i32 values
18479 /// and take a 2 x i32 value to shift plus a shift amount.
18480 /// TODO: Can this be moved to general expansion code?
18481 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
18482 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
18483 MVT VT = Op.getSimpleValueType();
18484 unsigned VTBits = VT.getSizeInBits();
18486 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
18487 SDValue ShOpLo = Op.getOperand(0);
18488 SDValue ShOpHi = Op.getOperand(1);
18489 SDValue ShAmt = Op.getOperand(2);
// ISD::FSHL and ISD::FSHR have defined overflow behavior but ISD::SHL and
// ISD::SRA/SRL nodes haven't. Insert an AND to be safe; it's optimized away
// otherwise.
18493 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
18494 DAG.getConstant(VTBits - 1, dl, MVT::i8));
18495 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
18496 DAG.getConstant(VTBits - 1, dl, MVT::i8))
18497 : DAG.getConstant(0, dl, VT);
18499 SDValue Tmp2, Tmp3;
18500 if (Op.getOpcode() == ISD::SHL_PARTS) {
18501 Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
18502 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
18504 Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
18505 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
// If the shift amount is larger than or equal to the width of a part, we can't
18509 // rely on the results of shld/shrd. Insert a test and select the appropriate
18510 // values for large shift amounts.
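// For example, with 32-bit parts and a shift amount of 40, SHL_PARTS must
// produce Lo = 0 and Hi = LoIn << 8: the (Amt & 32) test below is non-zero,
// so the selects pick Tmp1/Tmp3 instead of the FSHL/FSHR-based results.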
18511 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
18512 DAG.getConstant(VTBits, dl, MVT::i8));
18513 SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
18514 DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);
18517 if (Op.getOpcode() == ISD::SHL_PARTS) {
18518 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
18519 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
18521 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
18522 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
18525 return DAG.getMergeValues({ Lo, Hi }, dl);
18528 static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
18529 SelectionDAG &DAG) {
18530 MVT VT = Op.getSimpleValueType();
18531 assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
18532 "Unexpected funnel shift opcode!");
18535 SDValue Op0 = Op.getOperand(0);
18536 SDValue Op1 = Op.getOperand(1);
18537 SDValue Amt = Op.getOperand(2);
18539 bool IsFSHR = Op.getOpcode() == ISD::FSHR;
18541 if (VT.isVector()) {
18542 assert(Subtarget.hasVBMI2() && "Expected VBMI2");
18545 std::swap(Op0, Op1);
18547 APInt APIntShiftAmt;
18548 if (X86::isConstantSplat(Amt, APIntShiftAmt)) {
18549 uint64_t ShiftAmt = APIntShiftAmt.urem(VT.getScalarSizeInBits());
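// A constant splat amount can use the immediate forms of the VBMI2
// funnel-shift instructions (VPSHLD/VPSHRD); the urem above reduces the
// amount modulo the element width first.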
18550 return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT, Op0,
18551 Op1, DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
18554 return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
18558 assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
18559 "Unexpected funnel shift type!");
18561 // Expand slow SHLD/SHRD cases if we are not optimizing for size.
18562 bool OptForSize = DAG.shouldOptForSize();
18563 if (!OptForSize && Subtarget.isSHLDSlow())
18567 std::swap(Op0, Op1);
// i16 needs an explicit modulo of the shift amount, but i32/i64 SHLD/SHRD
// have an implicit modulo.
18570 if (VT == MVT::i16)
18571 Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
18572 DAG.getConstant(15, DL, Amt.getValueType()));
18574 unsigned SHDOp = (IsFSHR ? X86ISD::SHRD : X86ISD::SHLD);
18575 return DAG.getNode(SHDOp, DL, VT, Op0, Op1, Amt);
18578 // Try to use a packed vector operation to handle i64 on 32-bit targets when
18579 // AVX512DQ is enabled.
18580 static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
18581 const X86Subtarget &Subtarget) {
18582 assert((Op.getOpcode() == ISD::SINT_TO_FP ||
18583 Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
18584 Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
18585 Op.getOpcode() == ISD::UINT_TO_FP) &&
18586 "Unexpected opcode!");
18587 bool IsStrict = Op->isStrictFPOpcode();
18588 unsigned OpNo = IsStrict ? 1 : 0;
18589 SDValue Src = Op.getOperand(OpNo);
18590 MVT SrcVT = Src.getSimpleValueType();
18591 MVT VT = Op.getSimpleValueType();
18593 if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
18594 (VT != MVT::f32 && VT != MVT::f64))
18597 // Pack the i64 into a vector, do the operation and extract.
18599 // Using 256-bit to ensure result is 128-bits for f32 case.
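// With VLX the 256-bit forms of VCVTQQ2PS/VCVTQQ2PD are available; without
// it only the 512-bit forms exist, so widen to 8 elements in that case.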
18600 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
18601 MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
18602 MVT VecVT = MVT::getVectorVT(VT, NumElts);
18605 SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
18607 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
18608 {Op.getOperand(0), InVec});
18609 SDValue Chain = CvtVec.getValue(1);
18610 SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18611 DAG.getIntPtrConstant(0, dl));
18612 return DAG.getMergeValues({Value, Chain}, dl);
18615 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
18617 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18618 DAG.getIntPtrConstant(0, dl));
18621 static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
18622 const X86Subtarget &Subtarget) {
18624 case ISD::SINT_TO_FP:
18625 // TODO: Handle wider types with AVX/AVX512.
18626 if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
18628 // CVTDQ2PS or (V)CVTDQ2PD
18629 return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
18631 case ISD::UINT_TO_FP:
18632 // TODO: Handle wider types and i64 elements.
18633 if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
18635 // VCVTUDQ2PS or VCVTUDQ2PD
18636 return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
18643 /// Given a scalar cast operation that is extracted from a vector, try to
18644 /// vectorize the cast op followed by extraction. This will avoid an expensive
18645 /// round-trip between XMM and GPR.
18646 static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
18647 const X86Subtarget &Subtarget) {
18648 // TODO: This could be enhanced to handle smaller integer types by peeking
18649 // through an extend.
18650 SDValue Extract = Cast.getOperand(0);
18651 MVT DestVT = Cast.getSimpleValueType();
18652 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
18653 !isa<ConstantSDNode>(Extract.getOperand(1)))
18656 // See if we have a 128-bit vector cast op for this type of cast.
18657 SDValue VecOp = Extract.getOperand(0);
18658 MVT FromVT = VecOp.getSimpleValueType();
18659 unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
18660 MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
18661 MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
18662 if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
18665 // If we are extracting from a non-zero element, first shuffle the source
18666 // vector to allow extracting from element zero.
18668 if (!isNullConstant(Extract.getOperand(1))) {
18669 SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
18670 Mask[0] = Extract.getConstantOperandVal(1);
18671 VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
18673 // If the source vector is wider than 128-bits, extract the low part. Do not
18674 // create an unnecessarily wide vector cast op.
18675 if (FromVT != Vec128VT)
18676 VecOp = extract128BitVector(VecOp, 0, DAG, DL);
18678 // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
18679 // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
18680 SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
18681 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
18682 DAG.getIntPtrConstant(0, DL));
18685 static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
18686 const X86Subtarget &Subtarget) {
18688 bool IsStrict = Op->isStrictFPOpcode();
18689 MVT VT = Op->getSimpleValueType(0);
18690 SDValue Src = Op->getOperand(IsStrict ? 1 : 0);
18692 if (Subtarget.hasDQI()) {
18693 assert(!Subtarget.hasVLX() && "Unexpected features");
18695 assert((Src.getSimpleValueType() == MVT::v2i64 ||
18696 Src.getSimpleValueType() == MVT::v4i64) &&
18697 "Unsupported custom type");
18699 // With AVX512DQ, but not VLX we need to widen to get a 512-bit result type.
18700 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
18702 MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
// Need to concat with zero vector for strict fp to avoid spurious
// exceptions.
18706 SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
18707 : DAG.getUNDEF(MVT::v8i64);
18708 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
18709 DAG.getIntPtrConstant(0, DL));
18710 SDValue Res, Chain;
18712 Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
18713 {Op->getOperand(0), Src});
18714 Chain = Res.getValue(1);
18716 Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
18719 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
18720 DAG.getIntPtrConstant(0, DL));
18723 return DAG.getMergeValues({Res, Chain}, DL);
18727 bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
18728 Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
if (VT != MVT::v4f32 || IsSigned)
  return SDValue();
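// Unsigned v4i64 -> v4f32 without DQI: for inputs with the sign bit set,
// halve the value while ORing the low bit back in (round-to-odd), convert
// that with a signed conversion, and double the result with an FADD; inputs
// that fit in a signed i64 are converted directly. The final select picks
// between the two per element.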
18732 SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
18733 SDValue One = DAG.getConstant(1, DL, MVT::v4i64);
18734 SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
18735 DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
18736 DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
18737 SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
18738 SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
18739 SmallVector<SDValue, 4> SignCvts(4);
18740 SmallVector<SDValue, 4> Chains(4);
18741 for (int i = 0; i != 4; ++i) {
18742 SDValue Src = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
18743 DAG.getIntPtrConstant(i, DL));
18746 DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
18747 {Op.getOperand(0), Src});
18748 Chains[i] = SignCvts[i].getValue(1);
18750 SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Src);
18753 SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);
18755 SDValue Slow, Chain;
18757 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
18758 Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
18759 {Chain, SignCvt, SignCvt});
18760 Chain = Slow.getValue(1);
18762 Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
18765 IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
18766 SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);
18769 return DAG.getMergeValues({Cvt, Chain}, DL);
18774 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
18775 SelectionDAG &DAG) const {
18776 bool IsStrict = Op->isStrictFPOpcode();
18777 unsigned OpNo = IsStrict ? 1 : 0;
18778 SDValue Src = Op.getOperand(OpNo);
18779 SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
18780 MVT SrcVT = Src.getSimpleValueType();
18781 MVT VT = Op.getSimpleValueType();
18784 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
18787 if (SrcVT.isVector()) {
18788 if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
// Note: Since v2f64 is a legal type, we don't need to zero-extend the
// source for strict FP.
18792 return DAG.getNode(
18793 X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
18794 {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
18795 DAG.getUNDEF(SrcVT))});
18796 return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
18797 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
18798 DAG.getUNDEF(SrcVT)));
18800 if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
18801 return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
18806 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
18807 "Unknown SINT_TO_FP to lower!");
18809 bool UseSSEReg = isScalarFPTypeInSSEReg(VT);
18811 // These are really Legal; return the operand so the caller accepts it as
18813 if (SrcVT == MVT::i32 && UseSSEReg)
18815 if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
18818 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18821 // SSE doesn't have an i16 conversion so we need to promote.
18822 if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
18823 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
18825 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
18828 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
18831 if (VT == MVT::f128)
18832 return LowerF128Call(Op, DAG, RTLIB::getSINTTOFP(SrcVT, VT));
18834 SDValue ValueToStore = Src;
18835 if (SrcVT == MVT::i64 && UseSSEReg && !Subtarget.is64Bit())
18836 // Bitcasting to f64 here allows us to do a single 64-bit store from
18837 // an SSE register, avoiding the store forwarding penalty that would come
18838 // with two 32-bit stores.
18839 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
18841 unsigned Size = SrcVT.getSizeInBits()/8;
18842 MachineFunction &MF = DAG.getMachineFunction();
18843 auto PtrVT = getPointerTy(MF.getDataLayout());
18844 int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
18845 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18846 Chain = DAG.getStore(
18847 Chain, dl, ValueToStore, StackSlot,
18848 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18849 std::pair<SDValue, SDValue> Tmp = BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
18852 return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
18857 std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
18859 SelectionDAG &DAG) const {
18863 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
18865 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
18867 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
18869 unsigned ByteSize = SrcVT.getSizeInBits() / 8;
18871 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
18872 MachineMemOperand *LoadMMO;
18874 int SSFI = FI->getIndex();
18875 LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
18876 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18877 MachineMemOperand::MOLoad, ByteSize, ByteSize);
18879 LoadMMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
18880 StackSlot = StackSlot.getOperand(1);
18882 SDValue FILDOps[] = {Chain, StackSlot};
18884 DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, DL,
18885 Tys, FILDOps, SrcVT, LoadMMO);
18886 Chain = Result.getValue(1);
18889 SDValue InFlag = Result.getValue(2);
18891 // FIXME: Currently the FST is glued to the FILD_FLAG. This
18892 // shouldn't be necessary except that RFP cannot be live across
18893 // multiple blocks. When stackifier is fixed, they can be uncoupled.
18894 MachineFunction &MF = DAG.getMachineFunction();
18895 unsigned SSFISize = Op.getValueSizeInBits() / 8;
18896 int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
18897 auto PtrVT = getPointerTy(MF.getDataLayout());
18898 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18899 Tys = DAG.getVTList(MVT::Other);
18900 SDValue FSTOps[] = {Chain, Result, StackSlot, InFlag};
18901 MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
18902 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18903 MachineMemOperand::MOStore, SSFISize, SSFISize);
18905 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps,
18906 Op.getValueType(), StoreMMO);
18907 Result = DAG.getLoad(
18908 Op.getValueType(), DL, Chain, StackSlot,
18909 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18910 Chain = Result.getValue(1);
18913 return { Result, Chain };
18916 /// Horizontal vector math instructions may be slower than normal math with
18917 /// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
18918 /// implementation, and likely shuffle complexity of the alternate sequence.
18919 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
18920 const X86Subtarget &Subtarget) {
18921 bool IsOptimizingSize = DAG.shouldOptForSize();
18922 bool HasFastHOps = Subtarget.hasFastHorizontalOps();
18923 return !IsSingleSource || IsOptimizingSize || HasFastHOps;
18926 /// 64-bit unsigned integer to double expansion.
18927 static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
18928 const X86Subtarget &Subtarget) {
// This algorithm is not obvious. Here is what we're trying to output:
18932 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
18933 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
18935 haddpd %xmm0, %xmm0
18937 pshufd $0x4e, %xmm0, %xmm1
18942 bool IsStrict = Op->isStrictFPOpcode();
18943 unsigned OpNo = IsStrict ? 1 : 0;
18945 LLVMContext *Context = DAG.getContext();
18947 // Build some magic constants.
18948 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
18949 Constant *C0 = ConstantDataVector::get(*Context, CV0);
18950 auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
18951 SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);
18953 SmallVector<Constant*,2> CV1;
18955 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18956 APInt(64, 0x4330000000000000ULL))));
18958 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18959 APInt(64, 0x4530000000000000ULL))));
18960 Constant *C1 = ConstantVector::get(CV1);
18961 SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);
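// c0 interleaves the value's low/high 32-bit halves with the exponent
// patterns 0x43300000/0x45300000, producing doubles whose values are
// 2^52 + lo and 2^84 + hi * 2^32. Subtracting c1 = { 2^52, 2^84 } leaves
// exactly lo and hi * 2^32, and the final add (or haddpd) recombines them.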
18963 // Load the 64-bit value into an XMM register.
18965 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(OpNo));
18967 DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
18968 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18969 /* Alignment = */ 16);
18971 getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
18974 DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
18975 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18976 /* Alignment = */ 16);
18977 SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
18980 // TODO: Are there any fast-math-flags to propagate here?
18982 Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
18983 {Op.getOperand(0), XR2F, CLod1});
18984 Chain = Sub.getValue(1);
18986 Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
18989 if (!IsStrict && Subtarget.hasSSE3() &&
18990 shouldUseHorizontalOp(true, DAG, Subtarget)) {
18991 // FIXME: Do we need a STRICT version of FHADD?
18992 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
18994 SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
18996 Result = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v2f64, MVT::Other},
18997 {Chain, Shuffle, Sub});
18998 Chain = Result.getValue(1);
19000 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
19002 Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
19003 DAG.getIntPtrConstant(0, dl));
19005 return DAG.getMergeValues({Result, Chain}, dl);
19010 /// 32-bit unsigned integer to float expansion.
19011 static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
19012 const X86Subtarget &Subtarget) {
19013 unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19015 // FP constant to bias correct the final result.
19016 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
19019 // Load the 32-bit value into an XMM register.
19021 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));
19023 // Zero out the upper parts of the register.
19024 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
19026 // Or the load with the bias.
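// ORing the zero-extended i32 into the low bits of the bit pattern of 2^52
// yields a double whose value is exactly 2^52 + the integer; subtracting the
// bias below recovers the integer as a double exactly.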
19027 SDValue Or = DAG.getNode(
19028 ISD::OR, dl, MVT::v2i64,
19029 DAG.getBitcast(MVT::v2i64, Load),
19030 DAG.getBitcast(MVT::v2i64,
19031 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                 DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
19036 if (Op.getNode()->isStrictFPOpcode()) {
19037 // Subtract the bias.
19038 // TODO: Are there any fast-math-flags to propagate here?
19039 SDValue Chain = Op.getOperand(0);
19040 SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
19041 {Chain, Or, Bias});
19043 if (Op.getValueType() == Sub.getValueType())
19046 // Handle final rounding.
19047 std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
19048 Sub, Sub.getValue(1), dl, Op.getSimpleValueType());
19050 return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
19053 // Subtract the bias.
19054 // TODO: Are there any fast-math-flags to propagate here?
19055 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
19057 // Handle final rounding.
19058 return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
19061 static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
19062 const X86Subtarget &Subtarget,
19064 if (Op.getSimpleValueType() != MVT::v2f64)
19067 bool IsStrict = Op->isStrictFPOpcode();
19069 SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
19070 assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
19072 if (Subtarget.hasAVX512()) {
19073 if (!Subtarget.hasVLX()) {
19074 // Let generic type legalization widen this.
19077 // Otherwise pad the integer input with 0s and widen the operation.
19078 N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19079 DAG.getConstant(0, DL, MVT::v2i32));
19080 SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
19081 {Op.getOperand(0), N0});
19082 SDValue Chain = Res.getValue(1);
19083 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
19084 DAG.getIntPtrConstant(0, DL));
19085 return DAG.getMergeValues({Res, Chain}, DL);
19088 // Legalize to v4i32 type.
19089 N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19090 DAG.getUNDEF(MVT::v2i32));
19092 return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
19093 {Op.getOperand(0), N0});
19094 return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
// Zero extend to v2i64 and OR with the floating point representation of 2^52.
// This gives us the floating point equivalent of 2^52 + the i32 integer,
// since double has 52 bits of mantissa. Then subtract 2^52 in floating
// point, leaving just our i32 integers in double format.
19101 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
19103 DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), DL, MVT::v2f64);
19104 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
19105 DAG.getBitcast(MVT::v2i64, VBias));
19106 Or = DAG.getBitcast(MVT::v2f64, Or);
19109 return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
19110 {Op.getOperand(0), Or, VBias});
19111 return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
19114 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
19115 const X86Subtarget &Subtarget) {
19117 bool IsStrict = Op->isStrictFPOpcode();
19118 SDValue V = Op->getOperand(IsStrict ? 1 : 0);
19119 MVT VecIntVT = V.getSimpleValueType();
19120 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
19121 "Unsupported custom type");
19123 if (Subtarget.hasAVX512()) {
19124 // With AVX512, but not VLX we need to widen to get a 512-bit result type.
19125 assert(!Subtarget.hasVLX() && "Unexpected features");
19126 MVT VT = Op->getSimpleValueType(0);
19128 // v8i32->v8f64 is legal with AVX512 so just return it.
19129 if (VT == MVT::v8f64)
19132 assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
19134 MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
19135 MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
// Need to concat with zero vector for strict fp to avoid spurious
// exceptions.
SDValue Tmp =
    IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
19140 V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
19141 DAG.getIntPtrConstant(0, DL));
19142 SDValue Res, Chain;
19144 Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
19145 {Op->getOperand(0), V});
19146 Chain = Res.getValue(1);
19148 Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
19151 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19152 DAG.getIntPtrConstant(0, DL));
19155 return DAG.getMergeValues({Res, Chain}, DL);
19159 if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
19160 Op->getSimpleValueType(0) == MVT::v4f64) {
19161 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
19162 Constant *Bias = ConstantFP::get(
19164 APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
19165 auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
19166 SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, /*Alignment*/ 8);
19167 SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
19168 SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
19169 SDValue VBias = DAG.getMemIntrinsicNode(
19170 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
19171 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
19172 /*Alignment*/ 8, MachineMemOperand::MOLoad);
19174 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
19175 DAG.getBitcast(MVT::v4i64, VBias));
19176 Or = DAG.getBitcast(MVT::v4f64, Or);
19179 return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
19180 {Op.getOperand(0), Or, VBias});
19181 return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
19184 // The algorithm is the following:
19185 // #ifdef __SSE4_1__
19186 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19187 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19188 // (uint4) 0x53000000, 0xaa);
19190 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19191 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
19193 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19194 // return (float4) lo + fhi;
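// As bit patterns, lo is 2^23 + (v & 0xffff) and hi is 2^39 + (v >> 16) * 2^16.
// The subtraction of (2^39 + 2^23) from hi is exact, and the final add then
// reconstructs v with a single correctly rounded float operation.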
19196 bool Is128 = VecIntVT == MVT::v4i32;
19197 MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
19198 // If we convert to something else than the supported type, e.g., to v4f64,
19200 if (VecFloatVT != Op->getSimpleValueType(0))
// In the #ifdef/#else code, we have in common:
19204 // - The vector of constants:
19210 // Create the splat vector for 0x4b000000.
19211 SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
19212 // Create the splat vector for 0x53000000.
19213 SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
19215 // Create the right shift.
19216 SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
19217 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
19220 if (Subtarget.hasSSE41()) {
19221 MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
19222 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19223 SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
19224 SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
// Low will be bitcasted right away, so do not bother bitcasting back to its
// original type.
19227 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
19228 VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19229 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19230 // (uint4) 0x53000000, 0xaa);
19231 SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
19232 SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
19233 // High will be bitcasted right away, so do not bother bitcasting back to
19234 // its original type.
19235 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
19236 VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19238 SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
19239 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19240 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
19241 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
19243 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
19244 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
19247 // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
19248 SDValue VecCstFSub = DAG.getConstantFP(
19249 APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);
19251 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19252 // NOTE: By using fsub of a positive constant instead of fadd of a negative
19253 // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
19254 // enabled. See PR24512.
19255 SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
19256 // TODO: Are there any fast-math-flags to propagate here?
19258 SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
19259 // return (float4) lo + fhi;
19261 SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
19262 {Op.getOperand(0), HighBitcast, VecCstFSub});
19263 return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
19264 {FHigh.getValue(1), LowBitcast, FHigh});
19268 DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
19269 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
19272 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
19273 const X86Subtarget &Subtarget) {
19274 unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19275 SDValue N0 = Op.getOperand(OpNo);
19276 MVT SrcVT = N0.getSimpleValueType();
19279 switch (SrcVT.SimpleTy) {
19281 llvm_unreachable("Custom UINT_TO_FP is not supported!");
19283 return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
19286 return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
19289 return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19293 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
19294 SelectionDAG &DAG) const {
19295 bool IsStrict = Op->isStrictFPOpcode();
19296 unsigned OpNo = IsStrict ? 1 : 0;
19297 SDValue Src = Op.getOperand(OpNo);
19299 auto PtrVT = getPointerTy(DAG.getDataLayout());
19300 MVT SrcVT = Src.getSimpleValueType();
19301 MVT DstVT = Op->getSimpleValueType(0);
19302 SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19304 if (DstVT == MVT::f128)
19305 return LowerF128Call(Op, DAG, RTLIB::getUINTTOFP(SrcVT, DstVT));
19307 if (DstVT.isVector())
19308 return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
19310 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19313 if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
19314 (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
19315 // Conversions from unsigned i32 to f32/f64 are legal,
19316 // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
19320 // Promote i32 to i64 and use a signed conversion on 64-bit targets.
19321 if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
19322 Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
19324 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
19326 return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
19329 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19332 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
19333 return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
19334 if (SrcVT == MVT::i32 && X86ScalarSSEf64 && DstVT != MVT::f80)
19335 return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
19336 if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
19339 // Make a 64-bit buffer, and use it to build an FILD.
19340 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
19341 if (SrcVT == MVT::i32) {
19342 SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
19344 DAG.getStore(Chain, dl, Src, StackSlot, MachinePointerInfo());
19345 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
19346 OffsetSlot, MachinePointerInfo());
19347 std::pair<SDValue, SDValue> Tmp =
19348 BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
19350 return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19355 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
19356 SDValue ValueToStore = Src;
19357 if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
19358 // Bitcasting to f64 here allows us to do a single 64-bit store from
19359 // an SSE register, avoiding the store forwarding penalty that would come
19360 // with two 32-bit stores.
19361 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19364 DAG.getStore(Chain, dl, ValueToStore, StackSlot, MachinePointerInfo());
19365 // For i64 source, we need to add the appropriate power of 2 if the input
19366 // was negative. This is the same as the optimization in
// DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
19368 // we must be careful to do the computation in x87 extended precision, not
19369 // in SSE. (The generic code can't know it's OK to do this, or how to.)
19370 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
19371 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
19372 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
19373 MachineMemOperand::MOLoad, 8, 8);
19375 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
19376 SDValue Ops[] = { Store, StackSlot };
19377 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
19379 Chain = Fild.getValue(1);
19382 // Check whether the sign bit is set.
19383 SDValue SignSet = DAG.getSetCC(
19384 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
19385 Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
19387 // Build a 64 bit pair (FF, 0) in the constant pool, with FF in the hi bits.
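// FF is 0x5F800000, the IEEE single-precision encoding of 2^64: if the i64
// was negative, the signed FILD produced Value - 2^64, so adding this fudge
// factor gives the correct unsigned result.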
19388 APInt FF(64, 0x5F80000000000000ULL);
19389 SDValue FudgePtr = DAG.getConstantPool(
19390 ConstantInt::get(*DAG.getContext(), FF), PtrVT);
19392 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
19393 SDValue Zero = DAG.getIntPtrConstant(0, dl);
19394 SDValue Four = DAG.getIntPtrConstant(4, dl);
19395 SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
19396 FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
19398 // Load the value out, extending it from f32 to f80.
19399 SDValue Fudge = DAG.getExtLoad(
19400 ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
19401 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
19402 /* Alignment = */ 4);
19403 Chain = Fudge.getValue(1);
19404 // Extend everything to 80 bits to force it to be done on x87.
19405 // TODO: Are there any fast-math-flags to propagate here?
19407 SDValue Add = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::f80, MVT::Other},
19408 {Chain, Fild, Fudge});
19409 // STRICT_FP_ROUND can't handle equal types.
19410 if (DstVT == MVT::f80)
19412 return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
19413 {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
19415 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
19416 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
19417 DAG.getIntPtrConstant(0, dl));
19420 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
19421 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
19422 // just return an SDValue().
19423 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
19424 // to i16, i32 or i64, and we lower it to a legal sequence and return the
19427 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
19428 bool IsSigned, SDValue &Chain) const {
19429 bool IsStrict = Op->isStrictFPOpcode();
19432 EVT DstTy = Op.getValueType();
19433 SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
19434 EVT TheVT = Value.getValueType();
19435 auto PtrVT = getPointerTy(DAG.getDataLayout());
19437 if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
19438 // f16 must be promoted before using the lowering in this routine.
19439 // fp128 does not use this lowering.
19443 // If using FIST to compute an unsigned i64, we'll need some fixup
19444 // to handle values above the maximum signed i64. A FIST is always
19445 // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
19446 bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
19448 // FIXME: This does not generate an invalid exception if the input does not
19449 // fit in i32. PR44019
19450 if (!IsSigned && DstTy != MVT::i64) {
19451 // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
19452 // The low 32 bits of the fist result will have the correct uint32 result.
19453 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
19457 assert(DstTy.getSimpleVT() <= MVT::i64 &&
19458 DstTy.getSimpleVT() >= MVT::i16 &&
19459 "Unknown FP_TO_INT to lower!");
19461 // We lower FP->int64 into FISTP64 followed by a load from a temporary
19463 MachineFunction &MF = DAG.getMachineFunction();
19464 unsigned MemSize = DstTy.getStoreSize();
19465 int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
19466 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19468 Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19470 SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
19472 if (UnsignedFixup) {
19474 // Conversion to unsigned i64 is implemented with a select,
19475 // depending on whether the source value fits in the range
19476 // of a signed i64. Let Thresh be the FP equivalent of
19477 // 0x8000000000000000ULL.
// Adjust = (Value < Thresh) ? 0 : 0x8000000000000000;
// FltOfs = (Value < Thresh) ? 0.0 : 2^63 (as a FP constant);
19481 // FistSrc = (Value - FltOfs);
19482 // Fist-to-mem64 FistSrc
19483 // Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
19484 // to XOR'ing the high 32 bits with Adjust.
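// Because FistSrc is then in [0, 2^63), the FIST result has its top bit
// clear, so XORing in 0x8000000000000000 is the same as adding back the
// 2^63 that was subtracted from the input.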
19486 // Being a power of 2, Thresh is exactly representable in all FP formats.
19487 // For X87 we'd like to use the smallest FP type for this constant, but
19488 // for DAG type consistency we have to match the FP operand type.
19490 APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
19491 LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
19492 bool LosesInfo = false;
19493 if (TheVT == MVT::f64)
19494 // The rounding mode is irrelevant as the conversion should be exact.
19495 Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
19497 else if (TheVT == MVT::f80)
19498 Status = Thresh.convert(APFloat::x87DoubleExtended(),
19499 APFloat::rmNearestTiesToEven, &LosesInfo);
19501 assert(Status == APFloat::opOK && !LosesInfo &&
19502 "FP conversion should have been exact");
19504 SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
19506 EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
19507 *DAG.getContext(), TheVT);
19510 Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETLT,
19511 Chain, /*IsSignaling*/ true);
19512 Chain = Cmp.getValue(1);
19514 Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETLT);
19517 Adjust = DAG.getSelect(DL, MVT::i64, Cmp,
19518 DAG.getConstant(0, DL, MVT::i64),
19519 DAG.getConstant(APInt::getSignMask(64),
19521 SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp,
19522 DAG.getConstantFP(0.0, DL, TheVT),
19526 Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
19527 { Chain, Value, FltOfs });
19528 Chain = Value.getValue(1);
19530 Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
19533 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
19535 // FIXME This causes a redundant load/store if the SSE-class value is already
19536 // in memory, such as if it is on the callstack.
19537 if (isScalarFPTypeInSSEReg(TheVT)) {
19538 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
19539 Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
19540 SDVTList Tys = DAG.getVTList(TheVT, MVT::Other);
19541 SDValue Ops[] = { Chain, StackSlot };
19543 unsigned FLDSize = TheVT.getStoreSize();
19544 assert(FLDSize <= MemSize && "Stack slot not big enough");
19545 MachineMemOperand *MMO = MF.getMachineMemOperand(
19546 MPI, MachineMemOperand::MOLoad, FLDSize, FLDSize);
19547 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
19548 Chain = Value.getValue(1);
19551 // Build the FP_TO_INT*_IN_MEM
19552 MachineMemOperand *MMO = MF.getMachineMemOperand(
19553 MPI, MachineMemOperand::MOStore, MemSize, MemSize);
19554 SDValue Ops[] = { Chain, Value, StackSlot };
19555 SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
19556 DAG.getVTList(MVT::Other),
19559 SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
19560 Chain = Res.getValue(1);
19562 // If we need an unsigned fixup, XOR the result with adjust.
19564 Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
19569 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
19570 const X86Subtarget &Subtarget) {
19571 MVT VT = Op.getSimpleValueType();
19572 SDValue In = Op.getOperand(0);
19573 MVT InVT = In.getSimpleValueType();
19575 unsigned Opc = Op.getOpcode();
19577 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
19578 assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
19579 "Unexpected extension opcode");
19580 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19581 "Expected same number of elements");
19582 assert((VT.getVectorElementType() == MVT::i16 ||
19583 VT.getVectorElementType() == MVT::i32 ||
19584 VT.getVectorElementType() == MVT::i64) &&
19585 "Unexpected element type");
19586 assert((InVT.getVectorElementType() == MVT::i8 ||
19587 InVT.getVectorElementType() == MVT::i16 ||
19588 InVT.getVectorElementType() == MVT::i32) &&
19589 "Unexpected element type");
19591 unsigned ExtendInVecOpc = getOpcode_EXTEND_VECTOR_INREG(Opc);
19593 // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
19594 if (InVT == MVT::v8i8) {
19595 if (VT != MVT::v8i64)
19598 In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
19599 MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
19600 return DAG.getNode(ExtendInVecOpc, dl, VT, In);
19603 if (Subtarget.hasInt256())
19606 // Optimize vectors in AVX mode:
// Use vpmovzxwd for 4 lower elements v8i16 -> v4i32.
19610 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
19611 // Concat upper and lower parts.
// Use vpmovzxdq for 4 lower elements v4i32 -> v2i64.
19615 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
19616 // Concat upper and lower parts.
19618 MVT HalfVT = VT.getHalfNumVectorElementsVT();
19619 SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
19621 // Short-circuit if we can determine that each 128-bit half is the same value.
19622 // Otherwise, this is difficult to match and optimize.
19623 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
19624 if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
19625 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
19627 SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
19628 SDValue Undef = DAG.getUNDEF(InVT);
19629 bool NeedZero = Opc == ISD::ZERO_EXTEND;
19630 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
19631 OpHi = DAG.getBitcast(HalfVT, OpHi);
19633 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
19636 // Helper to split and extend a v16i1 mask to v16i8 or v16i16.
19637 static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
19638 const SDLoc &dl, SelectionDAG &DAG) {
19639 assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
19640 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
19641 DAG.getIntPtrConstant(0, dl));
19642 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
19643 DAG.getIntPtrConstant(8, dl));
19644 Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
19645 Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
19646 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
19647 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19650 static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
19651 const X86Subtarget &Subtarget,
19652 SelectionDAG &DAG) {
19653 MVT VT = Op->getSimpleValueType(0);
19654 SDValue In = Op->getOperand(0);
19655 MVT InVT = In.getSimpleValueType();
19656 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
19658 unsigned NumElts = VT.getVectorNumElements();
// For all vectors but vXi8, we can just emit a sign_extend and a shift. This
// avoids a constant pool load.
19662 if (VT.getVectorElementType() != MVT::i8) {
19663 SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
19664 return DAG.getNode(ISD::SRL, DL, VT, Extend,
19665 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
19668 // Extend VT if BWI is not supported.
19670 if (!Subtarget.hasBWI()) {
19671 // If v16i32 is to be avoided, we'll need to split and concatenate.
19672 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
19673 return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
19675 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
19678 // Widen to 512-bits if VLX is not supported.
19679 MVT WideVT = ExtVT;
19680 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
19681 NumElts *= 512 / ExtVT.getSizeInBits();
19682 InVT = MVT::getVectorVT(MVT::i1, NumElts);
19683 In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
19684 In, DAG.getIntPtrConstant(0, DL));
19685 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
19689 SDValue One = DAG.getConstant(1, DL, WideVT);
19690 SDValue Zero = DAG.getConstant(0, DL, WideVT);
19692 SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
19694 // Truncate if we had to extend above.
19696 WideVT = MVT::getVectorVT(MVT::i8, NumElts);
19697 SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
19700 // Extract back to 128/256-bit if we widened.
19702 SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
19703 DAG.getIntPtrConstant(0, DL));
19705 return SelectedVal;
19708 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
19709 SelectionDAG &DAG) {
19710 SDValue In = Op.getOperand(0);
19711 MVT SVT = In.getSimpleValueType();
19713 if (SVT.getVectorElementType() == MVT::i1)
19714 return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
19716 assert(Subtarget.hasAVX() && "Expected AVX support");
19717 return LowerAVXExtend(Op, DAG, Subtarget);
19720 /// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
19721 /// It makes use of the fact that vectors with enough leading sign/zero bits
19722 /// prevent the PACKSS/PACKUS from saturating the results.
19723 /// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
19724 /// within each 128-bit lane.
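// For example, PACKSSDW on a v4i32 whose elements already fit in i16 simply
// narrows each element; the repeated halving below goes i64/i32 -> i16 -> i8
// as required.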
19725 static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
19726 const SDLoc &DL, SelectionDAG &DAG,
19727 const X86Subtarget &Subtarget) {
19728 assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
19729 "Unexpected PACK opcode");
19730 assert(DstVT.isVector() && "VT not a vector?");
19732 // Requires SSE2 but AVX512 has fast vector truncate.
19733 if (!Subtarget.hasSSE2())
19736 EVT SrcVT = In.getValueType();
19738 // No truncation required, we might get here due to recursive calls.
19739 if (SrcVT == DstVT)
19742 // We only support vector truncation to 64bits or greater from a
19743 // 128bits or greater source.
19744 unsigned DstSizeInBits = DstVT.getSizeInBits();
19745 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
19746 if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
19749 unsigned NumElems = SrcVT.getVectorNumElements();
19750 if (!isPowerOf2_32(NumElems))
19753 LLVMContext &Ctx = *DAG.getContext();
19754 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
19755 assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
19757 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
19759 // Pack to the largest type possible:
19760 // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
19761 EVT InVT = MVT::i16, OutVT = MVT::i8;
19762 if (SrcVT.getScalarSizeInBits() > 16 &&
19763 (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
19768 // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
19769 if (SrcVT.is128BitVector()) {
19770 InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
19771 OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
19772 In = DAG.getBitcast(InVT, In);
19773 SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, In);
19774 Res = extractSubVector(Res, 0, DAG, DL, 64);
19775 return DAG.getBitcast(DstVT, Res);
19778 // Extract lower/upper subvectors.
19779 unsigned NumSubElts = NumElems / 2;
19780 SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
19781 SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
19783 unsigned SubSizeInBits = SrcSizeInBits / 2;
19784 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
19785 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
19787 // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
19788 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
19789 Lo = DAG.getBitcast(InVT, Lo);
19790 Hi = DAG.getBitcast(InVT, Hi);
19791 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
19792 return DAG.getBitcast(DstVT, Res);
19795 // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
19796 // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
19797 if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
19798 Lo = DAG.getBitcast(InVT, Lo);
19799 Hi = DAG.getBitcast(InVT, Hi);
19800 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
19802 // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
19803 // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
19804 // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
19805 SmallVector<int, 64> Mask;
19806 int Scale = 64 / OutVT.getScalarSizeInBits();
19807 scaleShuffleMask<int>(Scale, ArrayRef<int>({ 0, 2, 1, 3 }), Mask);
19808 Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
19810 if (DstVT.is256BitVector())
19811 return DAG.getBitcast(DstVT, Res);
19813 // If 512bit -> 128bit truncate another stage.
19814 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
19815 Res = DAG.getBitcast(PackedVT, Res);
19816 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
19819 // Recursively pack lower/upper subvectors, concat result and pack again.
19820 assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
19821 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumSubElts);
19822 Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
19823 Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
19825 PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
19826 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
19827 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
19830 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
19831 const X86Subtarget &Subtarget) {
19834 MVT VT = Op.getSimpleValueType();
19835 SDValue In = Op.getOperand(0);
19836 MVT InVT = In.getSimpleValueType();
19838 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
19840 // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
19841 unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
19842 if (InVT.getScalarSizeInBits() <= 16) {
19843 if (Subtarget.hasBWI()) {
19844 // legal, will go to VPMOVB2M, VPMOVW2M
19845 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
19846 // We need to shift to get the lsb into the sign position.
19847 // Shifting packed bytes is not supported natively, so bitcast to words.
19848 MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
19849 In = DAG.getNode(ISD::SHL, DL, ExtVT,
19850 DAG.getBitcast(ExtVT, In),
19851 DAG.getConstant(ShiftInx, DL, ExtVT));
19852 In = DAG.getBitcast(InVT, In);
19854 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
19857 // Use TESTD/Q, extended vector to packed dword/qword.
19858 assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
19859 "Unexpected vector type.");
19860 unsigned NumElts = InVT.getVectorNumElements();
19861 assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
19862 // We need to change to a wider element type that we have support for.
19863 // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
19864 // For 16 element vectors we extend to v16i32 unless we are explicitly
19865 // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
19866 // we need to split into two 8 element vectors which we can extend to v8i32,
19867 // truncate and concat the results. There's an additional complication if
19868 // the original type is v16i8. In that case we can't split the v16i8 so
19869 // first we pre-extend it to v16i16 which we can split to v8i16, then extend
19870 // to v8i32, truncate that to v8i1 and concat the two halves.
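// Illustrative sketch of the v16i8 path described above (assuming 512-bit
// vectors are to be avoided):
//   Ext = sign_extend v16i8 In to v16i16
//   Lo  = extract_subvector Ext, 0          ; v8i16
//   Hi  = extract_subvector Ext, 8          ; v8i16
//   LoT = truncate Lo to v8i1               ; re-enters this lowering and is
//   HiT = truncate Hi to v8i1               ; widened to v8i32/v8i64 + setcc
//   Res = concat_vectors LoT, HiT           ; v16i1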
19871 if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
19872 if (InVT == MVT::v16i8) {
19873 // First we need to sign extend up to 256-bits so we can split that.
19874 InVT = MVT::v16i16;
19875 In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
19877 SDValue Lo = extract128BitVector(In, 0, DAG, DL);
19878 SDValue Hi = extract128BitVector(In, 8, DAG, DL);
19879 // We're split now, just emit two truncates and a concat. The two
19880 // truncates will trigger legalization to come back to this function.
19881 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
19882 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
19883 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
19885 // We either have 8 elements or we're allowed to use 512-bit vectors.
19886 // If we have VLX, we want to use the narrowest vector that can get the
19887 // job done so we use vXi32.
19888 MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
19889 MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
19890 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
19892 ShiftInx = InVT.getScalarSizeInBits() - 1;
19895 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
19896 // We need to shift to get the lsb into the sign position.
19897 In = DAG.getNode(ISD::SHL, DL, InVT, In,
19898 DAG.getConstant(ShiftInx, DL, InVT));
19900 // If we have DQI, emit a pattern that will be selected as vpmovq2m/vpmovd2m.
19901 if (Subtarget.hasDQI())
19902 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
19903 return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
19906 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
19908 MVT VT = Op.getSimpleValueType();
19909 SDValue In = Op.getOperand(0);
19910 MVT InVT = In.getSimpleValueType();
19911 unsigned InNumEltBits = InVT.getScalarSizeInBits();
19913 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19914 "Invalid TRUNCATE operation");
19916 // If we're called by the type legalizer, handle a few cases.
19917 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19918 if (!TLI.isTypeLegal(InVT)) {
19919 if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
19920 VT.is128BitVector()) {
19921 assert(Subtarget.hasVLX() && "Unexpected subtarget!");
19922 // The default behavior is to truncate one step, concatenate, and then
19923 // truncate the remainder. We'd rather produce two 64-bit results and
19924 // concatenate those.
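// E.g. a v8i64 -> v8i16 truncate is emitted as two v4i64 -> v4i16 truncates
// (each a single VPMOVQW under VLX) whose results are concatenated, instead
// of truncating to v8i32 first and then truncating again.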
19925 SDValue Lo, Hi;
19926 std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
19928 EVT LoVT, HiVT;
19929 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
19931 Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
19932 Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
19933 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
19936 // Otherwise let default legalization handle it.
19940 if (VT.getVectorElementType() == MVT::i1)
19941 return LowerTruncateVecI1(Op, DAG, Subtarget);
19943 // vpmovqb/w/d, vpmovdb/w, vpmovwb
19944 if (Subtarget.hasAVX512()) {
19945 // Word to byte only under BWI. Otherwise we have to promote to v16i32
19946 // and then truncate that. But we should only do that if we haven't been
19947 // asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
19948 // handled by isel patterns.
19949 if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
19950 Subtarget.canExtendTo512DQ())
19954 unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
19955 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
19957 // Truncate with PACKUS if we are truncating a vector with leading zero bits
19958 // that extend all the way to the packed/truncated value.
19959 // Pre-SSE41 we can only use PACKUSWB.
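// E.g. a v8i32 -> v8i16 truncate whose elements have at least 16 leading
// zero bits can use PACKUSDW on SSE4.1 targets, while a v16i16 -> v16i8
// truncate with 8 known-zero high bits can use PACKUSWB on any SSE2 target.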
19960 KnownBits Known = DAG.computeKnownBits(In);
19961 if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
19962 if (SDValue V =
19963 truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
19964 return V;
19966 // Truncate with PACKSS if we are truncating a vector with sign-bits that
19967 // extend all the way to the packed/truncated value.
19968 if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
19969 if (SDValue V =
19970 truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
19971 return V;
19973 // Handle truncation of V256 to V128 using shuffles.
19974 assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
19976 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
19977 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
19978 if (Subtarget.hasInt256()) {
19979 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
19980 In = DAG.getBitcast(MVT::v8i32, In);
19981 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
19982 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
19983 DAG.getIntPtrConstant(0, DL));
19986 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19987 DAG.getIntPtrConstant(0, DL));
19988 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19989 DAG.getIntPtrConstant(2, DL));
19990 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
19991 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
19992 static const int ShufMask[] = {0, 2, 4, 6};
19993 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
19996 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
19997 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
19998 if (Subtarget.hasInt256()) {
19999 In = DAG.getBitcast(MVT::v32i8, In);
20001 // The PSHUFB mask:
20002 static const int ShufMask1[] = { 0, 1, 4, 5, 8, 9, 12, 13,
20003 -1, -1, -1, -1, -1, -1, -1, -1,
20004 16, 17, 20, 21, 24, 25, 28, 29,
20005 -1, -1, -1, -1, -1, -1, -1, -1 };
20006 In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
20007 In = DAG.getBitcast(MVT::v4i64, In);
20009 static const int ShufMask2[] = {0, 2, -1, -1};
20010 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
20011 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20012 DAG.getIntPtrConstant(0, DL));
20013 return DAG.getBitcast(VT, In);
20016 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
20017 DAG.getIntPtrConstant(0, DL));
20019 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
20020 DAG.getIntPtrConstant(4, DL));
20022 OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
20023 OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
20025 // The PSHUFB mask:
20026 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
20027 -1, -1, -1, -1, -1, -1, -1, -1};
20029 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
20030 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
20032 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
20033 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
20035 // The MOVLHPS Mask:
20036 static const int ShufMask2[] = {0, 1, 4, 5};
20037 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
20038 return DAG.getBitcast(MVT::v8i16, res);
20041 if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
20042 // Use an AND to zero upper bits for PACKUS.
20043 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));
20045 SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
20046 DAG.getIntPtrConstant(0, DL));
20047 SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
20048 DAG.getIntPtrConstant(8, DL));
20049 return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
20052 llvm_unreachable("All 256->128 cases should have been handled above!");
20055 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
20056 bool IsStrict = Op->isStrictFPOpcode();
20057 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
20058 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
20059 MVT VT = Op->getSimpleValueType(0);
20060 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
20061 MVT SrcVT = Src.getSimpleValueType();
20064 if (VT.isVector()) {
20065 if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
20066 MVT ResVT = MVT::v4i32;
20067 MVT TruncVT = MVT::v4i1;
20070 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
20072 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20074 if (!IsSigned && !Subtarget.hasVLX()) {
20075 assert(Subtarget.useAVX512Regs() && "Unexpected features!");
20076 // Widen to 512-bits.
20077 ResVT = MVT::v8i32;
20078 TruncVT = MVT::v8i1;
20079 Opc = Op.getOpcode();
20080 // Need to concat with zero vector for strict fp to avoid spurious
20081 // exceptions.
20082 // TODO: Should we just do this for non-strict as well?
20083 SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
20084 : DAG.getUNDEF(MVT::v8f64);
20085 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
20086 DAG.getIntPtrConstant(0, dl));
20088 SDValue Res, Chain;
20091 DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Op->getOperand(0), Src});
20092 Chain = Res.getValue(1);
20094 Res = DAG.getNode(Opc, dl, ResVT, Src);
20097 Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
20098 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
20099 DAG.getIntPtrConstant(0, dl));
20101 return DAG.getMergeValues({Res, Chain}, dl);
20105 // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
20106 if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
20107 assert(!IsSigned && "Expected unsigned conversion!");
20108 assert(Subtarget.useAVX512Regs() && "Requires avx512f");
20112 // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
20113 if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
20114 (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32)) {
20115 assert(!IsSigned && "Expected unsigned conversion!");
20116 assert(Subtarget.useAVX512Regs() && !Subtarget.hasVLX() &&
20117 "Unexpected features!");
20118 MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
20119 MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
20120 // Need to concat with zero vector for strict fp to avoid spurious
20121 // exceptions.
20122 // TODO: Should we just do this for non-strict as well?
20124 IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20125 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20126 DAG.getIntPtrConstant(0, dl));
20128 SDValue Res, Chain;
20130 Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
20131 {Op->getOperand(0), Src});
20132 Chain = Res.getValue(1);
20134 Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
20137 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20138 DAG.getIntPtrConstant(0, dl));
20141 return DAG.getMergeValues({Res, Chain}, dl);
20145 // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
20146 if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
20147 (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32)) {
20148 assert(Subtarget.useAVX512Regs() && Subtarget.hasDQI() &&
20149 !Subtarget.hasVLX() && "Unexpected features!");
20150 MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
20151 // Need to concat with zero vector for strict fp to avoid spurious
20152 // exceptions.
20153 // TODO: Should we just do this for non-strict as well?
20155 IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20156 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20157 DAG.getIntPtrConstant(0, dl));
20159 SDValue Res, Chain;
20161 Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20162 {Op->getOperand(0), Src});
20163 Chain = Res.getValue(1);
20165 Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
20168 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20169 DAG.getIntPtrConstant(0, dl));
20172 return DAG.getMergeValues({Res, Chain}, dl);
20176 if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
20177 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
20178 SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
20179 DAG.getUNDEF(MVT::v2f32));
20181 unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
20182 : X86ISD::STRICT_CVTTP2UI;
20183 return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
20185 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20186 return DAG.getNode(Opc, dl, VT, Tmp);
20192 assert(!VT.isVector());
20194 bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
20196 if (!IsSigned && UseSSEReg) {
20197 // Conversions from f32/f64 with AVX512 should be legal.
20198 if (Subtarget.hasAVX512())
20201 // Use default expansion for i64.
20202 if (VT == MVT::i64)
20205 assert(VT == MVT::i32 && "Unexpected VT!");
20207 // Promote i32 to i64 and use a signed operation on 64-bit targets.
20208 // FIXME: This does not generate an invalid exception if the input does not
20209 // fit in i32. PR44019
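// I.e. (i32 (fp_to_uint f64 %x)) is emitted here roughly as
//   (i32 (truncate (i64 (fp_to_sint f64 %x))))
// since the 64-bit signed conversion covers the whole unsigned 32-bit range.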
20210 if (Subtarget.is64Bit()) {
20211 SDValue Res, Chain;
20213 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i64, MVT::Other},
20214 { Op.getOperand(0), Src });
20215 Chain = Res.getValue(1);
20217 Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
20219 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20221 return DAG.getMergeValues({ Res, Chain }, dl);
20225 // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
20226 // use fisttp which will be handled later.
20227 if (!Subtarget.hasSSE3())
20231 // Promote i16 to i32 if we can use a SSE operation or the type is f128.
20232 // FIXME: This does not generate an invalid exception if the input does not
20233 // fit in i16. PR44019
20234 if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
20235 assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
20236 SDValue Res, Chain;
20238 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i32, MVT::Other},
20239 { Op.getOperand(0), Src });
20240 Chain = Res.getValue(1);
20242 Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
20244 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20246 return DAG.getMergeValues({ Res, Chain }, dl);
20250 // If this is an FP_TO_SINT using SSEReg, we're done.
20251 if (UseSSEReg && IsSigned)
20254 // fp128 needs to use a libcall.
20255 if (SrcVT == MVT::f128) {
20258 LC = RTLIB::getFPTOSINT(SrcVT, VT);
20260 LC = RTLIB::getFPTOUINT(SrcVT, VT);
20262 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
20263 MakeLibCallOptions CallOptions;
20264 std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
20268 return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
20273 // Fall back to X87.
20275 if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
20277 return DAG.getMergeValues({V, Chain}, dl);
20281 llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
20284 SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
20285 bool IsStrict = Op->isStrictFPOpcode();
20288 MVT VT = Op.getSimpleValueType();
20289 SDValue In = Op.getOperand(IsStrict ? 1 : 0);
20290 MVT SVT = In.getSimpleValueType();
20292 if (VT == MVT::f128) {
20293 RTLIB::Libcall LC = RTLIB::getFPEXT(SVT, VT);
20294 return LowerF128Call(Op, DAG, LC);
20297 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
20300 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
20302 return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
20303 {Op->getOperand(0), Res});
20304 return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
20307 SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
20308 bool IsStrict = Op->isStrictFPOpcode();
20310 MVT VT = Op.getSimpleValueType();
20311 SDValue In = Op.getOperand(IsStrict ? 1 : 0);
20312 MVT SVT = In.getSimpleValueType();
20314 // It's legal except when f128 is involved
20315 if (SVT != MVT::f128)
20318 RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, VT);
20320 // FP_ROUND node has a second operand indicating whether it is known to be
20321 // precise. That doesn't take part in the LibCall so we can't directly use
20322 // LowerF128Call.
20325 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
20326 MakeLibCallOptions CallOptions;
20327 std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, In, CallOptions,
20331 return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
20336 /// Depending on uarch and/or optimizing for size, we might prefer to use a
20337 /// vector operation in place of the typical scalar operation.
20338 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
20339 const X86Subtarget &Subtarget) {
20340 // If both operands have other uses, this is probably not profitable.
20341 SDValue LHS = Op.getOperand(0);
20342 SDValue RHS = Op.getOperand(1);
20343 if (!LHS.hasOneUse() && !RHS.hasOneUse())
20346 // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
20347 bool IsFP = Op.getSimpleValueType().isFloatingPoint();
20348 if (IsFP && !Subtarget.hasSSE3())
20350 if (!IsFP && !Subtarget.hasSSSE3())
20353 // Extract from a common vector.
20354 if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
20355 RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
20356 LHS.getOperand(0) != RHS.getOperand(0) ||
20357 !isa<ConstantSDNode>(LHS.getOperand(1)) ||
20358 !isa<ConstantSDNode>(RHS.getOperand(1)) ||
20359 !shouldUseHorizontalOp(true, DAG, Subtarget))
20362 // Allow commuted 'hadd' ops.
20363 // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
20365 switch (Op.getOpcode()) {
20366 case ISD::ADD: HOpcode = X86ISD::HADD; break;
20367 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
20368 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
20369 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
20371 llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
20373 unsigned LExtIndex = LHS.getConstantOperandVal(1);
20374 unsigned RExtIndex = RHS.getConstantOperandVal(1);
20375 if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
20376 (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
20377 std::swap(LExtIndex, RExtIndex);
20379 if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
20382 SDValue X = LHS.getOperand(0);
20383 EVT VecVT = X.getValueType();
20384 unsigned BitWidth = VecVT.getSizeInBits();
20385 unsigned NumLanes = BitWidth / 128;
20386 unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
20387 assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
20388 "Not expecting illegal vector widths here");
20390 // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
20391 // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
20393 if (BitWidth == 256 || BitWidth == 512) {
20394 unsigned LaneIdx = LExtIndex / NumEltsPerLane;
20395 X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
20396 LExtIndex %= NumEltsPerLane;
20399 // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
20400 // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
20401 // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
20402 // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
20403 SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
20404 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
20405 DAG.getIntPtrConstant(LExtIndex / 2, DL));
20408 /// Depending on uarch and/or optimizing for size, we might prefer to use a
20409 /// vector operation in place of the typical scalar operation.
20410 SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
20411 assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
20412 "Only expecting float/double");
20413 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
20416 /// The only differences between FABS and FNEG are the mask and the logic op.
20417 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
20418 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
20419 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
20420 "Wrong opcode for lowering FABS or FNEG.");
20422 bool IsFABS = (Op.getOpcode() == ISD::FABS);
20424 // If this is a FABS and it has an FNEG user, bail out to fold the combination
20425 // into an FNABS. We'll lower the FABS after that if it is still in use.
20427 for (SDNode *User : Op->uses())
20428 if (User->getOpcode() == ISD::FNEG)
20432 MVT VT = Op.getSimpleValueType();
20434 bool IsF128 = (VT == MVT::f128);
20435 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
20436 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
20437 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
20438 "Unexpected type in LowerFABSorFNEG");
20440 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
20441 // decide if we should generate a 16-byte constant mask when we only need 4 or
20442 // 8 bytes for the scalar case.
20444 // There are no scalar bitwise logical SSE/AVX instructions, so we
20445 // generate a 16-byte vector constant and logic op even for the scalar case.
20446 // Using a 16-byte mask allows folding the load of the mask with
20447 // the logic op, so it can save (~4 bytes) on code size.
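// E.g. a scalar (f32 (fabs %x)) ends up roughly as
//   (extract_vector_elt (X86ISD::FAND (scalar_to_vector %x),
//                                     <4 x f32 bit pattern 0x7fffffff>), 0)
// while FNEG uses FXOR with a 0x80000000 splat instead.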
20448 bool IsFakeVector = !VT.isVector() && !IsF128;
20451 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
20453 unsigned EltBits = VT.getScalarSizeInBits();
20454 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
20455 APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
20456 APInt::getSignMask(EltBits);
20457 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
20458 SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
20460 SDValue Op0 = Op.getOperand(0);
20461 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
20462 unsigned LogicOp = IsFABS ? X86ISD::FAND :
20463 IsFNABS ? X86ISD::FOR :
20465 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
20467 if (VT.isVector() || IsF128)
20468 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
20470 // For the scalar case extend to a 128-bit vector, perform the logic op,
20471 // and extract the scalar result back out.
20472 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
20473 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
20474 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
20475 DAG.getIntPtrConstant(0, dl));
20478 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
20479 SDValue Mag = Op.getOperand(0);
20480 SDValue Sign = Op.getOperand(1);
20483 // If the sign operand is smaller, extend it first.
20484 MVT VT = Op.getSimpleValueType();
20485 if (Sign.getSimpleValueType().bitsLT(VT))
20486 Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
20488 // And if it is bigger, shrink it first.
20489 if (Sign.getSimpleValueType().bitsGT(VT))
20490 Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
20492 // At this point the operands and the result should have the same
20493 // type, and that won't be f80 since that is not custom lowered.
20494 bool IsF128 = (VT == MVT::f128);
20495 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
20496 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
20497 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
20498 "Unexpected type in LowerFCOPYSIGN");
20500 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
20502 // Perform all scalar logic operations as 16-byte vectors because there are no
20503 // scalar FP logic instructions in SSE.
20504 // TODO: This isn't necessary. If we used scalar types, we might avoid some
20505 // unnecessary splats, but we might miss load folding opportunities. Should
20506 // this decision be based on OptimizeForSize?
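// E.g. a scalar (f64 (fcopysign %mag, %sgn)) becomes roughly
//   SignBit = FAND (scalar_to_vector %sgn), <2 x f64 sign-bit mask>
//   MagBits = FAND (scalar_to_vector %mag), <2 x f64 magnitude mask>
//   Result  = extract_vector_elt (FOR MagBits, SignBit), 0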
20507 bool IsFakeVector = !VT.isVector() && !IsF128;
20510 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
20512 // The mask constants are automatically splatted for vector types.
20513 unsigned EltSizeInBits = VT.getScalarSizeInBits();
20514 SDValue SignMask = DAG.getConstantFP(
20515 APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
20516 SDValue MagMask = DAG.getConstantFP(
20517 APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
20519 // First, clear all bits but the sign bit from the second operand (sign).
20521 Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
20522 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
20524 // Next, clear the sign bit from the first operand (magnitude).
20525 // TODO: If we had general constant folding for FP logic ops, this check
20526 // wouldn't be necessary.
20528 if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
20529 APFloat APF = Op0CN->getValueAPF();
20531 MagBits = DAG.getConstantFP(APF, dl, LogicVT);
20533 // If the magnitude operand wasn't a constant, we need to AND out the sign.
20535 Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
20536 MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
20539 // OR the magnitude value with the sign bit.
20540 SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
20541 return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
20542 DAG.getIntPtrConstant(0, dl));
20545 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
20546 SDValue N0 = Op.getOperand(0);
20548 MVT VT = Op.getSimpleValueType();
20550 MVT OpVT = N0.getSimpleValueType();
20551 assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
20552 "Unexpected type for FGETSIGN");
20554 // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
20555 MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
20556 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
20557 Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
20558 Res = DAG.getZExtOrTrunc(Res, dl, VT);
20559 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
20563 /// Helper for creating a X86ISD::SETCC node.
20564 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
20565 SelectionDAG &DAG) {
20566 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20567 DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
20570 /// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
20571 /// style scalarized (associative) reduction patterns.
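/// E.g. for a v4i32 source X, the tree
///   (or (or (extractelt X, 0), (extractelt X, 1)),
///       (or (extractelt X, 2), (extractelt X, 3)))
/// is recognized and SrcOps receives the single source vector X.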
20572 static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
20573 SmallVectorImpl<SDValue> &SrcOps) {
20574 SmallVector<SDValue, 8> Opnds;
20575 DenseMap<SDValue, APInt> SrcOpMap;
20576 EVT VT = MVT::Other;
20578 // Recognize a special case where a vector is cast into a wide integer to
20579 // test all 0s.
20580 assert(Op.getOpcode() == unsigned(BinOp) &&
20581 "Unexpected bit reduction opcode");
20582 Opnds.push_back(Op.getOperand(0));
20583 Opnds.push_back(Op.getOperand(1));
20585 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
20586 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
20587 // BFS traverse all BinOp operands.
20588 if (I->getOpcode() == unsigned(BinOp)) {
20589 Opnds.push_back(I->getOperand(0));
20590 Opnds.push_back(I->getOperand(1));
20591 // Re-evaluate the number of nodes to be traversed.
20592 e += 2; // 2 more nodes (LHS and RHS) are pushed.
20596 // Quit if this is not an EXTRACT_VECTOR_ELT.
20597 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
20600 // Quit if there is no constant index.
20601 SDValue Idx = I->getOperand(1);
20602 if (!isa<ConstantSDNode>(Idx))
20605 SDValue Src = I->getOperand(0);
20606 DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
20607 if (M == SrcOpMap.end()) {
20608 VT = Src.getValueType();
20609 // Quit if not the same type.
20610 if (SrcOpMap.begin() != SrcOpMap.end() &&
20611 VT != SrcOpMap.begin()->first.getValueType())
20613 unsigned NumElts = VT.getVectorNumElements();
20614 APInt EltCount = APInt::getNullValue(NumElts);
20615 M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
20616 SrcOps.push_back(Src);
20618 // Quit if element already used.
20619 unsigned CIdx = cast<ConstantSDNode>(Idx)->getZExtValue();
20620 if (M->second[CIdx])
20622 M->second.setBit(CIdx);
20625 // Quit if not all elements are used.
20626 for (DenseMap<SDValue, APInt>::const_iterator I = SrcOpMap.begin(),
20627 E = SrcOpMap.end();
20629 if (!I->second.isAllOnesValue())
20636 // Check whether an OR'd tree is PTEST-able.
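// E.g. (seteq (or (extractelt (v2i64 X), 0), (extractelt (v2i64 X), 1)), 0)
// becomes (X86ISD::PTEST X, X) paired with an X86::COND_E condition, turning
// the scalarized OR reduction into a single PTEST of the source vector.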
20637 static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
20638 const X86Subtarget &Subtarget,
20639 SelectionDAG &DAG, SDValue &X86CC) {
20640 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
20642 if (!Subtarget.hasSSE41() || !Op->hasOneUse())
20645 SmallVector<SDValue, 8> VecIns;
20646 if (!matchScalarReduction(Op, ISD::OR, VecIns))
20649 // Quit if not 128/256-bit vector.
20650 EVT VT = VecIns[0].getValueType();
20651 if (!VT.is128BitVector() && !VT.is256BitVector())
20655 MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
20657 // Cast all vectors into TestVT for PTEST.
20658 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
20659 VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
20661 // If more than one full vector is evaluated, OR them first before PTEST.
20662 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
20663 // Each iteration will OR 2 nodes and append the result until there is only
20664 // 1 node left, i.e. the final OR'd value of all vectors.
20665 SDValue LHS = VecIns[Slot];
20666 SDValue RHS = VecIns[Slot + 1];
20667 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
20670 X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE,
20672 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, VecIns.back(), VecIns.back());
20675 /// Return true if \c Op has a use that doesn't just read flags.
20676 static bool hasNonFlagsUse(SDValue Op) {
20677 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
20679 SDNode *User = *UI;
20680 unsigned UOpNo = UI.getOperandNo();
20681 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
20682 // Look past the truncate.
20683 UOpNo = User->use_begin().getOperandNo();
20684 User = *User->use_begin();
20687 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
20688 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
20694 // Transform to an x86-specific ALU node with flags if there is a chance of
20695 // using an RMW op or only the flags are used. Otherwise, leave
20696 // the node alone and emit a 'cmp' or 'test' instruction.
20697 static bool isProfitableToUseFlagOp(SDValue Op) {
20698 for (SDNode *U : Op->uses())
20699 if (U->getOpcode() != ISD::CopyToReg &&
20700 U->getOpcode() != ISD::SETCC &&
20701 U->getOpcode() != ISD::STORE)
20707 /// Emit nodes that will be selected as "test Op0,Op0", or something
20708 /// equivalent.
20709 static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
20710 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
20711 // CF and OF aren't always set the way we want. Determine which
20712 // of these we need.
20713 bool NeedCF = false;
20714 bool NeedOF = false;
20717 case X86::COND_A: case X86::COND_AE:
20718 case X86::COND_B: case X86::COND_BE:
20721 case X86::COND_G: case X86::COND_GE:
20722 case X86::COND_L: case X86::COND_LE:
20723 case X86::COND_O: case X86::COND_NO: {
20724 // Check if we really need to set the
20725 // Overflow flag. If NoSignedWrap is present
20726 // that is not actually needed.
20727 switch (Op->getOpcode()) {
20732 if (Op.getNode()->getFlags().hasNoSignedWrap())
20742 // See if we can use the EFLAGS value from the operand instead of
20743 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
20744 // we prove that the arithmetic won't overflow, we can't use OF or CF.
20745 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
20746 // Emit a CMP with 0, which is the TEST pattern.
20747 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
20748 DAG.getConstant(0, dl, Op.getValueType()));
20750 unsigned Opcode = 0;
20751 unsigned NumOperands = 0;
20753 SDValue ArithOp = Op;
20755 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
20756 // which may be the result of a CAST. We use the variable 'Op', which is the
20757 // non-casted variable when we check for possible users.
20758 switch (ArithOp.getOpcode()) {
20760 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
20761 // because a TEST instruction will be better.
20762 if (!hasNonFlagsUse(Op))
20770 if (!isProfitableToUseFlagOp(Op))
20773 // Otherwise use a regular EFLAGS-setting instruction.
20774 switch (ArithOp.getOpcode()) {
20775 default: llvm_unreachable("unexpected operator!");
20776 case ISD::ADD: Opcode = X86ISD::ADD; break;
20777 case ISD::SUB: Opcode = X86ISD::SUB; break;
20778 case ISD::XOR: Opcode = X86ISD::XOR; break;
20779 case ISD::AND: Opcode = X86ISD::AND; break;
20780 case ISD::OR: Opcode = X86ISD::OR; break;
20790 return SDValue(Op.getNode(), 1);
20793 // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
20794 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20795 return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
20796 Op->getOperand(1)).getValue(1);
20803 // Emit a CMP with 0, which is the TEST pattern.
20804 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
20805 DAG.getConstant(0, dl, Op.getValueType()));
20807 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20808 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
20810 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
20811 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
20812 return SDValue(New.getNode(), 1);
20815 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
20816 /// equivalent.
20817 static std::pair<SDValue, SDValue> EmitCmp(SDValue Op0, SDValue Op1,
20818 unsigned X86CC, const SDLoc &dl,
20820 const X86Subtarget &Subtarget,
20821 SDValue Chain, bool IsSignaling) {
20822 if (isNullConstant(Op1))
20823 return std::make_pair(EmitTest(Op0, X86CC, dl, DAG, Subtarget), Chain);
20825 EVT CmpVT = Op0.getValueType();
20827 if (CmpVT.isFloatingPoint()) {
20830 DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
20831 dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
20832 return std::make_pair(Res, Res.getValue(1));
20834 return std::make_pair(DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1),
20838 assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
20839 CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
20841 // Only promote the compare up to i32 if it is a 16-bit operation
20842 // with an immediate. 16-bit immediates are to be avoided.
20843 if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
20844 !DAG.getMachineFunction().getFunction().hasMinSize()) {
20845 ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
20846 ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
20847 // Don't do this if the immediate can fit in 8 bits.
20848 if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
20849 (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
20850 unsigned ExtendOp =
20851 isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
20852 if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
20853 // For equality comparisons try to use SIGN_EXTEND if the input was
20854 // truncate from something with enough sign bits.
20855 if (Op0.getOpcode() == ISD::TRUNCATE) {
20856 SDValue In = Op0.getOperand(0);
20858 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
20860 ExtendOp = ISD::SIGN_EXTEND;
20861 } else if (Op1.getOpcode() == ISD::TRUNCATE) {
20862 SDValue In = Op1.getOperand(0);
20864 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
20866 ExtendOp = ISD::SIGN_EXTEND;
20871 Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
20872 Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
20876 // Try to shrink i64 compares if the input has enough zero bits.
20877 // FIXME: Do this for non-constant compares for constant on LHS?
20878 if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
20879 Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
20880 cast<ConstantSDNode>(Op1)->getAPIntValue().getActiveBits() <= 32 &&
20881 DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
20883 Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
20884 Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
20887 // Use SUB instead of CMP to enable CSE between SUB and CMP.
20888 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
20889 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
20890 return std::make_pair(Sub.getValue(1), SDValue());
20893 /// Convert a comparison if required by the subtarget.
20894 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
20895 SelectionDAG &DAG) const {
20896 // If the subtarget does not support the FUCOMI instruction, floating-point
20897 // comparisons have to be converted.
20898 bool IsCmp = Cmp.getOpcode() == X86ISD::CMP;
20899 bool IsStrictCmp = Cmp.getOpcode() == X86ISD::STRICT_FCMP ||
20900 Cmp.getOpcode() == X86ISD::STRICT_FCMPS;
20902 if (Subtarget.hasCMov() || (!IsCmp && !IsStrictCmp) ||
20903 !Cmp.getOperand(IsStrictCmp ? 1 : 0).getValueType().isFloatingPoint() ||
20904 !Cmp.getOperand(IsStrictCmp ? 2 : 1).getValueType().isFloatingPoint())
20907 // The instruction selector will select an FUCOM instruction instead of
20908 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
20909 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
20910 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86any_fcmp ...)), 8))))
20912 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
20913 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
20914 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
20915 DAG.getConstant(8, dl, MVT::i8));
20916 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
20918 // Some 64-bit targets lack SAHF support, but they do support FCOMI.
20919 assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
20920 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
20923 /// Check if replacement of SQRT with RSQRT should be disabled.
20924 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
20925 EVT VT = Op.getValueType();
20927 // We never want to use both SQRT and RSQRT instructions for the same input.
20928 if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
20932 return Subtarget.hasFastVectorFSQRT();
20933 return Subtarget.hasFastScalarFSQRT();
20936 /// The minimum architected relative accuracy is 2^-12. We need one
20937 /// Newton-Raphson step to have a good float result (24 bits of precision).
20938 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
20939 SelectionDAG &DAG, int Enabled,
20940 int &RefinementSteps,
20941 bool &UseOneConstNR,
20942 bool Reciprocal) const {
20943 EVT VT = Op.getValueType();
20945 // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
20946 // It is likely not profitable to do this for f64 because a double-precision
20947 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
20948 // instructions: convert to single, rsqrtss, convert back to double, refine
20949 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
20950 // along with FMA, this could be a throughput win.
20951 // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
20952 // after legalize types.
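// For reference, the Newton-Raphson step that the generic combiner applies
// on top of this estimate is roughly
//   Est' = Est * (1.5 - 0.5 * Op * Est * Est)
// which refines the ~2^-12 accurate hardware estimate to ~24 bits for f32.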
20953 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20954 (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
20955 (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
20956 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20957 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20958 if (RefinementSteps == ReciprocalEstimate::Unspecified)
20959 RefinementSteps = 1;
20961 UseOneConstNR = false;
20962 // There is no FRSQRT for 512-bits, but there is RSQRT14.
20963 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
20964 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
20969 /// The minimum architected relative accuracy is 2^-12. We need one
20970 /// Newton-Raphson step to have a good float result (24 bits of precision).
20971 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
20973 int &RefinementSteps) const {
20974 EVT VT = Op.getValueType();
20976 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
20977 // It is likely not profitable to do this for f64 because a double-precision
20978 // reciprocal estimate with refinement on x86 prior to FMA requires
20979 // 15 instructions: convert to single, rcpss, convert back to double, refine
20980 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
20981 // along with FMA, this could be a throughput win.
20983 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20984 (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
20985 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20986 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20987 // Enable estimate codegen with 1 refinement step for vector division.
20988 // Scalar division estimates are disabled because they break too much
20989 // real-world code. These defaults are intended to match GCC behavior.
20990 if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
20993 if (RefinementSteps == ReciprocalEstimate::Unspecified)
20994 RefinementSteps = 1;
20996 // There is no FRCP for 512-bits, but there is RCP14.
20997 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
20998 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
21003 /// If we have at least two divisions that use the same divisor, convert to
21004 /// multiplication by a reciprocal. This may need to be adjusted for a given
21005 /// CPU if a division's cost is not at least twice the cost of a multiplication.
21006 /// This is because we still need one division to calculate the reciprocal and
21007 /// then we need two multiplies by that reciprocal as replacements for the
21008 /// original divisions.
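/// E.g. once x/d and y/d share the divisor d, the combiner (under the
/// appropriate fast-math flags) rewrites them roughly as
///   r = 1.0 / d
///   x * r
///   y * r
/// i.e. one division plus two multiplies instead of two divisions.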
21009 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
21014 X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
21016 SmallVectorImpl<SDNode *> &Created) const {
21017 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
21018 if (isIntDivCheap(N->getValueType(0), Attr))
21019 return SDValue(N,0); // Lower SDIV as SDIV
21021 assert((Divisor.isPowerOf2() || (-Divisor).isPowerOf2()) &&
21022 "Unexpected divisor!");
21024 // Only perform this transform if CMOV is supported otherwise the select
21025 // below will become a branch.
21026 if (!Subtarget.hasCMov())
21029 // fold (sdiv X, pow2)
21030 EVT VT = N->getValueType(0);
21031 // FIXME: Support i8.
21032 if (VT != MVT::i16 && VT != MVT::i32 &&
21033 !(Subtarget.is64Bit() && VT == MVT::i64))
21036 unsigned Lg2 = Divisor.countTrailingZeros();
21038 // If the divisor is 2 or -2, the default expansion is better.
21043 SDValue N0 = N->getOperand(0);
21044 SDValue Zero = DAG.getConstant(0, DL, VT);
21045 APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
21046 SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);
21048 // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
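// E.g. for (sdiv i32 %x, 8) this builds roughly
//   %t = select (setlt %x, 0), (%x + 7), %x   ; selected as a CMOV
//   %q = sra %t, 3
// and for a negative divisor the shifted value is additionally negated below.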
21049 SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
21050 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
21051 SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
21053 Created.push_back(Cmp.getNode());
21054 Created.push_back(Add.getNode());
21055 Created.push_back(CMov.getNode());
21059 DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i8));
21061 // If we're dividing by a positive value, we're done. Otherwise, we must
21062 // negate the result.
21063 if (Divisor.isNonNegative())
21066 Created.push_back(SRA.getNode());
21067 return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
21070 /// Result of 'and' is compared against zero. Change to a BT node if possible.
21071 /// Returns the BT node and the condition code needed to use it.
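/// E.g. (setne (and %x, (shl 1, %n)), 0) becomes (X86ISD::BT %x, %n) with
/// X86::COND_B (the tested bit is copied into CF); the SETEQ form uses
/// X86::COND_AE instead.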
21072 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
21073 const SDLoc &dl, SelectionDAG &DAG,
21075 assert(And.getOpcode() == ISD::AND && "Expected AND node!");
21076 SDValue Op0 = And.getOperand(0);
21077 SDValue Op1 = And.getOperand(1);
21078 if (Op0.getOpcode() == ISD::TRUNCATE)
21079 Op0 = Op0.getOperand(0);
21080 if (Op1.getOpcode() == ISD::TRUNCATE)
21081 Op1 = Op1.getOperand(0);
21083 SDValue Src, BitNo;
21084 if (Op1.getOpcode() == ISD::SHL)
21085 std::swap(Op0, Op1);
21086 if (Op0.getOpcode() == ISD::SHL) {
21087 if (isOneConstant(Op0.getOperand(0))) {
21088 // If we looked past a truncate, check that it's only truncating away
21089 // known zeros.
21090 unsigned BitWidth = Op0.getValueSizeInBits();
21091 unsigned AndBitWidth = And.getValueSizeInBits();
21092 if (BitWidth > AndBitWidth) {
21093 KnownBits Known = DAG.computeKnownBits(Op0);
21094 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
21098 BitNo = Op0.getOperand(1);
21100 } else if (Op1.getOpcode() == ISD::Constant) {
21101 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
21102 uint64_t AndRHSVal = AndRHS->getZExtValue();
21103 SDValue AndLHS = Op0;
21105 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
21106 Src = AndLHS.getOperand(0);
21107 BitNo = AndLHS.getOperand(1);
21109 // Use BT if the immediate can't be encoded in a TEST instruction or we
21110 // are optimizing for size and the immediate won't fit in a byte.
21111 bool OptForSize = DAG.shouldOptForSize();
21112 if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
21113 isPowerOf2_64(AndRHSVal)) {
21115 BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
21116 Src.getValueType());
21121 // No patterns found, give up.
21122 if (!Src.getNode())
21125 // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
21126 // instruction. Since the shift amount is in-range-or-undefined, we know
21127 // that doing a bittest on the i32 value is ok. We extend to i32 because
21128 // the encoding for the i16 version is larger than the i32 version.
21129 // Also promote i16 to i32 for performance / code size reasons.
21130 if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
21131 Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);
21133 // See if we can use the 32-bit instruction instead of the 64-bit one for a
21134 // shorter encoding. Since the former takes the modulo 32 of BitNo and the
21135 // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
21136 // known to be zero.
21137 if (Src.getValueType() == MVT::i64 &&
21138 DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
21139 Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
21141 // If the operand types disagree, extend the shift amount to match. Since
21142 // BT ignores high bits (like shifts) we can use anyextend.
21143 if (Src.getValueType() != BitNo.getValueType())
21144 BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
21146 X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
21148 return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
21151 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
21152 /// CMPs.
21153 static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
21154 SDValue &Op1, bool &IsAlwaysSignaling) {
21158 // SSE Condition code mapping:
21159 //  0 - EQ
21160 //  1 - LT
21161 //  2 - LE
21162 //  3 - UNORD
21163 //  4 - NEQ
21164 //  5 - NLT
21165 //  6 - NLE
21166 //  7 - ORD
21167 switch (SetCCOpcode) {
21168 default: llvm_unreachable("Unexpected SETCC condition");
21170 case ISD::SETEQ: SSECC = 0; break;
21172 case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
21174 case ISD::SETOLT: SSECC = 1; break;
21176 case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
21178 case ISD::SETOLE: SSECC = 2; break;
21179 case ISD::SETUO: SSECC = 3; break;
21181 case ISD::SETNE: SSECC = 4; break;
21182 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
21183 case ISD::SETUGE: SSECC = 5; break;
21184 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
21185 case ISD::SETUGT: SSECC = 6; break;
21186 case ISD::SETO: SSECC = 7; break;
21187 case ISD::SETUEQ: SSECC = 8; break;
21188 case ISD::SETONE: SSECC = 12; break;
21191 std::swap(Op0, Op1);
21193 switch (SetCCOpcode) {
21195 IsAlwaysSignaling = true;
21205 IsAlwaysSignaling = false;
21212 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
21213 /// concatenate the result back.
21214 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
21215 MVT VT = Op.getSimpleValueType();
21217 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
21218 "Unsupported value type for operation");
21220 unsigned NumElems = VT.getVectorNumElements();
21222 SDValue CC = Op.getOperand(2);
21224 // Extract the LHS vectors
21225 SDValue LHS = Op.getOperand(0);
21226 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
21227 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
21229 // Extract the RHS vectors
21230 SDValue RHS = Op.getOperand(1);
21231 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
21232 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
21234 // Issue the operation on the smaller types and concatenate the result back
21235 MVT EltVT = VT.getVectorElementType();
21236 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
21237 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
21238 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
21239 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
21242 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
21244 SDValue Op0 = Op.getOperand(0);
21245 SDValue Op1 = Op.getOperand(1);
21246 SDValue CC = Op.getOperand(2);
21247 MVT VT = Op.getSimpleValueType();
21250 assert(VT.getVectorElementType() == MVT::i1 &&
21251 "Cannot set masked compare for this operation");
21253 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
21255 // Prefer SETGT over SETLT.
21256 if (SetCCOpcode == ISD::SETLT) {
21257 SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
21258 std::swap(Op0, Op1);
21261 return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
21264 /// Given a buildvector constant, return a new vector constant with each element
21265 /// incremented or decremented. If incrementing or decrementing would result in
21266 /// unsigned overflow or underflow or this is not a simple vector constant,
21267 /// return an empty value.
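/// E.g. incrementing <4 x i8> <1, 2, 3, 4> yields <2, 3, 4, 5>, while
/// incrementing a vector that contains 255 (or decrementing one that
/// contains 0) returns an empty SDValue.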
21268 static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
21269 auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
21273 MVT VT = V.getSimpleValueType();
21274 MVT EltVT = VT.getVectorElementType();
21275 unsigned NumElts = VT.getVectorNumElements();
21276 SmallVector<SDValue, 8> NewVecC;
21278 for (unsigned i = 0; i < NumElts; ++i) {
21279 auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
21280 if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
21283 // Avoid overflow/underflow.
21284 const APInt &EltC = Elt->getAPIntValue();
21285 if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isNullValue()))
21288 NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
21291 return DAG.getBuildVector(VT, DL, NewVecC);
21294 /// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
21295 /// Op0 u<= Op1:
21296 /// t = psubus Op0, Op1
21297 /// pcmpeq t, <0..0>
21298 static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
21299 ISD::CondCode Cond, const SDLoc &dl,
21300 const X86Subtarget &Subtarget,
21301 SelectionDAG &DAG) {
21302 if (!Subtarget.hasSSE2())
21305 MVT VET = VT.getVectorElementType();
21306 if (VET != MVT::i8 && VET != MVT::i16)
21312 case ISD::SETULT: {
21313 // If the comparison is against a constant we can turn this into a
21314 // setule. With psubus, setule does not require a swap. This is
21315 // beneficial because the constant in the register is no longer
21316 // clobbered as the destination, so it can be hoisted out of a loop.
21317 // Only do this pre-AVX since vpcmp* is no longer destructive.
21318 if (Subtarget.hasAVX())
21320 SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false);
21326 case ISD::SETUGT: {
21327 // If the comparison is against a constant, we can turn this into a setuge.
21328 // This is beneficial because materializing a constant 0 for the PCMPEQ is
21329 // probably cheaper than XOR+PCMPGT using 2 different vector constants:
21330 // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
21331 SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true);
21338 // Psubus is better than flip-sign because it requires no inversion.
21340 std::swap(Op0, Op1);
21346 SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
21347 return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
21348 DAG.getConstant(0, dl, VT));
21351 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
21352 SelectionDAG &DAG) {
21353 bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
21354 Op.getOpcode() == ISD::STRICT_FSETCCS;
21355 SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
21356 SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
21357 SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
21358 MVT VT = Op->getSimpleValueType(0);
21359 ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
21360 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
21365 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
21366 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
21369 bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
21370 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21373 if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
21374 assert(VT.getVectorNumElements() <= 16);
21375 Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
21377 Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
21378 // The SSE/AVX packed FP comparison nodes are defined with a
21379 // floating-point vector result that matches the operand type. This allows
21380 // them to work with an SSE1 target (integer vector types are not legal).
21381 VT = Op0.getSimpleValueType();
21385 bool IsAlwaysSignaling;
21386 unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
21387 if (!Subtarget.hasAVX()) {
21388 // TODO: We could use following steps to handle a quiet compare with
21389 // signaling encodings.
21390 // 1. Get ordered masks from a quiet ISD::SETO
21391 // 2. Use the masks to mask potential unordered elements in operand A, B
21392 // 3. Get the compare results of masked A, B
21393 // 4. Calculating final result using the mask and result from 3
21394 // But currently, we just fall back to scalar operations.
21395 if (IsStrict && IsAlwaysSignaling && !IsSignaling)
21398 // Insert an extra signaling instruction to raise exception.
21399 if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
21400 SDValue SignalCmp = DAG.getNode(
21401 Opc, dl, {VT, MVT::Other},
21402 {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
21403 // FIXME: It seems we need to update the flags of all new strict nodes.
21404 // Otherwise, mayRaiseFPException in MI will return false due to
21405 // NoFPExcept = false by default. However, I didn't find it in other
21406 // patches.
21407 SignalCmp->setFlags(Op->getFlags());
21408 Chain = SignalCmp.getValue(1);
21411 // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
21412 // emit two comparisons and a logic op to tie them together.
21414 // LLVM predicate is SETUEQ or SETONE.
21416 unsigned CombineOpc;
21417 if (Cond == ISD::SETUEQ) {
21420 CombineOpc = X86ISD::FOR;
21422 assert(Cond == ISD::SETONE);
21425 CombineOpc = X86ISD::FAND;
21428 SDValue Cmp0, Cmp1;
21430 Cmp0 = DAG.getNode(
21431 Opc, dl, {VT, MVT::Other},
21432 {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
21433 Cmp1 = DAG.getNode(
21434 Opc, dl, {VT, MVT::Other},
21435 {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
21436 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
21439 Cmp0 = DAG.getNode(
21440 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
21441 Cmp1 = DAG.getNode(
21442 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
21444 Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
21448 Opc, dl, {VT, MVT::Other},
21449 {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
21450 Chain = Cmp.getValue(1);
21453 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
21456 // Handle all other FP comparisons here.
21458 // Toggle the AVX CC's signaling bit (bit 4) when the requested quiet/signaling behavior differs from the predicate's default.
21459 SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
21461 Opc, dl, {VT, MVT::Other},
21462 {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
21463 Chain = Cmp.getValue(1);
21466 Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
21469 // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
21470 // result type of SETCC. The bitcast is expected to be optimized away
21471 // during combining/isel.
21472 Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
21475 return DAG.getMergeValues({Cmp, Chain}, dl);
21480 assert(!IsStrict && "Strict SETCC only handles FP operands.");
21482 MVT VTOp0 = Op0.getSimpleValueType();
21484 assert(VTOp0 == Op1.getSimpleValueType() &&
21485 "Expected operands with same type!");
21486 assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
21487 "Invalid number of packed elements for source and destination!");
21489 // The non-AVX512 code below works under the assumption that source and
21490 // destination types are the same.
21491 assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
21492 "Value types for source and destination must be the same!");
21494 // The result is boolean, but operands are int/float
21495 if (VT.getVectorElementType() == MVT::i1) {
21496 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
21497 // but there is no compare instruction for i8 and i16 elements on KNL.
21498 assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
21499 "Unexpected operand type");
21500 return LowerIntVSETCC_AVX512(Op, DAG);
21503 // Lower using XOP integer comparisons.
21504 if (VT.is128BitVector() && Subtarget.hasXOP()) {
21505 // Translate compare code to XOP PCOM compare mode.
21506 unsigned CmpMode = 0;
21508 default: llvm_unreachable("Unexpected SETCC condition");
21510 case ISD::SETLT: CmpMode = 0x00; break;
21512 case ISD::SETLE: CmpMode = 0x01; break;
21514 case ISD::SETGT: CmpMode = 0x02; break;
21516 case ISD::SETGE: CmpMode = 0x03; break;
21517 case ISD::SETEQ: CmpMode = 0x04; break;
21518 case ISD::SETNE: CmpMode = 0x05; break;
21521 // Are we comparing unsigned or signed integers?
21523 ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
21525 return DAG.getNode(Opc, dl, VT, Op0, Op1,
21526 DAG.getTargetConstant(CmpMode, dl, MVT::i8));
21529 // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
21530 // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
21531 if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
21532 SDValue BC0 = peekThroughBitcasts(Op0);
21533 if (BC0.getOpcode() == ISD::AND) {
21535 SmallVector<APInt, 64> EltBits;
21536 if (getTargetConstantBitsFromNode(BC0.getOperand(1),
21537 VT.getScalarSizeInBits(), UndefElts,
21538 EltBits, false, false)) {
21539 if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
21541 Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
21547 // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
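// Illustration: with 32-bit elements and C == 8 (bit 3 set), SHL by 28 moves
// bit 3 into the sign bit, and SRA by 31 then broadcasts it, yielding
// all-ones exactly when (X & 8) == 8 and all-zeros otherwise.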
21548 if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
21549 Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
21550 ConstantSDNode *C1 = isConstOrConstSplat(Op1);
21551 if (C1 && C1->getAPIntValue().isPowerOf2()) {
21552 unsigned BitWidth = VT.getScalarSizeInBits();
21553 unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
21555 SDValue Result = Op0.getOperand(0);
21556 Result = DAG.getNode(ISD::SHL, dl, VT, Result,
21557 DAG.getConstant(ShiftAmt, dl, VT));
21558 Result = DAG.getNode(ISD::SRA, dl, VT, Result,
21559 DAG.getConstant(BitWidth - 1, dl, VT));
21564 // Break 256-bit integer vector compare into smaller ones.
21565 if (VT.is256BitVector() && !Subtarget.hasInt256())
21566 return Lower256IntVSETCC(Op, DAG);
21568 // If this is a SETNE against the signed minimum value, change it to SETGT.
21569 // If this is a SETNE against the signed maximum value, change it to SETLT,
21570 // which will be swapped to SETGT.
21571 // Otherwise we use PCMPEQ+invert.
21573 if (Cond == ISD::SETNE &&
21574 ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
21575 if (ConstValue.isMinSignedValue())
21577 else if (ConstValue.isMaxSignedValue())
21581 // If both operands are known non-negative, then an unsigned compare is the
21582 // same as a signed compare and there's no need to flip signbits.
21583 // TODO: We could check for more general simplifications here since we're
21584 // computing known bits.
21585 bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
21586 !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
21588 // Special case: Use min/max operations for unsigned compares.
21589 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21590 if (ISD::isUnsignedIntSetCC(Cond) &&
21591 (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
21592 TLI.isOperationLegal(ISD::UMIN, VT)) {
21593 // If we have a constant operand, increment/decrement it and change the
21594 // condition to avoid an invert.
21595 if (Cond == ISD::SETUGT) {
21596 // X > C --> X >= (C+1) --> X == umax(X, C+1)
21597 if (SDValue UGTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true)) {
21599 Cond = ISD::SETUGE;
21602 if (Cond == ISD::SETULT) {
21603 // X < C --> X <= (C-1) --> X == umin(X, C-1)
21604 if (SDValue ULTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false)) {
21606 Cond = ISD::SETULE;
21609 bool Invert = false;
21612 default: llvm_unreachable("Unexpected condition code");
21613 case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
21614 case ISD::SETULE: Opc = ISD::UMIN; break;
21615 case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
21616 case ISD::SETUGE: Opc = ISD::UMAX; break;
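// The PCMPEQ below relies on the identities x <=u y <=> umin(x, y) == x and
// x >=u y <=> umax(x, y) == x; the strict forms (SETUGT/SETULT) are handled
// by computing the non-strict form and inverting the result.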
21619 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
21620 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
21622 // If the logical-not of the result is required, perform that now.
21624 Result = DAG.getNOT(dl, Result, VT);
21629 // Try to use SUBUS and PCMPEQ.
21630 if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
21633 // We are handling one of the integer comparisons here. Since SSE only has
21634 // GT and EQ comparisons for integers, swapping the operands and using
21635 // multiple operations may be required for some comparisons.
21636 unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
21638 bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
21639 Cond == ISD::SETGE || Cond == ISD::SETUGE;
21640 bool Invert = Cond == ISD::SETNE ||
21641 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
21644 std::swap(Op0, Op1);
21646 // Check that the operation in question is available (most are plain SSE2,
21647 // but PCMPGTQ and PCMPEQQ have different requirements).
21648 if (VT == MVT::v2i64) {
21649 if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
21650 assert(Subtarget.hasSSE2() && "Don't know how to lower!");
21652 // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
21653 // the odd elements over the even elements.
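// All-zeros >s X (per 32-bit lane) is true exactly when the lane's sign bit
// is set. The sign of each 64-bit element lives in its high (odd) 32-bit
// lane, so the {1, 1, 3, 3} shuffle below copies that lane's result over
// both halves of the element.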
21654 if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
21655 Op0 = DAG.getConstant(0, dl, MVT::v4i32);
21656 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
21658 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
21659 static const int MaskHi[] = { 1, 1, 3, 3 };
21660 SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
21662 return DAG.getBitcast(VT, Result);
21665 if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
21666 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
21667 Op1 = DAG.getConstant(-1, dl, MVT::v4i32);
21669 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
21670 static const int MaskHi[] = { 1, 1, 3, 3 };
21671 SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
21673 return DAG.getBitcast(VT, Result);
21676 // Since SSE has no unsigned integer comparisons, we need to flip the sign
21677 // bits of the inputs before performing those operations. The lower
21678 // compare is always unsigned.
21681 SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
21683 SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
21685 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
21686 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
21688 // Cast everything to the right type.
21689 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
21690 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
21692 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
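// The high halves decide the result outright unless they compare equal; only
// then does the low-half comparison (made effectively unsigned by the sign
// flip above) break the tie.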
21693 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
21694 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
21696 // Create masks for only the low parts/high parts of the 64-bit integers.
21697 static const int MaskHi[] = { 1, 1, 3, 3 };
21698 static const int MaskLo[] = { 0, 0, 2, 2 };
21699 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
21700 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
21701 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
21703 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
21704 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
21707 Result = DAG.getNOT(dl, Result, MVT::v4i32);
21709 return DAG.getBitcast(VT, Result);
21712 if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
21713 // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
21714 // pcmpeqd + pshufd + pand.
21715 assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
21717 // First cast everything to the right type.
21718 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
21719 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
21722 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
21724 // Make sure the lower and upper halves are both all-ones.
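// The {1, 0, 3, 2} shuffle swaps the two 32-bit halves within each 64-bit
// element, so the AND below is all-ones only if both halves compared equal.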
21725 static const int Mask[] = { 1, 0, 3, 2 };
21726 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
21727 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
21730 Result = DAG.getNOT(dl, Result, MVT::v4i32);
21732 return DAG.getBitcast(VT, Result);
21736 // Since SSE has no unsigned integer comparisons, we need to flip the sign
21737 // bits of the inputs before performing those operations.
21739 MVT EltVT = VT.getVectorElementType();
21740 SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
21742 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
21743 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
21746 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
21748 // If the logical-not of the result is required, perform that now.
21750 Result = DAG.getNOT(dl, Result, VT);
21755 // Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
21756 static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
21757 const SDLoc &dl, SelectionDAG &DAG,
21758 const X86Subtarget &Subtarget,
21760 // Only support equality comparisons.
21761 if (CC != ISD::SETEQ && CC != ISD::SETNE)
21764 // Must be a bitcast from vXi1.
21765 if (Op0.getOpcode() != ISD::BITCAST)
21768 Op0 = Op0.getOperand(0);
21769 MVT VT = Op0.getSimpleValueType();
21770 if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
21771 !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
21772 !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
21775 X86::CondCode X86Cond;
21776 if (isNullConstant(Op1)) {
21777 X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
21778 } else if (isAllOnesConstant(Op1)) {
21779 // C flag is set for all ones.
21780 X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
21784 // If the input is an AND, we can combine its operands into the KTEST.
21785 bool KTestable = false;
21786 if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
21788 if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
21790 if (!isNullConstant(Op1))
21792 if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
21793 SDValue LHS = Op0.getOperand(0);
21794 SDValue RHS = Op0.getOperand(1);
21795 X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
21796 return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
21799 // If the input is an OR, we can combine its operands into the KORTEST.
21802 if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
21803 LHS = Op0.getOperand(0);
21804 RHS = Op0.getOperand(1);
21807 X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
21808 return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
21811 /// Emit flags for the given setcc condition and operands. Also returns the
21812 /// corresponding X86 condition code constant in X86CC.
21813 SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
21814 ISD::CondCode CC, const SDLoc &dl,
21815 SelectionDAG &DAG, SDValue &X86CC,
21817 bool IsSignaling) const {
21818 // Optimize to BT if possible.
21819 // Lower (X & (1 << N)) == 0 to BT(X, N).
21820 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
21821 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
21822 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
21823 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21824 if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
21828 // Try to use PTEST for a tree of ORs compared for equality with 0.
21829 // TODO: We could do AND tree with all 1s as well by using the C flag.
21830 if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
21831 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21832 if (SDValue PTEST = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG, X86CC))
21836 // Try to lower using KORTEST or KTEST.
21837 if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
21840 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
21842 if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
21843 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21844 // If the input is a setcc, then reuse the input setcc or use a new one with
21845 // the inverted condition.
21846 if (Op0.getOpcode() == X86ISD::SETCC) {
21847 bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
21849 X86CC = Op0.getOperand(0);
21851 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
21852 CCode = X86::GetOppositeBranchCondition(CCode);
21853 X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
21856 return Op0.getOperand(1);
21860 // Try to use the carry flag from the add in place of a separate CMP for:
21861 // (seteq (add X, -1), -1). Similar for setne.
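// (X + -1) wraps around exactly when X != 0, so after the ADD the carry flag
// is clear iff X == 0; COND_AE/COND_B below read that flag directly.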
21862 if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
21863 Op0.getOperand(1) == Op1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21864 if (isProfitableToUseFlagOp(Op0)) {
21865 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
21867 SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
21868 Op0.getOperand(1));
21869 DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
21870 X86::CondCode CCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
21871 X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
21872 return SDValue(New.getNode(), 1);
21876 bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
21877 X86::CondCode CondCode = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
21878 if (CondCode == X86::COND_INVALID)
21881 std::pair<SDValue, SDValue> Tmp =
21882 EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget, Chain, IsSignaling);
21883 SDValue EFLAGS = Tmp.first;
21885 Chain = Tmp.second;
21886 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
21887 X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
21891 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
21893 bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
21894 Op.getOpcode() == ISD::STRICT_FSETCCS;
21895 MVT VT = Op->getSimpleValueType(0);
21897 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
21899 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
21900 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21901 SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
21902 SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
21905 cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
21907 // Handle f128 first, since one possible outcome is a normal integer
21908 // comparison which gets handled by emitFlagsForSetcc.
21909 if (Op0.getValueType() == MVT::f128) {
21910 softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
21911 Op.getOpcode() == ISD::STRICT_FSETCCS);
21913 // If softenSetCCOperands returned a scalar, use it.
21914 if (!Op1.getNode()) {
21915 assert(Op0.getValueType() == Op.getValueType() &&
21916 "Unexpected setcc expansion!");
21918 return DAG.getMergeValues({Op0, Chain}, dl);
21924 SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC, Chain,
21925 Op.getOpcode() == ISD::STRICT_FSETCCS);
21929 SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
21932 return DAG.getMergeValues({Res, Chain}, dl);
21937 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
21938 SDValue LHS = Op.getOperand(0);
21939 SDValue RHS = Op.getOperand(1);
21940 SDValue Carry = Op.getOperand(2);
21941 SDValue Cond = Op.getOperand(3);
21944 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
21945 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
21947 // Recreate the carry if needed.
21948 EVT CarryVT = Carry.getValueType();
21949 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
21950 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
21951 Carry, DAG.getConstant(NegOne, DL, CarryVT));
21953 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
21954 SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
21955 return getSETCC(CC, Cmp.getValue(1), DL, DAG);
21958 // This function returns three things: the arithmetic computation itself
21959 // (Value), an EFLAGS result (Overflow), and a condition code (Cond). The
21960 // flag and the condition code define the case in which the arithmetic
21961 // computation overflows.
21962 static std::pair<SDValue, SDValue>
21963 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
21964 assert(Op.getResNo() == 0 && "Unexpected result number!");
21965 SDValue Value, Overflow;
21966 SDValue LHS = Op.getOperand(0);
21967 SDValue RHS = Op.getOperand(1);
21968 unsigned BaseOp = 0;
21970 switch (Op.getOpcode()) {
21971 default: llvm_unreachable("Unknown ovf instruction!");
21973 BaseOp = X86ISD::ADD;
21974 Cond = X86::COND_O;
21977 BaseOp = X86ISD::ADD;
21978 Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
21981 BaseOp = X86ISD::SUB;
21982 Cond = X86::COND_O;
21985 BaseOp = X86ISD::SUB;
21986 Cond = X86::COND_B;
21989 BaseOp = X86ISD::SMUL;
21990 Cond = X86::COND_O;
21993 BaseOp = X86ISD::UMUL;
21994 Cond = X86::COND_O;
21999 // Also sets EFLAGS.
22000 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22001 Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
22002 Overflow = Value.getValue(1);
22005 return std::make_pair(Value, Overflow);
22008 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
22009 // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus
22010 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
22011 // looks for this combo and may remove the "setcc" instruction if the "setcc"
22012 // has only one use.
22014 X86::CondCode Cond;
22015 SDValue Value, Overflow;
22016 std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
22018 SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
22019 assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
22020 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
22023 /// Return true if the opcode is an X86 logical comparison.
22024 static bool isX86LogicalCmp(SDValue Op) {
22025 unsigned Opc = Op.getOpcode();
22026 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
22027 Opc == X86ISD::SAHF)
22029 if (Op.getResNo() == 1 &&
22030 (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
22031 Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
22032 Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
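/// Return true if V is a truncate whose discarded high bits are known to be
/// zero, so the truncation does not lose any set bits.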
22038 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
22039 if (V.getOpcode() != ISD::TRUNCATE)
22042 SDValue VOp0 = V.getOperand(0);
22043 unsigned InBits = VOp0.getValueSizeInBits();
22044 unsigned Bits = V.getValueSizeInBits();
22045 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
22048 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
22049 bool AddTest = true;
22050 SDValue Cond = Op.getOperand(0);
22051 SDValue Op1 = Op.getOperand(1);
22052 SDValue Op2 = Op.getOperand(2);
22054 MVT VT = Op1.getSimpleValueType();
22057 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
22058 // are available, or into VBLENDV if AVX is available.
22059 // Otherwise, FP cmovs get lowered into a less efficient branch sequence later.
22060 if (Cond.getOpcode() == ISD::SETCC &&
22061 ((Subtarget.hasSSE2() && VT == MVT::f64) ||
22062 (Subtarget.hasSSE1() && VT == MVT::f32)) &&
22063 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
22064 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
22065 bool IsAlwaysSignaling;
22067 translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
22068 CondOp0, CondOp1, IsAlwaysSignaling);
22070 if (Subtarget.hasAVX512()) {
22072 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
22073 DAG.getTargetConstant(SSECC, DL, MVT::i8));
22074 assert(!VT.isVector() && "Not a scalar type?");
22075 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
22078 if (SSECC < 8 || Subtarget.hasAVX()) {
22079 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
22080 DAG.getTargetConstant(SSECC, DL, MVT::i8));
22082 // If we have AVX, we can use a variable vector select (VBLENDV) instead
22083 // of 3 logic instructions for size savings and potentially speed.
22084 // Unfortunately, there is no scalar form of VBLENDV.
22086 // If either operand is a +0.0 constant, don't try this. We can expect to
22087 // optimize away at least one of the logic instructions later in that
22088 // case, so that sequence would be faster than a variable blend.
22090 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
22091 // uses XMM0 as the selection register. That may need just as many
22092 // instructions as the AND/ANDN/OR sequence due to register moves, so don't bother.
22094 if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
22095 !isNullFPConstant(Op2)) {
22096 // Convert to vectors, do a VSELECT, and convert back to scalar.
22097 // All of the conversions should be optimized away.
22098 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
22099 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
22100 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
22101 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
22103 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
22104 VCmp = DAG.getBitcast(VCmpVT, VCmp);
22106 SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
22108 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
22109 VSel, DAG.getIntPtrConstant(0, DL));
22111 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
22112 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
22113 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
22117 // AVX512 fallback is to lower selects of scalar floats to masked moves.
22118 if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
22119 SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
22120 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
22123 // For v64i1 without 64-bit support we need to split and rejoin.
22124 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
22125 assert(Subtarget.hasBWI() && "Expected BWI to be legal");
22126 SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
22127 SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
22128 SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
22129 SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
22130 SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
22131 SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
22132 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
22135 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
22137 if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
22138 Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
22139 else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
22140 Op1Scalar = Op1.getOperand(0);
22142 if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
22143 Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
22144 else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
22145 Op2Scalar = Op2.getOperand(0);
22146 if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
22147 SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
22148 Op1Scalar, Op2Scalar);
22149 if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
22150 return DAG.getBitcast(VT, newSelect);
22151 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
22152 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
22153 DAG.getIntPtrConstant(0, DL));
22157 if (Cond.getOpcode() == ISD::SETCC) {
22158 if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
22160 // If the condition was updated, it's possible that the operands of the
22161 // select were also updated (for example, EmitTest has a RAUW). Refresh
22162 // the local references to the select operands in case they got stale.
22163 Op1 = Op.getOperand(1);
22164 Op2 = Op.getOperand(2);
22168 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
22169 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
22170 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
22171 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
22172 // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
22173 // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
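// The last two patterns work because (x & 0x1) is either 0 or 1, so its
// negation is either all-zeros or all-ones: a mask that drops or keeps z
// before the final XOR/OR with y.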
22174 if (Cond.getOpcode() == X86ISD::SETCC &&
22175 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
22176 isNullConstant(Cond.getOperand(1).getOperand(1))) {
22177 SDValue Cmp = Cond.getOperand(1);
22178 unsigned CondCode = Cond.getConstantOperandVal(0);
22180 if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
22181 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
22182 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
22183 SDValue CmpOp0 = Cmp.getOperand(0);
22185 // Apply further optimizations for special cases
22186 // (select (x != 0), -1, 0) -> neg & sbb
22187 // (select (x == 0), 0, -1) -> neg & sbb
22188 if (isNullConstant(Y) &&
22189 (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
22190 SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
22191 SDValue CmpZero = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Zero, CmpOp0);
22192 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22193 Zero = DAG.getConstant(0, DL, Op.getValueType());
22194 return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, CmpZero);
22197 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
22198 CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
22199 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22201 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22202 SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
22203 SDValue Res = // Res = 0 or -1.
22204 DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);
22206 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
22207 Res = DAG.getNOT(DL, Res, Res.getValueType());
22209 if (!isNullConstant(Op2))
22210 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
22212 } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
22213 Cmp.getOperand(0).getOpcode() == ISD::AND &&
22214 isOneConstant(Cmp.getOperand(0).getOperand(1))) {
22215 SDValue CmpOp0 = Cmp.getOperand(0);
22216 SDValue Src1, Src2;
22217 // True if Op2 is an XOR or OR operator and one of its operands matches Op1:
22219 // ( a , a op b) || ( b , a op b)
22220 auto isOrXorPattern = [&]() {
22221 if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
22222 (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
22224 Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
22231 if (isOrXorPattern()) {
22233 unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
22234 // We need a mask of all zeros or all ones with the same size as the other operands.
22236 if (CmpSz > VT.getSizeInBits())
22237 Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
22238 else if (CmpSz < VT.getSizeInBits())
22239 Neg = DAG.getNode(ISD::AND, DL, VT,
22240 DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
22241 DAG.getConstant(1, DL, VT));
22244 SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
22245 Neg); // -(and (x, 0x1))
22246 SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
22247 return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // And Op y
22252 // Look past (and (setcc_carry (cmp ...)), 1).
22253 if (Cond.getOpcode() == ISD::AND &&
22254 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
22255 isOneConstant(Cond.getOperand(1)))
22256 Cond = Cond.getOperand(0);
22258 // If condition flag is set by a X86ISD::CMP, then use it as the condition
22259 // setting operand in place of the X86ISD::SETCC.
22260 unsigned CondOpcode = Cond.getOpcode();
22261 if (CondOpcode == X86ISD::SETCC ||
22262 CondOpcode == X86ISD::SETCC_CARRY) {
22263 CC = Cond.getOperand(0);
22265 SDValue Cmp = Cond.getOperand(1);
22266 bool IllegalFPCMov = false;
22267 if (VT.isFloatingPoint() && !VT.isVector() &&
22268 !isScalarFPTypeInSSEReg(VT)) // FPStack?
22269 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
22271 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
22272 Cmp.getOpcode() == X86ISD::BT) { // FIXME
22276 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
22277 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
22278 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
22280 X86::CondCode X86Cond;
22281 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
22283 CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
22288 // Look past the truncate if the high bits are known zero.
22289 if (isTruncWithZeroHighBitsInput(Cond, DAG))
22290 Cond = Cond.getOperand(0);
22292 // We know the result of AND is compared against zero. Try to match it with BT.
22294 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
22296 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
22305 CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
22306 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
22309 // a < b ? -1 : 0 -> RES = ~setcc_carry
22310 // a < b ? 0 : -1 -> RES = setcc_carry
22311 // a >= b ? -1 : 0 -> RES = setcc_carry
22312 // a >= b ? 0 : -1 -> RES = ~setcc_carry
22313 if (Cond.getOpcode() == X86ISD::SUB) {
22314 Cond = ConvertCmpIfNecessary(Cond, DAG);
22315 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
22317 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
22318 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
22319 (isNullConstant(Op1) || isNullConstant(Op2))) {
22321 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
22322 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
22323 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
22324 return DAG.getNOT(DL, Res, Res.getValueType());
22329 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
22330 // widen the cmov and push the truncate through. This avoids introducing a new
22331 // branch during isel and doesn't add any extensions.
22332 if (Op.getValueType() == MVT::i8 &&
22333 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
22334 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
22335 if (T1.getValueType() == T2.getValueType() &&
22336 // Blacklist CopyFromReg to avoid partial register stalls.
22337 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
22338 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
22340 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
22344 // Or finally, promote i8 cmovs if we have CMOV,
22345 // or i16 cmovs if it won't prevent folding a load.
22346 // FIXME: we should not limit promotion of the i8 case to only when the CMOV is
22347 // legal, but EmitLoweredSelect() cannot deal with these extensions
22348 // being inserted between two CMOVs (likewise in the i16 case).
22349 // https://bugs.llvm.org/show_bug.cgi?id=40974
22350 if ((Op.getValueType() == MVT::i8 && Subtarget.hasCMov()) ||
22351 (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) &&
22352 !MayFoldLoad(Op2))) {
22353 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
22354 Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
22355 SDValue Ops[] = { Op2, Op1, CC, Cond };
22356 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
22357 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
22360 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
22361 // the condition is true.
22362 SDValue Ops[] = { Op2, Op1, CC, Cond };
22363 return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
22366 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
22367 const X86Subtarget &Subtarget,
22368 SelectionDAG &DAG) {
22369 MVT VT = Op->getSimpleValueType(0);
22370 SDValue In = Op->getOperand(0);
22371 MVT InVT = In.getSimpleValueType();
22372 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
22373 MVT VTElt = VT.getVectorElementType();
22376 unsigned NumElts = VT.getVectorNumElements();
22378 // Extend VT if the scalar type is i8/i16 and BWI is not supported.
22380 if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
22381 // If v16i32 is to be avoided, we'll need to split and concatenate.
22382 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
22383 return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
22385 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
22388 // Widen to 512-bits if VLX is not supported.
22389 MVT WideVT = ExtVT;
22390 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
22391 NumElts *= 512 / ExtVT.getSizeInBits();
22392 InVT = MVT::getVectorVT(MVT::i1, NumElts);
22393 In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
22394 In, DAG.getIntPtrConstant(0, dl));
22395 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
22399 MVT WideEltVT = WideVT.getVectorElementType();
22400 if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
22401 (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
22402 V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
22404 SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
22405 SDValue Zero = DAG.getConstant(0, dl, WideVT);
22406 V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
22409 // Truncate if we had to extend i16/i8 above.
22411 WideVT = MVT::getVectorVT(VTElt, NumElts);
22412 V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
22415 // Extract back to 128/256-bit if we widened.
22417 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
22418 DAG.getIntPtrConstant(0, dl));
22423 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
22424 SelectionDAG &DAG) {
22425 SDValue In = Op->getOperand(0);
22426 MVT InVT = In.getSimpleValueType();
22428 if (InVT.getVectorElementType() == MVT::i1)
22429 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
22431 assert(Subtarget.hasAVX() && "Expected AVX support");
22432 return LowerAVXExtend(Op, DAG, Subtarget);
22435 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
22436 // For sign extend this needs to handle all vector sizes and SSE4.1 and
22437 // non-SSE4.1 targets. For zero extend this should only handle inputs of
22438 // MVT::v64i8 when BWI is not supported, but AVX512 is.
22439 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
22440 const X86Subtarget &Subtarget,
22441 SelectionDAG &DAG) {
22442 SDValue In = Op->getOperand(0);
22443 MVT VT = Op->getSimpleValueType(0);
22444 MVT InVT = In.getSimpleValueType();
22446 MVT SVT = VT.getVectorElementType();
22447 MVT InSVT = InVT.getVectorElementType();
22448 assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
22450 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
22452 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
22454 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
22455 !(VT.is256BitVector() && Subtarget.hasAVX()) &&
22456 !(VT.is512BitVector() && Subtarget.hasAVX512()))
22460 unsigned Opc = Op.getOpcode();
22461 unsigned NumElts = VT.getVectorNumElements();
22463 // For 256-bit vectors, we only need the lower (128-bit) half of the input.
22464 // For 512-bit vectors, we need 128-bits or 256-bits.
22465 if (InVT.getSizeInBits() > 128) {
22466 // Input needs to be at least the same number of elements as output, and
22467 // at least 128-bits.
22468 int InSize = InSVT.getSizeInBits() * NumElts;
22469 In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
22470 InVT = In.getSimpleValueType();
22473 // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit results,
22474 // so are legal and shouldn't occur here. AVX2/AVX512 pmovsx* instructions still
22475 // need to be handled here for 256/512-bit results.
22476 if (Subtarget.hasInt256()) {
22477 assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
22479 if (InVT.getVectorNumElements() != NumElts)
22480 return DAG.getNode(Op.getOpcode(), dl, VT, In);
22482 // FIXME: Apparently we create inreg operations that could be regular extends.
22485 Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
22486 : ISD::ZERO_EXTEND;
22487 return DAG.getNode(ExtOpc, dl, VT, In);
22490 // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
22491 if (Subtarget.hasAVX()) {
22492 assert(VT.is256BitVector() && "256-bit vector expected");
22493 MVT HalfVT = VT.getHalfNumVectorElementsVT();
22494 int HalfNumElts = HalfVT.getVectorNumElements();
22496 unsigned NumSrcElts = InVT.getVectorNumElements();
22497 SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
22498 for (int i = 0; i != HalfNumElts; ++i)
22499 HiMask[i] = HalfNumElts + i;
22501 SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
22502 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
22503 Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
22504 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
22507 // We should only get here for sign extend.
22508 assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
22509 assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
22511 // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
22513 SDValue SignExt = Curr;
22515 // As SRAI is only available on i16/i32 types, we expand only up to i32
22516 // and handle i64 separately.
22517 if (InVT != MVT::v4i32) {
22518 MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
22520 unsigned DestWidth = DestVT.getScalarSizeInBits();
22521 unsigned Scale = DestWidth / InSVT.getSizeInBits();
22523 unsigned InNumElts = InVT.getVectorNumElements();
22524 unsigned DestElts = DestVT.getVectorNumElements();
22526 // Build a shuffle mask that takes each input element and places it in the
22527 // MSBs of the new element size.
22528 SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
22529 for (unsigned i = 0; i != DestElts; ++i)
22530 Mask[i * Scale + (Scale - 1)] = i;
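// E.g. for a v16i8 source extending to v4i32 (Scale == 4) this builds the
// mask <-1,-1,-1,0, -1,-1,-1,1, -1,-1,-1,2, -1,-1,-1,3>, placing each source
// byte in the most significant byte of a 32-bit lane.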
22532 Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
22533 Curr = DAG.getBitcast(DestVT, Curr);
22535 unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
22536 SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
22537 DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
22540 if (VT == MVT::v2i64) {
22541 assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
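// 0 >s Curr (per 32-bit lane) yields an all-ones lane exactly when Curr is
// negative, i.e. the lane's sign word. Interleaving with {0, 4, 1, 5} then
// pairs each sign-extended low word with its sign word to form the i64
// results.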
22542 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
22543 SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
22544 SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
22545 SignExt = DAG.getBitcast(VT, SignExt);
22551 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
22552 SelectionDAG &DAG) {
22553 MVT VT = Op->getSimpleValueType(0);
22554 SDValue In = Op->getOperand(0);
22555 MVT InVT = In.getSimpleValueType();
22558 if (InVT.getVectorElementType() == MVT::i1)
22559 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
22561 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
22562 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
22563 "Expected same number of elements");
22564 assert((VT.getVectorElementType() == MVT::i16 ||
22565 VT.getVectorElementType() == MVT::i32 ||
22566 VT.getVectorElementType() == MVT::i64) &&
22567 "Unexpected element type");
22568 assert((InVT.getVectorElementType() == MVT::i8 ||
22569 InVT.getVectorElementType() == MVT::i16 ||
22570 InVT.getVectorElementType() == MVT::i32) &&
22571 "Unexpected element type");
22573 // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
22574 if (InVT == MVT::v8i8) {
22575 if (VT != MVT::v8i64)
22578 In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
22579 MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
22580 return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, VT, In);
22583 if (Subtarget.hasInt256())
22586 // Optimize vectors in AVX mode:
22587 // Sign extend v8i16 to v8i32 and v4i32 to v4i64.
22590 // Divide the input vector into two parts;
22591 // for v4i32 the high shuffle mask will be {2, 3, -1, -1}.
22592 // Use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
22593 // then concat the vectors back to the original VT.
22594 MVT HalfVT = VT.getHalfNumVectorElementsVT();
22595 SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
22597 unsigned NumElems = InVT.getVectorNumElements();
22598 SmallVector<int,8> ShufMask(NumElems, -1);
22599 for (unsigned i = 0; i != NumElems/2; ++i)
22600 ShufMask[i] = i + NumElems/2;
22602 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
22603 OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
22605 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
22608 /// Change a vector store into a pair of half-size vector stores.
22609 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
22610 SDValue StoredVal = Store->getValue();
22611 assert((StoredVal.getValueType().is256BitVector() ||
22612 StoredVal.getValueType().is512BitVector()) &&
22613 "Expecting 256/512-bit op");
22615 // Splitting volatile memory ops is not allowed unless the operation was not
22616 // legal to begin with. Assume the input store is legal (this transform is
22617 // only used for targets with AVX). Note: It is possible that we have an
22618 // illegal type like v2i128, and so we could allow splitting a volatile store
22619 // in that case if that is important.
22620 if (!Store->isSimple())
22623 EVT StoreVT = StoredVal.getValueType();
22624 unsigned NumElems = StoreVT.getVectorNumElements();
22625 unsigned HalfSize = StoredVal.getValueSizeInBits() / 2;
22626 unsigned HalfAlign = (128 == HalfSize ? 16 : 32);
22629 SDValue Value0 = extractSubVector(StoredVal, 0, DAG, DL, HalfSize);
22630 SDValue Value1 = extractSubVector(StoredVal, NumElems / 2, DAG, DL, HalfSize);
22631 SDValue Ptr0 = Store->getBasePtr();
22632 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, HalfAlign, DL);
22633 unsigned Alignment = Store->getAlignment();
22635 DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
22636 Alignment, Store->getMemOperand()->getFlags());
22637 SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
22638 Store->getPointerInfo().getWithOffset(HalfAlign),
22639 MinAlign(Alignment, HalfAlign),
22640 Store->getMemOperand()->getFlags());
22641 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
22644 /// Scalarize a vector store, bitcasting to TargetVT to determine the scalar type.
22646 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
22647 SelectionDAG &DAG) {
22648 SDValue StoredVal = Store->getValue();
22649 assert(StoreVT.is128BitVector() &&
22650 StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
22651 StoredVal = DAG.getBitcast(StoreVT, StoredVal);
22653 // Splitting volatile memory ops is not allowed unless the operation was not
22654 // legal to begin with. We are assuming the input op is legal (this transform
22655 // is only used for targets with AVX).
22656 if (!Store->isSimple())
22659 MVT StoreSVT = StoreVT.getScalarType();
22660 unsigned NumElems = StoreVT.getVectorNumElements();
22661 unsigned ScalarSize = StoreSVT.getStoreSize();
22662 unsigned Alignment = Store->getAlignment();
22665 SmallVector<SDValue, 4> Stores;
22666 for (unsigned i = 0; i != NumElems; ++i) {
22667 unsigned Offset = i * ScalarSize;
22668 SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(), Offset, DL);
22669 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
22670 DAG.getIntPtrConstant(i, DL));
22671 SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
22672 Store->getPointerInfo().getWithOffset(Offset),
22673 MinAlign(Alignment, Offset),
22674 Store->getMemOperand()->getFlags());
22675 Stores.push_back(Ch);
22677 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
22680 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
22681 SelectionDAG &DAG) {
22682 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
22684 SDValue StoredVal = St->getValue();
22686 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
22687 if (StoredVal.getValueType().isVector() &&
22688 StoredVal.getValueType().getVectorElementType() == MVT::i1) {
22689 assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&
22691 assert(!St->isTruncatingStore() && "Expected non-truncating store");
22692 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
22693 "Expected AVX512F without AVX512DQI");
22695 StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
22696 DAG.getUNDEF(MVT::v16i1), StoredVal,
22697 DAG.getIntPtrConstant(0, dl));
22698 StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
22699 StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
22701 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
22702 St->getPointerInfo(), St->getAlignment(),
22703 St->getMemOperand()->getFlags());
22706 if (St->isTruncatingStore())
22709 // If this is a 256-bit store of concatenated ops, we are better off splitting
22710 // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
22711 // and each half can execute independently. Some cores would split the op into
22712 // halves anyway, so the concat (vinsertf128) is purely an extra op.
22713 MVT StoreVT = StoredVal.getSimpleValueType();
22714 if (StoreVT.is256BitVector()) {
22715 SmallVector<SDValue, 4> CatOps;
22716 if (StoredVal.hasOneUse() && collectConcatOps(StoredVal.getNode(), CatOps))
22717 return splitVectorStore(St, DAG);
22721 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22722 assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
22724 assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
22725 TargetLowering::TypeWidenVector && "Unexpected type action!");
22727 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
22728 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
22729 DAG.getUNDEF(StoreVT));
22731 if (Subtarget.hasSSE2()) {
22732 // Widen the vector, cast to a v2x64 type, extract the single 64-bit element,
22734 MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
22735 MVT CastVT = MVT::getVectorVT(StVT, 2);
22736 StoredVal = DAG.getBitcast(CastVT, StoredVal);
22737 StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
22738 DAG.getIntPtrConstant(0, dl));
22740 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
22741 St->getPointerInfo(), St->getAlignment(),
22742 St->getMemOperand()->getFlags());
22744 assert(Subtarget.hasSSE1() && "Expected SSE");
22745 SDVTList Tys = DAG.getVTList(MVT::Other);
22746 SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
22747 return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
22748 St->getMemOperand());
22751 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
22752 // may emit an illegal shuffle but the expansion is still better than scalar
22753 // code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
22754 // we'll emit a shuffle and an arithmetic shift.
22755 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
22756 // TODO: It is possible to support ZExt by zeroing the undef values during
22757 // the shuffle phase or after the shuffle.
22758 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
22759 SelectionDAG &DAG) {
22760 MVT RegVT = Op.getSimpleValueType();
22761 assert(RegVT.isVector() && "We only custom lower vector loads.");
22762 assert(RegVT.isInteger() &&
22763 "We only custom lower integer vector loads.");
22765 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
22768 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
22769 if (RegVT.getVectorElementType() == MVT::i1) {
22770 assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
22771 assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
22772 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
22773 "Expected AVX512F without AVX512DQI");
22775 SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
22776 Ld->getPointerInfo(), Ld->getAlignment(),
22777 Ld->getMemOperand()->getFlags());
22779 // Replace chain users with the new chain.
22780 assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
22782 SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
22783 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
22784 DAG.getBitcast(MVT::v16i1, Val),
22785 DAG.getIntPtrConstant(0, dl));
22786 return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
22792 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
22793 /// each of which has no other use apart from the AND / OR.
22794 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
22795 Opc = Op.getOpcode();
22796 if (Opc != ISD::OR && Opc != ISD::AND)
22798 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
22799 Op.getOperand(0).hasOneUse() &&
22800 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
22801 Op.getOperand(1).hasOneUse());
22804 /// Return true if the node is an ISD::XOR of an X86ISD::SETCC and 1, and the
22805 /// SETCC node has a single use.
22806 static bool isXor1OfSetCC(SDValue Op) {
22807 if (Op.getOpcode() != ISD::XOR)
22809 if (isOneConstant(Op.getOperand(1)))
22810 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
22811 Op.getOperand(0).hasOneUse();
22815 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
22816 bool addTest = true;
22817 SDValue Chain = Op.getOperand(0);
22818 SDValue Cond = Op.getOperand(1);
22819 SDValue Dest = Op.getOperand(2);
22822 bool Inverted = false;
22824 if (Cond.getOpcode() == ISD::SETCC) {
22825 // Check for setcc([su]{add,sub,mul}o == 0).
22826 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
22827 isNullConstant(Cond.getOperand(1)) &&
22828 Cond.getOperand(0).getResNo() == 1 &&
22829 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
22830 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
22831 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
22832 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
22833 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
22834 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
22836 Cond = Cond.getOperand(0);
22838 if (SDValue NewCond = LowerSETCC(Cond, DAG))
22843 // FIXME: LowerXALUO doesn't handle these!!
22844 else if (Cond.getOpcode() == X86ISD::ADD ||
22845 Cond.getOpcode() == X86ISD::SUB ||
22846 Cond.getOpcode() == X86ISD::SMUL ||
22847 Cond.getOpcode() == X86ISD::UMUL)
22848 Cond = LowerXALUO(Cond, DAG);
22851 // Look past (and (setcc_carry (cmp ...)), 1).
22852 if (Cond.getOpcode() == ISD::AND &&
22853 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
22854 isOneConstant(Cond.getOperand(1)))
22855 Cond = Cond.getOperand(0);
22857 // If condition flag is set by a X86ISD::CMP, then use it as the condition
22858 // setting operand in place of the X86ISD::SETCC.
22859 unsigned CondOpcode = Cond.getOpcode();
22860 if (CondOpcode == X86ISD::SETCC ||
22861 CondOpcode == X86ISD::SETCC_CARRY) {
22862 CC = Cond.getOperand(0);
22864 SDValue Cmp = Cond.getOperand(1);
22865 unsigned Opc = Cmp.getOpcode();
22866 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
22867 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
22871 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
22875 // These can only come from an arithmetic instruction with overflow,
22876 // e.g. SADDO, UADDO.
22877 Cond = Cond.getOperand(1);
22883 CondOpcode = Cond.getOpcode();
22884 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
22885 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
22886 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
22888 X86::CondCode X86Cond;
22889 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
22892 X86Cond = X86::GetOppositeBranchCondition(X86Cond);
22894 CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
22898 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
22899 SDValue Cmp = Cond.getOperand(0).getOperand(1);
22900 if (CondOpc == ISD::OR) {
22901 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
22902 // two branches instead of an explicit OR instruction with a separate test.
22904 if (Cmp == Cond.getOperand(1).getOperand(1) &&
22905 isX86LogicalCmp(Cmp)) {
22906 CC = Cond.getOperand(0).getOperand(0);
22907 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22908 Chain, Dest, CC, Cmp);
22909 CC = Cond.getOperand(1).getOperand(0);
22913 } else { // ISD::AND
22914 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
22915 // two branches instead of an explicit AND instruction with a
22916 // separate test. However, we only do this if this block doesn't
22917 // have a fall-through edge, because this requires an explicit
22918 // jmp when the condition is false.
22919 if (Cmp == Cond.getOperand(1).getOperand(1) &&
22920 isX86LogicalCmp(Cmp) &&
22921 Op.getNode()->hasOneUse()) {
22922 X86::CondCode CCode0 =
22923 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
22924 CCode0 = X86::GetOppositeBranchCondition(CCode0);
22925 CC = DAG.getTargetConstant(CCode0, dl, MVT::i8);
22926 SDNode *User = *Op.getNode()->use_begin();
22927 // Look for an unconditional branch following this conditional branch.
22928 // We need this because we need to reverse the successors in order
22929 // to implement FCMP_OEQ.
22930 if (User->getOpcode() == ISD::BR) {
22931 SDValue FalseBB = User->getOperand(1);
22933 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
22934 assert(NewBR == User);
22938 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), Chain,
22940 X86::CondCode CCode1 =
22941 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
22942 CCode1 = X86::GetOppositeBranchCondition(CCode1);
22943 CC = DAG.getTargetConstant(CCode1, dl, MVT::i8);
22949 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
22950 // Recognize the xorb (setcc), 1 pattern. The xor inverts the condition.
22951 // It should be transformed during DAG combining except when the condition
22952 // is set by an arithmetic-with-overflow node.
22953 X86::CondCode CCode =
22954 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
22955 CCode = X86::GetOppositeBranchCondition(CCode);
22956 CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
22957 Cond = Cond.getOperand(0).getOperand(1);
22959 } else if (Cond.getOpcode() == ISD::SETCC &&
22960 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
22961 // For FCMP_OEQ, we can emit
22962 // two branches instead of an explicit AND instruction with a
22963 // separate test. However, we only do this if this block doesn't
22964 // have a fall-through edge, because this requires an explicit
22965 // jmp when the condition is false.
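    // Roughly, as an illustrative assembly sketch (not the exact MI sequence),
    // "branch if a == b" on floats becomes:
    //   ucomiss %xmm1, %xmm0
    //   jne     .LBB_false      ; ordered and not equal
    //   jp      .LBB_false      ; unordered (NaN) also fails OEQ
    //   jmp     .LBB_true       ; or fall through
    // which is why the successors of the following unconditional branch get
    // reversed below.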
22966 if (Op.getNode()->hasOneUse()) {
22967 SDNode *User = *Op.getNode()->use_begin();
22968 // Look for an unconditional branch following this conditional branch.
22969 // We need this because we need to reverse the successors in order
22970 // to implement FCMP_OEQ.
22971 if (User->getOpcode() == ISD::BR) {
22972 SDValue FalseBB = User->getOperand(1);
22974 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
22975 assert(NewBR == User);
22979 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
22980 Cond.getOperand(0), Cond.getOperand(1));
22981 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22982 CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
22983 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22984 Chain, Dest, CC, Cmp);
22985 CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
22990 } else if (Cond.getOpcode() == ISD::SETCC &&
22991 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
22992     // For FCMP_UNE, we can emit
22993     // two branches instead of an explicit OR instruction with a
22994     // separate test.
22995 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
22996 Cond.getOperand(0), Cond.getOperand(1));
22997 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22998 CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
22999 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
23000 Chain, Dest, CC, Cmp);
23001 CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
23008   // Look past the truncate if the high bits are known zero.
23009 if (isTruncWithZeroHighBitsInput(Cond, DAG))
23010 Cond = Cond.getOperand(0);
23012   // We know the result of the AND is compared against zero. Try to match
23013   // it to a BT instruction.
23014 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
23016 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, dl, DAG, BTCC)) {
23025 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
23026 CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23027 Cond = EmitTest(Cond, X86Cond, dl, DAG, Subtarget);
23029 Cond = ConvertCmpIfNecessary(Cond, DAG);
23030 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
23031 Chain, Dest, CC, Cond);
23034 // Lower dynamic stack allocation to an _alloca call for Cygwin/MinGW targets.
23035 // Calls to _alloca are needed to probe the stack when allocating more than 4K
23036 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
23037 // that the guard pages used by the OS virtual memory manager are allocated in
23038 // the correct sequence.
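// Illustrative sketch only (the exact probing code is emitted elsewhere): an
// 8 KiB allocation must touch each newly reserved page in order, roughly
//   sub  rsp, 4096         ; extend the stack by one page
//   mov  byte ptr [rsp], 0 ; touch it so the OS guard page advances
//   (repeat until the requested size is reached)
// Writing only to the final address could skip past the guard page and fault
// instead of growing the stack, which is why _alloca/__chkstk probes in 4K
// steps.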
23040 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
23041 SelectionDAG &DAG) const {
23042 MachineFunction &MF = DAG.getMachineFunction();
23043 bool SplitStack = MF.shouldSplitStack();
23044 bool EmitStackProbe = !getStackProbeSymbolName(MF).empty();
23045 bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
23046 SplitStack || EmitStackProbe;
23050 SDNode *Node = Op.getNode();
23051 SDValue Chain = Op.getOperand(0);
23052 SDValue Size = Op.getOperand(1);
23053 MaybeAlign Alignment(Op.getConstantOperandVal(2));
23054 EVT VT = Node->getValueType(0);
23056 // Chain the dynamic stack allocation so that it doesn't modify the stack
23057 // pointer when other instructions are using the stack.
23058 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
23060 bool Is64Bit = Subtarget.is64Bit();
23061 MVT SPTy = getPointerTy(DAG.getDataLayout());
23065 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23066 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
23067 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
23068 " not tell us which reg is the stack pointer!");
23070 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
23071 Chain = SP.getValue(1);
23072 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
23073 const Align StackAlign(TFI.getStackAlignment());
23074 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
23075 if (Alignment && Alignment > StackAlign)
23077 DAG.getNode(ISD::AND, dl, VT, Result,
23078 DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
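    // Worked example with illustrative numbers: if SP = 0x1000, Size = 100 and
    // the requested Alignment is 32 (greater than a 16-byte StackAlign), then
    // SP - Size = 0x0F9C and masking with ~31 gives Result = 0x0F80, i.e. the
    // allocation grows slightly so its base is 32-byte aligned.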
23079 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
23080 } else if (SplitStack) {
23081 MachineRegisterInfo &MRI = MF.getRegInfo();
23084     // The 64-bit implementation of segmented stacks needs to clobber both r10
23085     // and r11. This makes it impossible to use it along with nested parameters.
23086 const Function &F = MF.getFunction();
23087 for (const auto &A : F.args()) {
23088 if (A.hasNestAttr())
23089 report_fatal_error("Cannot use segmented stacks with functions that "
23090 "have nested arguments.");
23094 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
23095 Register Vreg = MRI.createVirtualRegister(AddrRegClass);
23096 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
23097 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
23098 DAG.getRegister(Vreg, SPTy));
23100 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
23101 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
23102 MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
23104 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23105 Register SPReg = RegInfo->getStackRegister();
23106 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
23107 Chain = SP.getValue(1);
23110 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
23111 DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
23112 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
23118 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
23119 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
23121 SDValue Ops[2] = {Result, Chain};
23122 return DAG.getMergeValues(Ops, dl);
23125 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
23126 MachineFunction &MF = DAG.getMachineFunction();
23127 auto PtrVT = getPointerTy(MF.getDataLayout());
23128 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
23130 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
23133 if (!Subtarget.is64Bit() ||
23134 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
23135 // vastart just stores the address of the VarArgsFrameIndex slot into the
23136 // memory location argument.
23137 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
23138 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
23139 MachinePointerInfo(SV));
23143   //   gp_offset         (0 - 6 * 8)
23144   //   fp_offset         (48 - 48 + 8 * 16)
23145   //   overflow_arg_area (points to parameters passed in memory).
23146   //   reg_save_area     (points to the start of the register save area).
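  // For reference, this matches the SysV AMD64 va_list layout (sketch; on the
  // ILP32/x32 variant the pointer members are 4 bytes, which is what the
  // isTarget64BitLP64() checks below account for):
  //   struct __va_list_tag {
  //     unsigned int gp_offset;         // byte offset 0
  //     unsigned int fp_offset;         // byte offset 4
  //     void        *overflow_arg_area; // byte offset 8
  //     void        *reg_save_area;     // byte offset 16 (12 on x32)
  //   };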
23147 SmallVector<SDValue, 8> MemOps;
23148 SDValue FIN = Op.getOperand(1);
23150 SDValue Store = DAG.getStore(
23151 Op.getOperand(0), DL,
23152 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
23153 MachinePointerInfo(SV));
23154 MemOps.push_back(Store);
23157 FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
23158 Store = DAG.getStore(
23159 Op.getOperand(0), DL,
23160 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
23161 MachinePointerInfo(SV, 4));
23162 MemOps.push_back(Store);
23164 // Store ptr to overflow_arg_area
23165 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
23166 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
23168 DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
23169 MemOps.push_back(Store);
23171 // Store ptr to reg_save_area.
23172 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
23173 Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
23174 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
23175 Store = DAG.getStore(
23176 Op.getOperand(0), DL, RSFIN, FIN,
23177 MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
23178 MemOps.push_back(Store);
23179 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
23182 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
23183 assert(Subtarget.is64Bit() &&
23184 "LowerVAARG only handles 64-bit va_arg!");
23185 assert(Op.getNumOperands() == 4);
23187 MachineFunction &MF = DAG.getMachineFunction();
23188 if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
23189 // The Win64 ABI uses char* instead of a structure.
23190 return DAG.expandVAArg(Op.getNode());
23192 SDValue Chain = Op.getOperand(0);
23193 SDValue SrcPtr = Op.getOperand(1);
23194 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
23195 unsigned Align = Op.getConstantOperandVal(3);
23198 EVT ArgVT = Op.getNode()->getValueType(0);
23199 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
23200 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
23203 // Decide which area this value should be read from.
23204 // TODO: Implement the AMD64 ABI in its entirety. This simple
23205 // selection mechanism works only for the basic types.
23206 if (ArgVT == MVT::f80) {
23207 llvm_unreachable("va_arg for f80 not yet implemented");
23208 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
23209 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
23210 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
23211 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
23213 llvm_unreachable("Unhandled argument type in LowerVAARG");
23216 if (ArgMode == 2) {
23217 // Sanity Check: Make sure using fp_offset makes sense.
23218 assert(!Subtarget.useSoftFloat() &&
23219 !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
23220 Subtarget.hasSSE1());
23223   // Insert a VAARG_64 node into the DAG.
23224   // VAARG_64 returns two values: the variable argument address and the chain.
23225 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
23226 DAG.getConstant(ArgMode, dl, MVT::i8),
23227 DAG.getConstant(Align, dl, MVT::i32)};
23228 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
23229 SDValue VAARG = DAG.getMemIntrinsicNode(
23230 X86ISD::VAARG_64, dl,
23231 VTs, InstOps, MVT::i64,
23232 MachinePointerInfo(SV),
23234 MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
23235 Chain = VAARG.getValue(1);
23237 // Load the next argument and return it
23238 return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
23241 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
23242 SelectionDAG &DAG) {
23243 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
23244 // where a va_list is still an i8*.
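  // The constant 24 used in the memcpy below is simply the size of that struct
  // with 8-byte pointers: 4 (gp_offset) + 4 (fp_offset) + 8 + 8 = 24 bytes.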
23245 assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
23246 if (Subtarget.isCallingConvWin64(
23247 DAG.getMachineFunction().getFunction().getCallingConv()))
23248 // Probably a Win64 va_copy.
23249 return DAG.expandVACopy(Op.getNode());
23251 SDValue Chain = Op.getOperand(0);
23252 SDValue DstPtr = Op.getOperand(1);
23253 SDValue SrcPtr = Op.getOperand(2);
23254 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
23255 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
23258 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
23259 DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
23261 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
23264 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
23265 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
23269 case X86ISD::VSHLI:
23270 return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
23273 case X86ISD::VSRLI:
23274 return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
23277 case X86ISD::VSRAI:
23278 return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
23280 llvm_unreachable("Unknown target vector shift node");
23283 /// Handle vector element shifts where the shift amount is a constant.
23284 /// Takes immediate version of shift as input.
23285 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
23286 SDValue SrcOp, uint64_t ShiftAmt,
23287 SelectionDAG &DAG) {
23288 MVT ElementType = VT.getVectorElementType();
23290   // Bitcast the source vector to the output type; this is mainly necessary for
23291   // vXi8/vXi64 shifts.
23292 if (VT != SrcOp.getSimpleValueType())
23293 SrcOp = DAG.getBitcast(VT, SrcOp);
23295 // Fold this packed shift into its first operand if ShiftAmt is 0.
23299 // Check for ShiftAmt >= element width
23300 if (ShiftAmt >= ElementType.getSizeInBits()) {
23301 if (Opc == X86ISD::VSRAI)
23302 ShiftAmt = ElementType.getSizeInBits() - 1;
23304 return DAG.getConstant(0, dl, VT);
23307 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
23308 && "Unknown target vector shift-by-constant node");
23310 // Fold this packed vector shift into a build vector if SrcOp is a
23311 // vector of Constants or UNDEFs.
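  // For example (illustrative constants): a VSHLI of <i32 1, i32 2, i32 3, i32 4>
  // by 2 folds directly to the build_vector <i32 4, i32 8, i32 12, i32 16>;
  // undef elements become 0 so the shifted-in bits stay well defined.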
23312 if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
23313 SmallVector<SDValue, 8> Elts;
23314 unsigned NumElts = SrcOp->getNumOperands();
23317 default: llvm_unreachable("Unknown opcode!");
23318 case X86ISD::VSHLI:
23319 for (unsigned i = 0; i != NumElts; ++i) {
23320 SDValue CurrentOp = SrcOp->getOperand(i);
23321 if (CurrentOp->isUndef()) {
23322 // Must produce 0s in the correct bits.
23323 Elts.push_back(DAG.getConstant(0, dl, ElementType));
23326 auto *ND = cast<ConstantSDNode>(CurrentOp);
23327 const APInt &C = ND->getAPIntValue();
23328 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
23331 case X86ISD::VSRLI:
23332 for (unsigned i = 0; i != NumElts; ++i) {
23333 SDValue CurrentOp = SrcOp->getOperand(i);
23334 if (CurrentOp->isUndef()) {
23335 // Must produce 0s in the correct bits.
23336 Elts.push_back(DAG.getConstant(0, dl, ElementType));
23339 auto *ND = cast<ConstantSDNode>(CurrentOp);
23340 const APInt &C = ND->getAPIntValue();
23341 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
23344 case X86ISD::VSRAI:
23345 for (unsigned i = 0; i != NumElts; ++i) {
23346 SDValue CurrentOp = SrcOp->getOperand(i);
23347 if (CurrentOp->isUndef()) {
23348         // All shifted-in bits must be the same, so use 0.
23349 Elts.push_back(DAG.getConstant(0, dl, ElementType));
23352 auto *ND = cast<ConstantSDNode>(CurrentOp);
23353 const APInt &C = ND->getAPIntValue();
23354 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
23359 return DAG.getBuildVector(VT, dl, Elts);
23362 return DAG.getNode(Opc, dl, VT, SrcOp,
23363 DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
23366 /// Handle vector element shifts where the shift amount may or may not be a
23367 /// constant. Takes immediate version of shift as input.
23368 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
23369 SDValue SrcOp, SDValue ShAmt,
23370 const X86Subtarget &Subtarget,
23371 SelectionDAG &DAG) {
23372 MVT SVT = ShAmt.getSimpleValueType();
23373 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
23375 // Catch shift-by-constant.
23376 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
23377 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
23378 CShAmt->getZExtValue(), DAG);
23380 // Change opcode to non-immediate version.
23381 Opc = getTargetVShiftUniformOpcode(Opc, true);
23383   // Need to build a vector containing the shift amount.
23384   // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
23385 // +====================+============+=======================================+
23386 // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
23387 // +====================+============+=======================================+
23388 // | i64 | Yes, No | Use ShAmt as lowest elt |
23389 // | i32 | Yes | zero-extend in-reg |
23390 // | (i32 zext(i16/i8)) | Yes | zero-extend in-reg |
23391 // | (i32 zext(i16/i8)) | No | byte-shift-in-reg |
23392 // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
23393 // +====================+============+=======================================+
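  // As a concrete illustration of the "byte-shift-in-reg" row: with an i16
  // amount and no SSE4.1, the amount is placed in lane 0 of a v8i16 and then
  // shifted left and right by (128 - 16) / 8 = 14 bytes with VSHLDQ/VSRLDQ,
  // which clears the upper 112 bits without needing a PMOVZX-style zero extend.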
23395 if (SVT == MVT::i64)
23396 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
23397 else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
23398 ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
23399 (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
23400 ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
23401 ShAmt = ShAmt.getOperand(0);
23402 MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
23403 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
23404 if (Subtarget.hasSSE41())
23405 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
23406 MVT::v2i64, ShAmt);
23408 SDValue ByteShift = DAG.getTargetConstant(
23409 (128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
23410 ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
23411 ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
23413 ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
23416 } else if (Subtarget.hasSSE41() &&
23417 ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
23418 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
23419 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
23420 MVT::v2i64, ShAmt);
23422 SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT),
23423 DAG.getUNDEF(SVT)};
23424 ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
23427 // The return type has to be a 128-bit type with the same element
23428 // type as the input type.
23429 MVT EltVT = VT.getVectorElementType();
23430 MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
23432 ShAmt = DAG.getBitcast(ShVT, ShAmt);
23433 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
23436 /// Return \p Mask with the necessary casting or extending
23437 /// to \p MaskVT when lowering masking intrinsics.
23438 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
23439 const X86Subtarget &Subtarget, SelectionDAG &DAG,
23442 if (isAllOnesConstant(Mask))
23443 return DAG.getConstant(1, dl, MaskVT);
23444 if (X86::isZeroNode(Mask))
23445 return DAG.getConstant(0, dl, MaskVT);
23447 assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
23449 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
23450 assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
23451 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
23452     // In 32-bit mode, bitcasting i64 is illegal; extend/split it instead.
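    // Worked example with an illustrative mask value: an i64 mask of
    // 0x00000000FFFFFFFF is split into Lo = 0xFFFFFFFF and Hi = 0x0, each half
    // is bitcast to v32i1, and the concat below yields a v64i1 whose elements
    // 0..31 come from Lo and whose upper 32 elements are all zero.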
23454 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
23455 DAG.getConstant(0, dl, MVT::i32));
23456 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
23457 DAG.getConstant(1, dl, MVT::i32));
23459 Lo = DAG.getBitcast(MVT::v32i1, Lo);
23460 Hi = DAG.getBitcast(MVT::v32i1, Hi);
23462 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
23464 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
23465 Mask.getSimpleValueType().getSizeInBits());
23466   // When MaskVT is v2i1 or v4i1, the low 2 or 4 elements
23467   // are extracted by EXTRACT_SUBVECTOR.
23468 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
23469 DAG.getBitcast(BitcastVT, Mask),
23470 DAG.getIntPtrConstant(0, dl));
23474 /// Return (and \p Op, \p Mask) for compare instructions or
23475 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
23476 /// necessary casting or extending for \p Mask when lowering masking intrinsics
23477 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
23478 SDValue PreservedSrc,
23479 const X86Subtarget &Subtarget,
23480 SelectionDAG &DAG) {
23481 MVT VT = Op.getSimpleValueType();
23482 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
23483 unsigned OpcodeSelect = ISD::VSELECT;
23486 if (isAllOnesConstant(Mask))
23489 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23491 if (PreservedSrc.isUndef())
23492 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
23493 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
23496 /// Creates an SDNode for a predicated scalar operation.
23497 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
23498 /// The mask comes in as MVT::i8 and should be transformed
23499 /// to MVT::v1i1 while lowering masking intrinsics.
23500 /// The main difference between ScalarMaskingNode and VectorMaskingNode is the
23501 /// use of "X86select" instead of "vselect"; we just can't create a "vselect"
23502 /// node for a scalar instruction.
23503 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
23504 SDValue PreservedSrc,
23505 const X86Subtarget &Subtarget,
23506 SelectionDAG &DAG) {
23508 if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
23509 if (MaskConst->getZExtValue() & 0x1)
23512 MVT VT = Op.getSimpleValueType();
23515   assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
23516 SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
23517 DAG.getBitcast(MVT::v8i1, Mask),
23518 DAG.getIntPtrConstant(0, dl));
23519 if (Op.getOpcode() == X86ISD::FSETCCM ||
23520 Op.getOpcode() == X86ISD::FSETCCM_SAE ||
23521 Op.getOpcode() == X86ISD::VFPCLASSS)
23522 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
23524 if (PreservedSrc.isUndef())
23525 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
23526 return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
23529 static int getSEHRegistrationNodeSize(const Function *Fn) {
23530 if (!Fn->hasPersonalityFn())
23531 report_fatal_error(
23532 "querying registration node size for function without personality");
23533 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
23534 // WinEHStatePass for the full struct definition.
23535 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
23536 case EHPersonality::MSVC_X86SEH: return 24;
23537 case EHPersonality::MSVC_CXX: return 16;
23540 report_fatal_error(
23541 "can only recover FP for 32-bit MSVC EH personality functions");
23544 /// When the MSVC runtime transfers control to us, either to an outlined
23545 /// function or when returning to a parent frame after catching an exception, we
23546 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
23547 /// Here's the math:
23548 /// RegNodeBase = EntryEBP - RegNodeSize
23549 /// ParentFP = RegNodeBase - ParentFrameOffset
23550 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
23551 /// subtracting the offset (negative on x86) takes us back to the parent FP.
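/// A purely illustrative example with made-up numbers (32-bit C++ EH, so
/// RegNodeSize = 16): with EntryEBP = 0x1000 and a recorded ParentFrameOffset
/// of -0x20, RegNodeBase = 0x1000 - 16 = 0x0FF0 and
/// ParentFP = 0x0FF0 - (-0x20) = 0x1010.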
23552 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
23553 SDValue EntryEBP) {
23554 MachineFunction &MF = DAG.getMachineFunction();
23557 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23558 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
23560 // It's possible that the parent function no longer has a personality function
23561 // if the exceptional code was optimized away, in which case we just return
23562 // the incoming EBP.
23563 if (!Fn->hasPersonalityFn())
23566 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
23567 // registration, or the .set_setframe offset.
23568 MCSymbol *OffsetSym =
23569 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
23570 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
23571 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
23572 SDValue ParentFrameOffset =
23573 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
23575 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
23576 // prologue to RBP in the parent function.
23577 const X86Subtarget &Subtarget =
23578 static_cast<const X86Subtarget &>(DAG.getSubtarget());
23579 if (Subtarget.is64Bit())
23580 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
23582 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
23583 // RegNodeBase = EntryEBP - RegNodeSize
23584 // ParentFP = RegNodeBase - ParentFrameOffset
23585 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
23586 DAG.getConstant(RegNodeSize, dl, PtrVT));
23587 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
23590 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
23591 SelectionDAG &DAG) const {
23592 // Helper to detect if the operand is CUR_DIRECTION rounding mode.
23593 auto isRoundModeCurDirection = [](SDValue Rnd) {
23594 if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
23595 return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
23599 auto isRoundModeSAE = [](SDValue Rnd) {
23600 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
23601 unsigned RC = C->getZExtValue();
23602 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
23603 // Clear the NO_EXC bit and check remaining bits.
23604 RC ^= X86::STATIC_ROUNDING::NO_EXC;
23605         // As a convenience we allow either no other bits or, explicitly,
23606         // the current direction.
23607 return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
23613 auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
23614 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
23615 RC = C->getZExtValue();
23616 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
23617 // Clear the NO_EXC bit and check remaining bits.
23618 RC ^= X86::STATIC_ROUNDING::NO_EXC;
23619 return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
23620 RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
23621 RC == X86::STATIC_ROUNDING::TO_POS_INF ||
23622 RC == X86::STATIC_ROUNDING::TO_ZERO;
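  // For reference, with the usual X86::STATIC_ROUNDING encoding
  // (TO_NEAREST_INT = 0, TO_NEG_INF = 1, TO_POS_INF = 2, TO_ZERO = 3,
  // CUR_DIRECTION = 4, NO_EXC = 8): an operand of 11 (NO_EXC | TO_ZERO) passes
  // isRoundModeSAEToX with RC = TO_ZERO, while 8 (NO_EXC alone) or
  // 12 (NO_EXC | CUR_DIRECTION) satisfies isRoundModeSAE.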
23630 unsigned IntNo = Op.getConstantOperandVal(0);
23631 MVT VT = Op.getSimpleValueType();
23632 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
23635 switch(IntrData->Type) {
23636 case INTR_TYPE_1OP: {
23637     // We specify 2 possible opcodes for intrinsics with rounding modes.
23638     // First, we check if the intrinsic may have a non-default rounding mode
23639     // (IntrData->Opc1 != 0), then we check the rounding mode operand.
23640 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23641 if (IntrWithRoundingModeOpcode != 0) {
23642 SDValue Rnd = Op.getOperand(2);
23644 if (isRoundModeSAEToX(Rnd, RC))
23645 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23647 DAG.getTargetConstant(RC, dl, MVT::i32));
23648 if (!isRoundModeCurDirection(Rnd))
23651 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23654 case INTR_TYPE_1OP_SAE: {
23655 SDValue Sae = Op.getOperand(2);
23658 if (isRoundModeCurDirection(Sae))
23659 Opc = IntrData->Opc0;
23660 else if (isRoundModeSAE(Sae))
23661 Opc = IntrData->Opc1;
23665 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
23667 case INTR_TYPE_2OP: {
23668 SDValue Src2 = Op.getOperand(2);
23670     // We specify 2 possible opcodes for intrinsics with rounding modes.
23671     // First, we check if the intrinsic may have a non-default rounding mode
23672     // (IntrData->Opc1 != 0), then we check the rounding mode operand.
23673 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23674 if (IntrWithRoundingModeOpcode != 0) {
23675 SDValue Rnd = Op.getOperand(3);
23677 if (isRoundModeSAEToX(Rnd, RC))
23678 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23679 Op.getOperand(1), Src2,
23680 DAG.getTargetConstant(RC, dl, MVT::i32));
23681 if (!isRoundModeCurDirection(Rnd))
23685 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23686 Op.getOperand(1), Src2);
23688 case INTR_TYPE_2OP_SAE: {
23689 SDValue Sae = Op.getOperand(3);
23692 if (isRoundModeCurDirection(Sae))
23693 Opc = IntrData->Opc0;
23694 else if (isRoundModeSAE(Sae))
23695 Opc = IntrData->Opc1;
23699 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
23702 case INTR_TYPE_3OP:
23703 case INTR_TYPE_3OP_IMM8: {
23704 SDValue Src1 = Op.getOperand(1);
23705 SDValue Src2 = Op.getOperand(2);
23706 SDValue Src3 = Op.getOperand(3);
23708     // We specify 2 possible opcodes for intrinsics with rounding modes.
23709     // First, we check if the intrinsic may have a non-default rounding mode
23710     // (IntrData->Opc1 != 0), then we check the rounding mode operand.
23711 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23712 if (IntrWithRoundingModeOpcode != 0) {
23713 SDValue Rnd = Op.getOperand(4);
23715 if (isRoundModeSAEToX(Rnd, RC))
23716 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23718 DAG.getTargetConstant(RC, dl, MVT::i32));
23719 if (!isRoundModeCurDirection(Rnd))
23723 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23724 {Src1, Src2, Src3});
23726 case INTR_TYPE_4OP:
23727 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
23728 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
23729 case INTR_TYPE_1OP_MASK: {
23730 SDValue Src = Op.getOperand(1);
23731 SDValue PassThru = Op.getOperand(2);
23732 SDValue Mask = Op.getOperand(3);
23733     // We add the rounding mode to the node when
23734     //   - an RC opcode is specified, and
23735     //   - RC is not "current direction".
23736 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23737 if (IntrWithRoundingModeOpcode != 0) {
23738 SDValue Rnd = Op.getOperand(4);
23740 if (isRoundModeSAEToX(Rnd, RC))
23741 return getVectorMaskingNode(
23742 DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23743 Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
23744 Mask, PassThru, Subtarget, DAG);
23745 if (!isRoundModeCurDirection(Rnd))
23748 return getVectorMaskingNode(
23749 DAG.getNode(IntrData->Opc0, dl, VT, Src), Mask, PassThru,
23752 case INTR_TYPE_1OP_MASK_SAE: {
23753 SDValue Src = Op.getOperand(1);
23754 SDValue PassThru = Op.getOperand(2);
23755 SDValue Mask = Op.getOperand(3);
23756 SDValue Rnd = Op.getOperand(4);
23759 if (isRoundModeCurDirection(Rnd))
23760 Opc = IntrData->Opc0;
23761 else if (isRoundModeSAE(Rnd))
23762 Opc = IntrData->Opc1;
23766 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask, PassThru,
23769 case INTR_TYPE_SCALAR_MASK: {
23770 SDValue Src1 = Op.getOperand(1);
23771 SDValue Src2 = Op.getOperand(2);
23772 SDValue passThru = Op.getOperand(3);
23773 SDValue Mask = Op.getOperand(4);
23774 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23775     // There are 2 kinds of intrinsics in this group:
23776     // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
23777     // (2) With rounding mode and sae - 7 operands.
23778 bool HasRounding = IntrWithRoundingModeOpcode != 0;
23779 if (Op.getNumOperands() == (5U + HasRounding)) {
23781 SDValue Rnd = Op.getOperand(5);
23783 if (isRoundModeSAEToX(Rnd, RC))
23784 return getScalarMaskingNode(
23785 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
23786 DAG.getTargetConstant(RC, dl, MVT::i32)),
23787 Mask, passThru, Subtarget, DAG);
23788 if (!isRoundModeCurDirection(Rnd))
23791 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
23793 Mask, passThru, Subtarget, DAG);
23796 assert(Op.getNumOperands() == (6U + HasRounding) &&
23797 "Unexpected intrinsic form");
23798 SDValue RoundingMode = Op.getOperand(5);
23799 unsigned Opc = IntrData->Opc0;
23801 SDValue Sae = Op.getOperand(6);
23802 if (isRoundModeSAE(Sae))
23803 Opc = IntrWithRoundingModeOpcode;
23804 else if (!isRoundModeCurDirection(Sae))
23807 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
23808 Src2, RoundingMode),
23809 Mask, passThru, Subtarget, DAG);
23811 case INTR_TYPE_SCALAR_MASK_RND: {
23812 SDValue Src1 = Op.getOperand(1);
23813 SDValue Src2 = Op.getOperand(2);
23814 SDValue passThru = Op.getOperand(3);
23815 SDValue Mask = Op.getOperand(4);
23816 SDValue Rnd = Op.getOperand(5);
23820 if (isRoundModeCurDirection(Rnd))
23821 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
23822 else if (isRoundModeSAEToX(Rnd, RC))
23823 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
23824 DAG.getTargetConstant(RC, dl, MVT::i32));
23828 return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
23830 case INTR_TYPE_SCALAR_MASK_SAE: {
23831 SDValue Src1 = Op.getOperand(1);
23832 SDValue Src2 = Op.getOperand(2);
23833 SDValue passThru = Op.getOperand(3);
23834 SDValue Mask = Op.getOperand(4);
23835 SDValue Sae = Op.getOperand(5);
23837 if (isRoundModeCurDirection(Sae))
23838 Opc = IntrData->Opc0;
23839 else if (isRoundModeSAE(Sae))
23840 Opc = IntrData->Opc1;
23844 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
23845 Mask, passThru, Subtarget, DAG);
23847 case INTR_TYPE_2OP_MASK: {
23848 SDValue Src1 = Op.getOperand(1);
23849 SDValue Src2 = Op.getOperand(2);
23850 SDValue PassThru = Op.getOperand(3);
23851 SDValue Mask = Op.getOperand(4);
23853 if (IntrData->Opc1 != 0) {
23854 SDValue Rnd = Op.getOperand(5);
23856 if (isRoundModeSAEToX(Rnd, RC))
23857 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
23858 DAG.getTargetConstant(RC, dl, MVT::i32));
23859 else if (!isRoundModeCurDirection(Rnd))
23863 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
23864 return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
23866 case INTR_TYPE_2OP_MASK_SAE: {
23867 SDValue Src1 = Op.getOperand(1);
23868 SDValue Src2 = Op.getOperand(2);
23869 SDValue PassThru = Op.getOperand(3);
23870 SDValue Mask = Op.getOperand(4);
23872 unsigned Opc = IntrData->Opc0;
23873 if (IntrData->Opc1 != 0) {
23874 SDValue Sae = Op.getOperand(5);
23875 if (isRoundModeSAE(Sae))
23876 Opc = IntrData->Opc1;
23877 else if (!isRoundModeCurDirection(Sae))
23881 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
23882 Mask, PassThru, Subtarget, DAG);
23884 case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
23885 SDValue Src1 = Op.getOperand(1);
23886 SDValue Src2 = Op.getOperand(2);
23887 SDValue Src3 = Op.getOperand(3);
23888 SDValue PassThru = Op.getOperand(4);
23889 SDValue Mask = Op.getOperand(5);
23890 SDValue Sae = Op.getOperand(6);
23892 if (isRoundModeCurDirection(Sae))
23893 Opc = IntrData->Opc0;
23894 else if (isRoundModeSAE(Sae))
23895 Opc = IntrData->Opc1;
23899 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
23900 Mask, PassThru, Subtarget, DAG);
23902 case INTR_TYPE_3OP_MASK_SAE: {
23903 SDValue Src1 = Op.getOperand(1);
23904 SDValue Src2 = Op.getOperand(2);
23905 SDValue Src3 = Op.getOperand(3);
23906 SDValue PassThru = Op.getOperand(4);
23907 SDValue Mask = Op.getOperand(5);
23909 unsigned Opc = IntrData->Opc0;
23910 if (IntrData->Opc1 != 0) {
23911 SDValue Sae = Op.getOperand(6);
23912 if (isRoundModeSAE(Sae))
23913 Opc = IntrData->Opc1;
23914 else if (!isRoundModeCurDirection(Sae))
23917 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
23918 Mask, PassThru, Subtarget, DAG);
23921 SDValue Src1 = Op.getOperand(1);
23922 SDValue Src2 = Op.getOperand(2);
23923 SDValue Src3 = Op.getOperand(3);
23925 EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
23926 Src3 = DAG.getBitcast(MaskVT, Src3);
23928 // Reverse the operands to match VSELECT order.
23929 return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
23932 SDValue Src1 = Op.getOperand(1);
23933 SDValue Src2 = Op.getOperand(2);
23935 // Swap Src1 and Src2 in the node creation
23936 return DAG.getNode(IntrData->Opc0, dl, VT,Src2, Src1);
23939     // NOTE: We need to swizzle the operands to pass the multiply operands
23940     // first.
23941 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23942 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
23944 SDValue Src1 = Op.getOperand(1);
23945 SDValue Imm = Op.getOperand(2);
23946 SDValue Mask = Op.getOperand(3);
23947 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
23948 SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
23950 // Need to fill with zeros to ensure the bitcast will produce zeroes
23951 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
23952 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
23953 DAG.getConstant(0, dl, MVT::v8i1),
23954 FPclassMask, DAG.getIntPtrConstant(0, dl));
23955 return DAG.getBitcast(MVT::i8, Ins);
23958 case CMP_MASK_CC: {
23959 MVT MaskVT = Op.getSimpleValueType();
23960 SDValue CC = Op.getOperand(3);
23961     // We specify 2 possible opcodes for intrinsics with rounding modes.
23962     // First, we check if the intrinsic may have a non-default rounding mode
23963     // (IntrData->Opc1 != 0), then we check the rounding mode operand.
23964 if (IntrData->Opc1 != 0) {
23965 SDValue Sae = Op.getOperand(4);
23966 if (isRoundModeSAE(Sae))
23967 return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
23968 Op.getOperand(2), CC, Sae);
23969 if (!isRoundModeCurDirection(Sae))
23972     // Default rounding mode.
23973 return DAG.getNode(IntrData->Opc0, dl, MaskVT,
23974 {Op.getOperand(1), Op.getOperand(2), CC});
23976 case CMP_MASK_SCALAR_CC: {
23977 SDValue Src1 = Op.getOperand(1);
23978 SDValue Src2 = Op.getOperand(2);
23979 SDValue CC = Op.getOperand(3);
23980 SDValue Mask = Op.getOperand(4);
23983 if (IntrData->Opc1 != 0) {
23984 SDValue Sae = Op.getOperand(5);
23985 if (isRoundModeSAE(Sae))
23986 Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
23987 else if (!isRoundModeCurDirection(Sae))
23990     // Default rounding mode.
23991 if (!Cmp.getNode())
23992 Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
23994 SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
23996 // Need to fill with zeros to ensure the bitcast will produce zeroes
23997 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
23998 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
23999 DAG.getConstant(0, dl, MVT::v8i1),
24000 CmpMask, DAG.getIntPtrConstant(0, dl));
24001 return DAG.getBitcast(MVT::i8, Ins);
24003 case COMI: { // Comparison intrinsics
24004 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
24005 SDValue LHS = Op.getOperand(1);
24006 SDValue RHS = Op.getOperand(2);
24007 SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
24008 SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
24011 case ISD::SETEQ: { // (ZF = 0 and PF = 0)
24012 SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
24013 SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
24014 SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
24017 case ISD::SETNE: { // (ZF = 1 or PF = 1)
24018 SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
24019 SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
24020 SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
24023 case ISD::SETGT: // (CF = 0 and ZF = 0)
24024 SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
24026 case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
24027 SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG);
24030 case ISD::SETGE: // CF = 0
24031 SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
24033 case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
24034 SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG);
24037 llvm_unreachable("Unexpected illegal condition!");
24039 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
24041 case COMI_RM: { // Comparison intrinsics with Sae
24042 SDValue LHS = Op.getOperand(1);
24043 SDValue RHS = Op.getOperand(2);
24044 unsigned CondVal = Op.getConstantOperandVal(3);
24045 SDValue Sae = Op.getOperand(4);
24048 if (isRoundModeCurDirection(Sae))
24049 FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
24050 DAG.getTargetConstant(CondVal, dl, MVT::i8));
24051 else if (isRoundModeSAE(Sae))
24052 FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
24053 DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
24056 // Need to fill with zeros to ensure the bitcast will produce zeroes
24057 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
24058 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
24059 DAG.getConstant(0, dl, MVT::v16i1),
24060 FCmp, DAG.getIntPtrConstant(0, dl));
24061 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
24062 DAG.getBitcast(MVT::i16, Ins));
24065 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
24066 Op.getOperand(1), Op.getOperand(2), Subtarget,
24068 case COMPRESS_EXPAND_IN_REG: {
24069 SDValue Mask = Op.getOperand(3);
24070 SDValue DataToCompress = Op.getOperand(1);
24071 SDValue PassThru = Op.getOperand(2);
24072 if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
24073 return Op.getOperand(1);
24075 // Avoid false dependency.
24076 if (PassThru.isUndef())
24077 PassThru = DAG.getConstant(0, dl, VT);
24079 return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
24083 case FIXUPIMM_MASKZ: {
24084 SDValue Src1 = Op.getOperand(1);
24085 SDValue Src2 = Op.getOperand(2);
24086 SDValue Src3 = Op.getOperand(3);
24087 SDValue Imm = Op.getOperand(4);
24088 SDValue Mask = Op.getOperand(5);
24089 SDValue Passthru = (IntrData->Type == FIXUPIMM)
24091 : getZeroVector(VT, Subtarget, DAG, dl);
24093 unsigned Opc = IntrData->Opc0;
24094 if (IntrData->Opc1 != 0) {
24095 SDValue Sae = Op.getOperand(6);
24096 if (isRoundModeSAE(Sae))
24097 Opc = IntrData->Opc1;
24098 else if (!isRoundModeCurDirection(Sae))
24102 SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
24104 if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
24105 return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
24107 return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
24110 assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
24111 // Clear the upper bits of the rounding immediate so that the legacy
24112 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
24113 auto Round = cast<ConstantSDNode>(Op.getOperand(2));
24114 SDValue RoundingMode =
24115 DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
24116 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24117 Op.getOperand(1), RoundingMode);
24120 assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
24121 // Clear the upper bits of the rounding immediate so that the legacy
24122 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
24123 auto Round = cast<ConstantSDNode>(Op.getOperand(3));
24124 SDValue RoundingMode =
24125 DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
24126 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24127 Op.getOperand(1), Op.getOperand(2), RoundingMode);
24130 assert(IntrData->Opc0 == X86ISD::BEXTR && "Unexpected opcode");
24132     // The control is a TargetConstant, but we need to convert it to a
24133     // regular constant value first.
24134 uint64_t Imm = Op.getConstantOperandVal(2);
24135 SDValue Control = DAG.getConstant(Imm, dl, Op.getValueType());
24136 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24137 Op.getOperand(1), Control);
24141 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
24142 SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
24145     // If the carry-in is zero, then we should just use ADD/SUB instead of
24146     // ADC/SBB.
24147 if (isNullConstant(Op.getOperand(1))) {
24148 Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
24151 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
24152 DAG.getConstant(-1, dl, MVT::i8));
24153 Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
24154 Op.getOperand(3), GenCF.getValue(1));
24156 SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
24157 SDValue Results[] = { SetCC, Res };
24158 return DAG.getMergeValues(Results, dl);
24160 case CVTPD2PS_MASK:
24161 case CVTPD2DQ_MASK:
24162 case CVTQQ2PS_MASK:
24163 case TRUNCATE_TO_REG: {
24164 SDValue Src = Op.getOperand(1);
24165 SDValue PassThru = Op.getOperand(2);
24166 SDValue Mask = Op.getOperand(3);
24168 if (isAllOnesConstant(Mask))
24169 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
24171 MVT SrcVT = Src.getSimpleValueType();
24172 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
24173 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24174 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
24175 {Src, PassThru, Mask});
24177 case CVTPS2PH_MASK: {
24178 SDValue Src = Op.getOperand(1);
24179 SDValue Rnd = Op.getOperand(2);
24180 SDValue PassThru = Op.getOperand(3);
24181 SDValue Mask = Op.getOperand(4);
24183 if (isAllOnesConstant(Mask))
24184 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);
24186 MVT SrcVT = Src.getSimpleValueType();
24187 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
24188 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24189 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
24193 case CVTNEPS2BF16_MASK: {
24194 SDValue Src = Op.getOperand(1);
24195 SDValue PassThru = Op.getOperand(2);
24196 SDValue Mask = Op.getOperand(3);
24198 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24199 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
24201 // Break false dependency.
24202 if (PassThru.isUndef())
24203 PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
24205 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
24214 default: return SDValue(); // Don't custom lower most intrinsics.
24216   // ptest and testp intrinsics. The intrinsics these come from are designed to
24217   // return an integer value, not just an instruction, so lower them to the
24218   // ptest or testp pattern and a setcc for the result.
24219 case Intrinsic::x86_avx512_ktestc_b:
24220 case Intrinsic::x86_avx512_ktestc_w:
24221 case Intrinsic::x86_avx512_ktestc_d:
24222 case Intrinsic::x86_avx512_ktestc_q:
24223 case Intrinsic::x86_avx512_ktestz_b:
24224 case Intrinsic::x86_avx512_ktestz_w:
24225 case Intrinsic::x86_avx512_ktestz_d:
24226 case Intrinsic::x86_avx512_ktestz_q:
24227 case Intrinsic::x86_sse41_ptestz:
24228 case Intrinsic::x86_sse41_ptestc:
24229 case Intrinsic::x86_sse41_ptestnzc:
24230 case Intrinsic::x86_avx_ptestz_256:
24231 case Intrinsic::x86_avx_ptestc_256:
24232 case Intrinsic::x86_avx_ptestnzc_256:
24233 case Intrinsic::x86_avx_vtestz_ps:
24234 case Intrinsic::x86_avx_vtestc_ps:
24235 case Intrinsic::x86_avx_vtestnzc_ps:
24236 case Intrinsic::x86_avx_vtestz_pd:
24237 case Intrinsic::x86_avx_vtestc_pd:
24238 case Intrinsic::x86_avx_vtestnzc_pd:
24239 case Intrinsic::x86_avx_vtestz_ps_256:
24240 case Intrinsic::x86_avx_vtestc_ps_256:
24241 case Intrinsic::x86_avx_vtestnzc_ps_256:
24242 case Intrinsic::x86_avx_vtestz_pd_256:
24243 case Intrinsic::x86_avx_vtestc_pd_256:
24244 case Intrinsic::x86_avx_vtestnzc_pd_256: {
24245 unsigned TestOpc = X86ISD::PTEST;
24246 X86::CondCode X86CC;
24248 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
24249 case Intrinsic::x86_avx512_ktestc_b:
24250 case Intrinsic::x86_avx512_ktestc_w:
24251 case Intrinsic::x86_avx512_ktestc_d:
24252 case Intrinsic::x86_avx512_ktestc_q:
24254 TestOpc = X86ISD::KTEST;
24255 X86CC = X86::COND_B;
24257 case Intrinsic::x86_avx512_ktestz_b:
24258 case Intrinsic::x86_avx512_ktestz_w:
24259 case Intrinsic::x86_avx512_ktestz_d:
24260 case Intrinsic::x86_avx512_ktestz_q:
24261 TestOpc = X86ISD::KTEST;
24262 X86CC = X86::COND_E;
24264 case Intrinsic::x86_avx_vtestz_ps:
24265 case Intrinsic::x86_avx_vtestz_pd:
24266 case Intrinsic::x86_avx_vtestz_ps_256:
24267 case Intrinsic::x86_avx_vtestz_pd_256:
24268 TestOpc = X86ISD::TESTP;
24270 case Intrinsic::x86_sse41_ptestz:
24271 case Intrinsic::x86_avx_ptestz_256:
24273 X86CC = X86::COND_E;
24275 case Intrinsic::x86_avx_vtestc_ps:
24276 case Intrinsic::x86_avx_vtestc_pd:
24277 case Intrinsic::x86_avx_vtestc_ps_256:
24278 case Intrinsic::x86_avx_vtestc_pd_256:
24279 TestOpc = X86ISD::TESTP;
24281 case Intrinsic::x86_sse41_ptestc:
24282 case Intrinsic::x86_avx_ptestc_256:
24284 X86CC = X86::COND_B;
24286 case Intrinsic::x86_avx_vtestnzc_ps:
24287 case Intrinsic::x86_avx_vtestnzc_pd:
24288 case Intrinsic::x86_avx_vtestnzc_ps_256:
24289 case Intrinsic::x86_avx_vtestnzc_pd_256:
24290 TestOpc = X86ISD::TESTP;
24292 case Intrinsic::x86_sse41_ptestnzc:
24293 case Intrinsic::x86_avx_ptestnzc_256:
24295 X86CC = X86::COND_A;
24299 SDValue LHS = Op.getOperand(1);
24300 SDValue RHS = Op.getOperand(2);
24301 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
24302 SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
24303 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
24306 case Intrinsic::x86_sse42_pcmpistria128:
24307 case Intrinsic::x86_sse42_pcmpestria128:
24308 case Intrinsic::x86_sse42_pcmpistric128:
24309 case Intrinsic::x86_sse42_pcmpestric128:
24310 case Intrinsic::x86_sse42_pcmpistrio128:
24311 case Intrinsic::x86_sse42_pcmpestrio128:
24312 case Intrinsic::x86_sse42_pcmpistris128:
24313 case Intrinsic::x86_sse42_pcmpestris128:
24314 case Intrinsic::x86_sse42_pcmpistriz128:
24315 case Intrinsic::x86_sse42_pcmpestriz128: {
24317 X86::CondCode X86CC;
24319 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
24320 case Intrinsic::x86_sse42_pcmpistria128:
24321 Opcode = X86ISD::PCMPISTR;
24322 X86CC = X86::COND_A;
24324 case Intrinsic::x86_sse42_pcmpestria128:
24325 Opcode = X86ISD::PCMPESTR;
24326 X86CC = X86::COND_A;
24328 case Intrinsic::x86_sse42_pcmpistric128:
24329 Opcode = X86ISD::PCMPISTR;
24330 X86CC = X86::COND_B;
24332 case Intrinsic::x86_sse42_pcmpestric128:
24333 Opcode = X86ISD::PCMPESTR;
24334 X86CC = X86::COND_B;
24336 case Intrinsic::x86_sse42_pcmpistrio128:
24337 Opcode = X86ISD::PCMPISTR;
24338 X86CC = X86::COND_O;
24340 case Intrinsic::x86_sse42_pcmpestrio128:
24341 Opcode = X86ISD::PCMPESTR;
24342 X86CC = X86::COND_O;
24344 case Intrinsic::x86_sse42_pcmpistris128:
24345 Opcode = X86ISD::PCMPISTR;
24346 X86CC = X86::COND_S;
24348 case Intrinsic::x86_sse42_pcmpestris128:
24349 Opcode = X86ISD::PCMPESTR;
24350 X86CC = X86::COND_S;
24352 case Intrinsic::x86_sse42_pcmpistriz128:
24353 Opcode = X86ISD::PCMPISTR;
24354 X86CC = X86::COND_E;
24356 case Intrinsic::x86_sse42_pcmpestriz128:
24357 Opcode = X86ISD::PCMPESTR;
24358 X86CC = X86::COND_E;
24361 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
24362 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
24363 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
24364 SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
24365 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
24368 case Intrinsic::x86_sse42_pcmpistri128:
24369 case Intrinsic::x86_sse42_pcmpestri128: {
24371 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
24372 Opcode = X86ISD::PCMPISTR;
24374 Opcode = X86ISD::PCMPESTR;
24376 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
24377 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
24378 return DAG.getNode(Opcode, dl, VTs, NewOps);
24381 case Intrinsic::x86_sse42_pcmpistrm128:
24382 case Intrinsic::x86_sse42_pcmpestrm128: {
24384 if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
24385 Opcode = X86ISD::PCMPISTR;
24387 Opcode = X86ISD::PCMPESTR;
24389 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
24390 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
24391 return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
24394 case Intrinsic::eh_sjlj_lsda: {
24395 MachineFunction &MF = DAG.getMachineFunction();
24396 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24397 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
24398 auto &Context = MF.getMMI().getContext();
24399 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
24400 Twine(MF.getFunctionNumber()));
24401 return DAG.getNode(getGlobalWrapperKind(), dl, VT,
24402 DAG.getMCSymbol(S, PtrVT));
24405 case Intrinsic::x86_seh_lsda: {
24406 // Compute the symbol for the LSDA. We know it'll get emitted later.
24407 MachineFunction &MF = DAG.getMachineFunction();
24408 SDValue Op1 = Op.getOperand(1);
24409 auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
24410 MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
24411 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
24413 // Generate a simple absolute symbol reference. This intrinsic is only
24414 // supported on 32-bit Windows, which isn't PIC.
24415 SDValue Result = DAG.getMCSymbol(LSDASym, VT);
24416 return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
24419 case Intrinsic::eh_recoverfp: {
24420 SDValue FnOp = Op.getOperand(1);
24421 SDValue IncomingFPOp = Op.getOperand(2);
24422 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
24423 auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
24425 report_fatal_error(
24426 "llvm.eh.recoverfp must take a function as the first argument");
24427 return recoverFramePointer(DAG, Fn, IncomingFPOp);
24430 case Intrinsic::localaddress: {
24431 // Returns one of the stack, base, or frame pointer registers, depending on
24432 // which is used to reference local variables.
24433 MachineFunction &MF = DAG.getMachineFunction();
24434 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24436 if (RegInfo->hasBasePointer(MF))
24437 Reg = RegInfo->getBaseRegister();
24438 else { // Handles the SP or FP case.
24439 bool CantUseFP = RegInfo->needsStackRealignment(MF);
24441 Reg = RegInfo->getPtrSizedStackRegister(MF);
24443 Reg = RegInfo->getPtrSizedFrameRegister(MF);
24445 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
24448 case Intrinsic::x86_avx512_vp2intersect_q_512:
24449 case Intrinsic::x86_avx512_vp2intersect_q_256:
24450 case Intrinsic::x86_avx512_vp2intersect_q_128:
24451 case Intrinsic::x86_avx512_vp2intersect_d_512:
24452 case Intrinsic::x86_avx512_vp2intersect_d_256:
24453 case Intrinsic::x86_avx512_vp2intersect_d_128: {
24454 MVT MaskVT = Op.getSimpleValueType();
24456 SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
24459 SDValue Operation =
24460 DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
24461 Op->getOperand(1), Op->getOperand(2));
24463 SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
24464 MaskVT, Operation);
24465 SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
24466 MaskVT, Operation);
24467 return DAG.getMergeValues({Result0, Result1}, DL);
24469 case Intrinsic::x86_mmx_pslli_w:
24470 case Intrinsic::x86_mmx_pslli_d:
24471 case Intrinsic::x86_mmx_pslli_q:
24472 case Intrinsic::x86_mmx_psrli_w:
24473 case Intrinsic::x86_mmx_psrli_d:
24474 case Intrinsic::x86_mmx_psrli_q:
24475 case Intrinsic::x86_mmx_psrai_w:
24476 case Intrinsic::x86_mmx_psrai_d: {
24478 SDValue ShAmt = Op.getOperand(2);
24479 // If the argument is a constant, convert it to a target constant.
24480 if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
24481       // Clamp out-of-bounds shift amounts since they will otherwise be masked
24482       // to 8 bits, which may make them no longer out of bounds.
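      // Worked example (illustrative amount): 256 masked to 8 bits becomes 0,
      // silently turning an out-of-range shift into a shift by zero; clamping
      // to 255 first keeps it out of range so the instruction still produces
      // the expected all-zeros (or, for arithmetic right shifts, sign-filled)
      // result.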
24483 unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
24484 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
24485 Op.getOperand(0), Op.getOperand(1),
24486 DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
24489 unsigned NewIntrinsic;
24491 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
24492 case Intrinsic::x86_mmx_pslli_w:
24493 NewIntrinsic = Intrinsic::x86_mmx_psll_w;
24495 case Intrinsic::x86_mmx_pslli_d:
24496 NewIntrinsic = Intrinsic::x86_mmx_psll_d;
24498 case Intrinsic::x86_mmx_pslli_q:
24499 NewIntrinsic = Intrinsic::x86_mmx_psll_q;
24501 case Intrinsic::x86_mmx_psrli_w:
24502 NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
24504 case Intrinsic::x86_mmx_psrli_d:
24505 NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
24507 case Intrinsic::x86_mmx_psrli_q:
24508 NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
24510 case Intrinsic::x86_mmx_psrai_w:
24511 NewIntrinsic = Intrinsic::x86_mmx_psra_w;
24513 case Intrinsic::x86_mmx_psrai_d:
24514 NewIntrinsic = Intrinsic::x86_mmx_psra_d;
24518     // The vector shift intrinsics with scalars use 32-bit shift amounts, but
24519     // the SSE2/MMX shift instructions read 64 bits. Copy the 32 bits to an
24520     // MMX register.
24521 ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
24522 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
24523 DAG.getConstant(NewIntrinsic, DL, MVT::i32),
24524 Op.getOperand(1), ShAmt);
24530 static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
24531 SDValue Src, SDValue Mask, SDValue Base,
24532 SDValue Index, SDValue ScaleOp, SDValue Chain,
24533 const X86Subtarget &Subtarget) {
24535 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24536 // Scale must be constant.
24539 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24540 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24541 TLI.getPointerTy(DAG.getDataLayout()));
24542 EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
24543 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
24544 // If source is undef or we know it won't be used, use a zero vector
24545 // to break register dependency.
24546 // TODO: use undef instead and let BreakFalseDeps deal with it?
24547 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
24548 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
24550 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
24552 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
24553 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
24554 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
24555 return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
24558 static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
24559 SDValue Src, SDValue Mask, SDValue Base,
24560 SDValue Index, SDValue ScaleOp, SDValue Chain,
24561 const X86Subtarget &Subtarget) {
24562 MVT VT = Op.getSimpleValueType();
24564 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24565 // Scale must be constant.
24568 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24569 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24570 TLI.getPointerTy(DAG.getDataLayout()));
24571 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
24572 VT.getVectorNumElements());
24573 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
24575 // We support two versions of the gather intrinsics. One with scalar mask and
24576 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
24577 if (Mask.getValueType() != MaskVT)
24578 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24580 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
24581 // If source is undef or we know it won't be used, use a zero vector
24582 // to break register dependency.
24583 // TODO: use undef instead and let BreakFalseDeps deal with it?
24584 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
24585 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
24587 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
24589 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
24590 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
24591 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
24592 return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
24595 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
24596 SDValue Src, SDValue Mask, SDValue Base,
24597 SDValue Index, SDValue ScaleOp, SDValue Chain,
24598 const X86Subtarget &Subtarget) {
24600 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24601 // Scale must be constant.
24604 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24605 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24606 TLI.getPointerTy(DAG.getDataLayout()));
24607 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
24608 Src.getSimpleValueType().getVectorNumElements());
24609 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
24611 // We support two versions of the scatter intrinsics. One with scalar mask and
24612 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
24613 if (Mask.getValueType() != MaskVT)
24614 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24616 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
24618 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
24619 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
24620 SDValue Res = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
24621 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
24622 return Res.getValue(1);
24625 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
24626 SDValue Mask, SDValue Base, SDValue Index,
24627 SDValue ScaleOp, SDValue Chain,
24628 const X86Subtarget &Subtarget) {
24630 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24631 // Scale must be constant.
24634 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24635 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24636 TLI.getPointerTy(DAG.getDataLayout()));
24637 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
24638 SDValue Segment = DAG.getRegister(0, MVT::i32);
24640 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
24641 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24642 SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
24643 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
24644 return SDValue(Res, 0);
24647 /// Handles the lowering of builtin intrinsics with chain that return their
24648 /// value into registers EDX:EAX.
24649 /// If operand SrcReg is a valid register identifier, then operand 2 of N is
24650 /// copied to SrcReg. The assumption is that SrcReg is an implicit input to
24651 /// TargetOpcode.
24652 /// Returns a Glue value which can be used to add an extra copy-from-reg if the
24653 /// expanded intrinsic implicitly defines extra registers (i.e. not just
24654 /// EAX, EDX).
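/// For example, RDTSC is expanded through this helper with SrcReg == 0 (no
/// extra input), while RDPMC and XGETBV pass X86::ECX so that the counter or
/// XCR index in operand 2 is copied into ECX before the instruction executes.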
24655 static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
24657 unsigned TargetOpcode,
24659 const X86Subtarget &Subtarget,
24660 SmallVectorImpl<SDValue> &Results) {
24661 SDValue Chain = N->getOperand(0);
24665 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
24666 Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
24667 Glue = Chain.getValue(1);
24670 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
24671 SDValue N1Ops[] = {Chain, Glue};
24672 SDNode *N1 = DAG.getMachineNode(
24673 TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
24674 Chain = SDValue(N1, 0);
24676 // The expanded instruction returns its result in registers EDX:EAX.
24678 if (Subtarget.is64Bit()) {
24679 LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
24680 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
24683 LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
24684 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
24687 Chain = HI.getValue(1);
24688 Glue = HI.getValue(2);
24690 if (Subtarget.is64Bit()) {
24691 // Merge the two 32-bit values into a 64-bit one.
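// This relies on the expanded instruction zero-extending its EAX/EDX results
// into RAX/RDX (as RDTSC, RDPMC and XGETBV do), so OR-ing RAX with RDX << 32
// reassembles the full 64-bit value.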
24692 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
24693 DAG.getConstant(32, DL, MVT::i8));
24694 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
24695 Results.push_back(Chain);
24699 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
24700 SDValue Ops[] = { LO, HI };
24701 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
24702 Results.push_back(Pair);
24703 Results.push_back(Chain);
24707 /// Handles the lowering of builtin intrinsics that read the time stamp counter
24708 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
24709 /// READCYCLECOUNTER nodes.
24710 static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
24712 const X86Subtarget &Subtarget,
24713 SmallVectorImpl<SDValue> &Results) {
24714 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
24715 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
24716 // and the EAX register is loaded with the low-order 32 bits.
24717 SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
24718 /* NoRegister */0, Subtarget,
24720 if (Opcode != X86::RDTSCP)
24723 SDValue Chain = Results[1];
24724 // Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
24725 // the ECX register. Add 'ecx' explicitly to the chain.
24726 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
24728 Results.push_back(ecx.getValue(1));
24731 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
24732 SelectionDAG &DAG) {
24733 SmallVector<SDValue, 3> Results;
24735 getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
24737 return DAG.getMergeValues(Results, DL);
24740 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
24741 MachineFunction &MF = DAG.getMachineFunction();
24742 SDValue Chain = Op.getOperand(0);
24743 SDValue RegNode = Op.getOperand(2);
24744 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
24746 report_fatal_error("EH registrations only live in functions using WinEH");
24748 // Cast the operand to an alloca, and remember the frame index.
24749 auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
24751 report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
24752 EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
24754 // Return the chain operand without making any DAG nodes.
24758 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
24759 MachineFunction &MF = DAG.getMachineFunction();
24760 SDValue Chain = Op.getOperand(0);
24761 SDValue EHGuard = Op.getOperand(2);
24762 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
24764 report_fatal_error("EHGuard only live in functions using WinEH");
24766 // Cast the operand to an alloca, and remember the frame index.
24767 auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
24769 report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
24770 EHInfo->EHGuardFrameIndex = FINode->getIndex();
24772 // Return the chain operand without making any DAG nodes.
24776 /// Emit Truncating Store with signed or unsigned saturation.
24778 EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
24779 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
24780 SelectionDAG &DAG) {
24782 SDVTList VTs = DAG.getVTList(MVT::Other);
24783 SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
24784 SDValue Ops[] = { Chain, Val, Ptr, Undef };
24786 DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
24787 DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
24790 /// Emit Masked Truncating Store with signed or unsigned saturation.
24792 EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
24793 SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
24794 MachineMemOperand *MMO, SelectionDAG &DAG) {
24796 SDVTList VTs = DAG.getVTList(MVT::Other);
24797 SDValue Ops[] = { Chain, Val, Ptr, Mask };
24799 DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
24800 DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
24803 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
24804 SelectionDAG &DAG) {
24805 unsigned IntNo = Op.getConstantOperandVal(1);
24806 const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
24809 case llvm::Intrinsic::x86_seh_ehregnode:
24810 return MarkEHRegistrationNode(Op, DAG);
24811 case llvm::Intrinsic::x86_seh_ehguard:
24812 return MarkEHGuard(Op, DAG);
24813 case llvm::Intrinsic::x86_rdpkru: {
24815 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
24816 // Create a RDPKRU node and pass 0 to the ECX parameter.
24817 return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
24818 DAG.getConstant(0, dl, MVT::i32));
24820 case llvm::Intrinsic::x86_wrpkru: {
24822 // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
24823 // to the EDX and ECX parameters.
24824 return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
24825 Op.getOperand(0), Op.getOperand(2),
24826 DAG.getConstant(0, dl, MVT::i32),
24827 DAG.getConstant(0, dl, MVT::i32));
24829 case llvm::Intrinsic::x86_flags_read_u32:
24830 case llvm::Intrinsic::x86_flags_read_u64:
24831 case llvm::Intrinsic::x86_flags_write_u32:
24832 case llvm::Intrinsic::x86_flags_write_u64: {
24833 // We need a frame pointer because this will get lowered to a PUSH/POP
24834 // sequence.
24835 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
24836 MFI.setHasCopyImplyingStackAdjustment(true);
24837 // Don't do anything here, we will expand these intrinsics out later
24838 // during FinalizeISel in EmitInstrWithCustomInserter.
24841 case Intrinsic::x86_lwpins32:
24842 case Intrinsic::x86_lwpins64:
24843 case Intrinsic::x86_umwait:
24844 case Intrinsic::x86_tpause: {
24846 SDValue Chain = Op->getOperand(0);
24847 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
24851 default: llvm_unreachable("Impossible intrinsic");
24852 case Intrinsic::x86_umwait:
24853 Opcode = X86ISD::UMWAIT;
24855 case Intrinsic::x86_tpause:
24856 Opcode = X86ISD::TPAUSE;
24858 case Intrinsic::x86_lwpins32:
24859 case Intrinsic::x86_lwpins64:
24860 Opcode = X86ISD::LWPINS;
24864 SDValue Operation =
24865 DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
24866 Op->getOperand(3), Op->getOperand(4));
24867 SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
24868 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
24869 Operation.getValue(1));
24871 case Intrinsic::x86_enqcmd:
24872 case Intrinsic::x86_enqcmds: {
24874 SDValue Chain = Op.getOperand(0);
24875 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
24878 default: llvm_unreachable("Impossible intrinsic!");
24879 case Intrinsic::x86_enqcmd:
24880 Opcode = X86ISD::ENQCMD;
24882 case Intrinsic::x86_enqcmds:
24883 Opcode = X86ISD::ENQCMDS;
24886 SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
24888 SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
24889 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
24890 Operation.getValue(1));
24897 switch(IntrData->Type) {
24898 default: llvm_unreachable("Unknown Intrinsic Type");
24901 // Emit the node with the right value type.
24902 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
24903 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
24905 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
24906 // Otherwise return the value from Rand, which is always 0, casted to i32.
24907 SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
24908 DAG.getConstant(1, dl, Op->getValueType(1)),
24909 DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
24910 SDValue(Result.getNode(), 1)};
24911 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
24913 // Return { result, isValid, chain }.
24914 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
24915 SDValue(Result.getNode(), 2));
24917 case GATHER_AVX2: {
24918 SDValue Chain = Op.getOperand(0);
24919 SDValue Src = Op.getOperand(2);
24920 SDValue Base = Op.getOperand(3);
24921 SDValue Index = Op.getOperand(4);
24922 SDValue Mask = Op.getOperand(5);
24923 SDValue Scale = Op.getOperand(6);
24924 return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
24925 Scale, Chain, Subtarget);
24928 // gather(v1, mask, index, base, scale);
24929 SDValue Chain = Op.getOperand(0);
24930 SDValue Src = Op.getOperand(2);
24931 SDValue Base = Op.getOperand(3);
24932 SDValue Index = Op.getOperand(4);
24933 SDValue Mask = Op.getOperand(5);
24934 SDValue Scale = Op.getOperand(6);
24935 return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
24939 // scatter(base, mask, index, v1, scale);
24940 SDValue Chain = Op.getOperand(0);
24941 SDValue Base = Op.getOperand(2);
24942 SDValue Mask = Op.getOperand(3);
24943 SDValue Index = Op.getOperand(4);
24944 SDValue Src = Op.getOperand(5);
24945 SDValue Scale = Op.getOperand(6);
24946 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
24947 Scale, Chain, Subtarget);
24950 const APInt &HintVal = Op.getConstantOperandAPInt(6);
24951 assert((HintVal == 2 || HintVal == 3) &&
24952 "Wrong prefetch hint in intrinsic: should be 2 or 3");
24953 unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
24954 SDValue Chain = Op.getOperand(0);
24955 SDValue Mask = Op.getOperand(2);
24956 SDValue Index = Op.getOperand(3);
24957 SDValue Base = Op.getOperand(4);
24958 SDValue Scale = Op.getOperand(5);
24959 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
24962 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
24964 SmallVector<SDValue, 2> Results;
24965 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
24967 return DAG.getMergeValues(Results, dl);
24969 // Read Performance Monitoring Counters.
24971 // GetExtended Control Register.
24973 SmallVector<SDValue, 2> Results;
24975 // RDPMC uses ECX to select the index of the performance counter to read.
24976 // XGETBV uses ECX to select the index of the XCR register to return.
24977 // The result is stored into registers EDX:EAX.
24978 expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
24979 Subtarget, Results);
24980 return DAG.getMergeValues(Results, dl);
24982 // XTEST intrinsics.
24984 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
24985 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
24987 SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
24988 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
24989 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
24990 Ret, SDValue(InTrans.getNode(), 1));
24992 case TRUNCATE_TO_MEM_VI8:
24993 case TRUNCATE_TO_MEM_VI16:
24994 case TRUNCATE_TO_MEM_VI32: {
24995 SDValue Mask = Op.getOperand(4);
24996 SDValue DataToTruncate = Op.getOperand(3);
24997 SDValue Addr = Op.getOperand(2);
24998 SDValue Chain = Op.getOperand(0);
25000 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
25001 assert(MemIntr && "Expected MemIntrinsicSDNode!");
25003 EVT MemVT = MemIntr->getMemoryVT();
25005 uint16_t TruncationOp = IntrData->Opc0;
25006 switch (TruncationOp) {
25007 case X86ISD::VTRUNC: {
25008 if (isAllOnesConstant(Mask)) // return just a truncate store
25009 return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
25010 MemIntr->getMemOperand());
25012 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
25013 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25014 SDValue Offset = DAG.getUNDEF(VMask.getValueType());
25016 return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
25017 MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
25018 true /* truncating */);
25020 case X86ISD::VTRUNCUS:
25021 case X86ISD::VTRUNCS: {
25022 bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
25023 if (isAllOnesConstant(Mask))
25024 return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
25025 MemIntr->getMemOperand(), DAG);
25027 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
25028 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25030 return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
25031 VMask, MemVT, MemIntr->getMemOperand(), DAG);
25034 llvm_unreachable("Unsupported truncstore intrinsic");
25040 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
25041 SelectionDAG &DAG) const {
25042 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
25043 MFI.setReturnAddressIsTaken(true);
25045 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
25048 unsigned Depth = Op.getConstantOperandVal(0);
25050 EVT PtrVT = getPointerTy(DAG.getDataLayout());
25053 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
25054 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25055 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
25056 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
25057 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
25058 MachinePointerInfo());
25061 // Just load the return address.
25062 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
25063 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
25064 MachinePointerInfo());
25067 SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
25068 SelectionDAG &DAG) const {
25069 DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
25070 return getReturnAddressFrameIndex(DAG);
25073 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
25074 MachineFunction &MF = DAG.getMachineFunction();
25075 MachineFrameInfo &MFI = MF.getFrameInfo();
25076 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
25077 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25078 EVT VT = Op.getValueType();
25080 MFI.setFrameAddressIsTaken(true);
25082 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
25083 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
25084 // is not possible to crawl up the stack without looking at the unwind codes
25085 // simultaneously.
25086 int FrameAddrIndex = FuncInfo->getFAIndex();
25087 if (!FrameAddrIndex) {
25088 // Set up a frame object for the return address.
25089 unsigned SlotSize = RegInfo->getSlotSize();
25090 FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
25091 SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
25092 FuncInfo->setFAIndex(FrameAddrIndex);
25094 return DAG.getFrameIndex(FrameAddrIndex, VT);
25097 unsigned FrameReg =
25098 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
25099 SDLoc dl(Op); // FIXME probably not meaningful
25100 unsigned Depth = Op.getConstantOperandVal(0);
25101 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
25102 (FrameReg == X86::EBP && VT == MVT::i32)) &&
25103 "Invalid Frame Register!");
25104 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
25106 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
25107 MachinePointerInfo());
25111 // FIXME? Maybe this could be a TableGen attribute on some registers and
25112 // this table could be generated automatically from RegInfo.
25113 Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
25114 const MachineFunction &MF) const {
25115 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
25117 Register Reg = StringSwitch<unsigned>(RegName)
25118 .Case("esp", X86::ESP)
25119 .Case("rsp", X86::RSP)
25120 .Case("ebp", X86::EBP)
25121 .Case("rbp", X86::RBP)
25124 if (Reg == X86::EBP || Reg == X86::RBP) {
25125 if (!TFI.hasFP(MF))
25126 report_fatal_error("register " + StringRef(RegName) +
25127 " is allocatable: function has no frame pointer");
25130 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25131 Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
25132 assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
25133 "Invalid Frame Register!");
25141 report_fatal_error("Invalid register name global variable");
25144 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
25145 SelectionDAG &DAG) const {
25146 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25147 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
25150 unsigned X86TargetLowering::getExceptionPointerRegister(
25151 const Constant *PersonalityFn) const {
25152 if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
25153 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
25155 return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
25158 unsigned X86TargetLowering::getExceptionSelectorRegister(
25159 const Constant *PersonalityFn) const {
25160 // Funclet personalities don't use selectors (the runtime does the selection).
25161 assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
25162 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
25165 bool X86TargetLowering::needsFixedCatchObjects() const {
25166 return Subtarget.isTargetWin64();
25169 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
25170 SDValue Chain = Op.getOperand(0);
25171 SDValue Offset = Op.getOperand(1);
25172 SDValue Handler = Op.getOperand(2);
25175 EVT PtrVT = getPointerTy(DAG.getDataLayout());
25176 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25177 Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
25178 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
25179 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
25180 "Invalid Frame Register!");
25181 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
25182 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
25184 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
25185 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
25187 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
25188 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
25189 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
25191 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
25192 DAG.getRegister(StoreAddrReg, PtrVT));
25195 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
25196 SelectionDAG &DAG) const {
25198 // If the subtarget is not 64-bit, we may need the global base register
25199 // after isel pseudo expansion, i.e., after the CGBR pass has run.
25200 // Therefore, ask for the GlobalBaseReg now, so that the pass
25201 // inserts the code for us in case we need it.
25202 // Otherwise, we will end up in a situation where we will
25203 // reference a virtual register that is not defined!
25204 if (!Subtarget.is64Bit()) {
25205 const X86InstrInfo *TII = Subtarget.getInstrInfo();
25206 (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
25208 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
25209 DAG.getVTList(MVT::i32, MVT::Other),
25210 Op.getOperand(0), Op.getOperand(1));
25213 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
25214 SelectionDAG &DAG) const {
25216 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
25217 Op.getOperand(0), Op.getOperand(1));
25220 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
25221 SelectionDAG &DAG) const {
25223 return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
25227 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
25228 return Op.getOperand(0);
25231 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
25232 SelectionDAG &DAG) const {
25233 SDValue Root = Op.getOperand(0);
25234 SDValue Trmp = Op.getOperand(1); // trampoline
25235 SDValue FPtr = Op.getOperand(2); // nested function
25236 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
25239 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
25240 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
25242 if (Subtarget.is64Bit()) {
25243 SDValue OutChains[6];
25245 // Large code-model.
25246 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
25247 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
25249 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
25250 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
25252 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
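// The resulting 23-byte trampoline has the following layout (offsets are
// relative to Trmp):
//    0: 49 BB <imm64 FPtr>   movabsq $FPtr, %r11
//   10: 49 BA <imm64 Nest>   movabsq $Nest, %r10
//   20: 49 FF E3             jmpq    *%r11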
25254 // Load the pointer to the nested function into R11.
25255 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
25256 SDValue Addr = Trmp;
25257 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
25258 Addr, MachinePointerInfo(TrmpAddr));
25260 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25261 DAG.getConstant(2, dl, MVT::i64));
25263 DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
25264 /* Alignment = */ 2);
25266 // Load the 'nest' parameter value into R10.
25267 // R10 is specified in X86CallingConv.td
25268 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
25269 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25270 DAG.getConstant(10, dl, MVT::i64));
25271 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
25272 Addr, MachinePointerInfo(TrmpAddr, 10));
25274 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25275 DAG.getConstant(12, dl, MVT::i64));
25277 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
25278 /* Alignment = */ 2);
25280 // Jump to the nested function.
25281 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
25282 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25283 DAG.getConstant(20, dl, MVT::i64));
25284 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
25285 Addr, MachinePointerInfo(TrmpAddr, 20));
25287 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
25288 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25289 DAG.getConstant(22, dl, MVT::i64));
25290 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
25291 Addr, MachinePointerInfo(TrmpAddr, 22));
25293 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
25295 const Function *Func =
25296 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
25297 CallingConv::ID CC = Func->getCallingConv();
25302 llvm_unreachable("Unsupported calling convention");
25303 case CallingConv::C:
25304 case CallingConv::X86_StdCall: {
25305 // Pass 'nest' parameter in ECX.
25306 // Must be kept in sync with X86CallingConv.td
25307 NestReg = X86::ECX;
25309 // Check that ECX wasn't needed by an 'inreg' parameter.
25310 FunctionType *FTy = Func->getFunctionType();
25311 const AttributeList &Attrs = Func->getAttributes();
25313 if (!Attrs.isEmpty() && !Func->isVarArg()) {
25314 unsigned InRegCount = 0;
25317 for (FunctionType::param_iterator I = FTy->param_begin(),
25318 E = FTy->param_end(); I != E; ++I, ++Idx)
25319 if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
25320 auto &DL = DAG.getDataLayout();
25321 // FIXME: should only count parameters that are lowered to integers.
25322 InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
25325 if (InRegCount > 2) {
25326 report_fatal_error("Nest register in use - reduce number of inreg"
25332 case CallingConv::X86_FastCall:
25333 case CallingConv::X86_ThisCall:
25334 case CallingConv::Fast:
25335 case CallingConv::Tail:
25336 // Pass 'nest' parameter in EAX.
25337 // Must be kept in sync with X86CallingConv.td
25338 NestReg = X86::EAX;
25342 SDValue OutChains[4];
25343 SDValue Addr, Disp;
25345 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25346 DAG.getConstant(10, dl, MVT::i32));
25347 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
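// The 32-bit trampoline is 10 bytes: "B8+r <imm32 Nest>" (movl $Nest, %reg)
// followed by "E9 <rel32>" (jmp FPtr). Disp is that rel32 operand: the target
// minus the address of the byte following the jmp, i.e. FPtr - (Trmp + 10).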
25349 // This is storing the opcode for MOV32ri.
25350 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
25351 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
25353 DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
25354 Trmp, MachinePointerInfo(TrmpAddr));
25356 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25357 DAG.getConstant(1, dl, MVT::i32));
25359 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
25360 /* Alignment = */ 1);
25362 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
25363 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25364 DAG.getConstant(5, dl, MVT::i32));
25365 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
25366 Addr, MachinePointerInfo(TrmpAddr, 5),
25367 /* Alignment = */ 1);
25369 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25370 DAG.getConstant(6, dl, MVT::i32));
25372 DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
25373 /* Alignment = */ 1);
25375 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
25379 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
25380 SelectionDAG &DAG) const {
25382 The rounding mode is in bits 11:10 of FPSR, and has the following
25383 settings:
25384 00 Round to nearest
25385 01 Round to -inf
25386 10 Round to +inf
25387 11 Round to 0
25389 FLT_ROUNDS, on the other hand, expects the following:
25390 -1 Undefined
25391 0 Round to 0
25392 1 Round to nearest
25393 2 Round to +inf
25394 3 Round to -inf
25396 To perform the conversion, we do:
25397 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
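For example, RC = 10 (round to +inf) gives ((1) | (0 << 1)) + 1 = 2, the
FLT_ROUNDS encoding of round to +inf, and RC = 11 (round to 0) gives
(1 | 2) + 1 = 4, which the final & 3 wraps to 0.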
25400 MachineFunction &MF = DAG.getMachineFunction();
25401 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
25402 const Align StackAlignment(TFI.getStackAlignment());
25403 MVT VT = Op.getSimpleValueType();
25406 // Save FP Control Word to stack slot
25408 MF.getFrameInfo().CreateStackObject(2, StackAlignment.value(), false);
25409 SDValue StackSlot =
25410 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
25412 MachineMemOperand *MMO =
25413 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
25414 MachineMemOperand::MOStore, 2, 2);
25416 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
25417 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
25418 DAG.getVTList(MVT::Other),
25419 Ops, MVT::i16, MMO);
25421 // Load FP Control Word from stack slot
25423 DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
25425 // Transform as necessary
25427 DAG.getNode(ISD::SRL, DL, MVT::i16,
25428 DAG.getNode(ISD::AND, DL, MVT::i16,
25429 CWD, DAG.getConstant(0x800, DL, MVT::i16)),
25430 DAG.getConstant(11, DL, MVT::i8));
25432 DAG.getNode(ISD::SRL, DL, MVT::i16,
25433 DAG.getNode(ISD::AND, DL, MVT::i16,
25434 CWD, DAG.getConstant(0x400, DL, MVT::i16)),
25435 DAG.getConstant(9, DL, MVT::i8));
25438 DAG.getNode(ISD::AND, DL, MVT::i16,
25439 DAG.getNode(ISD::ADD, DL, MVT::i16,
25440 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
25441 DAG.getConstant(1, DL, MVT::i16)),
25442 DAG.getConstant(3, DL, MVT::i16));
25444 return DAG.getNode((VT.getSizeInBits() < 16 ?
25445 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
25448 // Split a unary integer op into 2 half-sized ops.
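// For example, a v32i8 ctlz becomes concat(ctlz(lo v16i8), ctlz(hi v16i8)).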
25449 static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
25450 MVT VT = Op.getSimpleValueType();
25451 unsigned NumElems = VT.getVectorNumElements();
25452 unsigned SizeInBits = VT.getSizeInBits();
25453 MVT EltVT = VT.getVectorElementType();
25454 SDValue Src = Op.getOperand(0);
25455 assert(EltVT == Src.getSimpleValueType().getVectorElementType() &&
25456 "Src and Op should have the same element type!");
25458 // Extract the Lo/Hi vectors
25460 SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
25461 SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
25463 MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
25464 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
25465 DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
25466 DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
25469 // Decompose 256-bit ops into smaller 128-bit ops.
25470 static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
25471 assert(Op.getSimpleValueType().is256BitVector() &&
25472 Op.getSimpleValueType().isInteger() &&
25473 "Only handle AVX 256-bit vector integer operation");
25474 return LowerVectorIntUnary(Op, DAG);
25477 // Decompose 512-bit ops into smaller 256-bit ops.
25478 static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
25479 assert(Op.getSimpleValueType().is512BitVector() &&
25480 Op.getSimpleValueType().isInteger() &&
25481 "Only handle AVX 512-bit vector integer operation");
25482 return LowerVectorIntUnary(Op, DAG);
25485 /// Lower a vector CTLZ using the natively supported vector CTLZ instruction.
25487 // i8/i16 vectors are implemented using the dword LZCNT vector instruction
25488 // ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
25489 // split the vector, perform the operation on its Lo and Hi parts, and
25490 // concatenate the results.
25491 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
25492 const X86Subtarget &Subtarget) {
25493 assert(Op.getOpcode() == ISD::CTLZ);
25495 MVT VT = Op.getSimpleValueType();
25496 MVT EltVT = VT.getVectorElementType();
25497 unsigned NumElems = VT.getVectorNumElements();
25499 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
25500 "Unsupported element type");
25502 // Split the vector; its Lo and Hi parts will be handled in the next iteration.
25503 if (NumElems > 16 ||
25504 (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
25505 return LowerVectorIntUnary(Op, DAG);
25507 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
25508 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
25509 "Unsupported value type for operation");
25511 // Use native supported vector instruction vplzcntd.
25512 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
25513 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
25514 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
25515 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
25517 return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
25520 // Lower CTLZ using a PSHUFB lookup table implementation.
25521 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
25522 const X86Subtarget &Subtarget,
25523 SelectionDAG &DAG) {
25524 MVT VT = Op.getSimpleValueType();
25525 int NumElts = VT.getVectorNumElements();
25526 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
25527 MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
25529 // Per-nibble leading zero PSHUFB lookup table.
25530 const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
25531 /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
25532 /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
25533 /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
25535 SmallVector<SDValue, 64> LUTVec;
25536 for (int i = 0; i < NumBytes; ++i)
25537 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
25538 SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
25540 // Begin by bitcasting the input to a byte vector, then split those bytes
25541 // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
25542 // If the hi input nibble is zero then we add both results together, otherwise
25543 // we just take the hi result (by masking the lo result to zero before the
25544 // addition).
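// For example, for the byte 0x05 the hi nibble is 0 (LUT -> 4) and the lo
// nibble is 5 (LUT -> 1), so adding them gives ctlz(0x05) == 5; for 0x50 the
// hi nibble is non-zero, so only its LUT value is used and ctlz(0x50) == 1.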
25545 SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
25546 SDValue Zero = DAG.getConstant(0, DL, CurrVT);
25548 SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
25550 SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
25552 if (CurrVT.is512BitVector()) {
25553 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
25554 HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
25555 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
25557 HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
25560 Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
25561 Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
25562 Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
25563 SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
25565 // Merge the result from vXi8 back to VT, working on the lo/hi halves
25566 // of the current vector width in the same way we did for the nibbles.
25567 // If the upper half of the input element is zero then add the halves'
25568 // leading zero counts together, otherwise just use the upper half's.
25569 // Double the width of the result until we are at target width.
25570 while (CurrVT != VT) {
25571 int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
25572 int CurrNumElts = CurrVT.getVectorNumElements();
25573 MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
25574 MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
25575 SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
25577 // Check if the upper half of the input element is zero.
25578 if (CurrVT.is512BitVector()) {
25579 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
25580 HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
25581 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
25582 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
25584 HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
25585 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
25587 HiZ = DAG.getBitcast(NextVT, HiZ);
25589 // Move the upper/lower halves to the lower bits as we'll be extending to
25590 // NextVT. Mask the lower result to zero if HiZ is true and add the results
25591 // together.
25592 SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
25593 SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
25594 SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
25595 R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
25596 Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
25603 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
25604 const X86Subtarget &Subtarget,
25605 SelectionDAG &DAG) {
25606 MVT VT = Op.getSimpleValueType();
25608 if (Subtarget.hasCDI() &&
25609 // vXi8 vectors need to be promoted to 512-bits for vXi32.
25610 (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
25611 return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
25613 // Decompose 256-bit ops into smaller 128-bit ops.
25614 if (VT.is256BitVector() && !Subtarget.hasInt256())
25615 return Lower256IntUnary(Op, DAG);
25617 // Decompose 512-bit ops into smaller 256-bit ops.
25618 if (VT.is512BitVector() && !Subtarget.hasBWI())
25619 return Lower512IntUnary(Op, DAG);
25621 assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
25622 return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
25625 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
25626 SelectionDAG &DAG) {
25627 MVT VT = Op.getSimpleValueType();
25629 unsigned NumBits = VT.getSizeInBits();
25631 unsigned Opc = Op.getOpcode();
25634 return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
25636 Op = Op.getOperand(0);
25637 if (VT == MVT::i8) {
25638 // Zero extend to i32 since there is no i8 bsr.
25640 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
25643 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
25644 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
25645 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
25647 if (Opc == ISD::CTLZ) {
25648 // If src is zero (i.e. bsr sets ZF), returns NumBits.
25649 SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
25650 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
25652 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
25655 // Finally xor with NumBits-1.
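// BSR returns the bit index of the most significant set bit; XOR-ing an index
// in [0, NumBits-1] with NumBits-1 equals (NumBits-1) - index, i.e. the
// leading-zero count. For ISD::CTLZ the CMOV above pre-selects 2*NumBits-1 on
// zero input, so the XOR then yields NumBits.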
25656 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
25657 DAG.getConstant(NumBits - 1, dl, OpVT));
25660 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
25664 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
25665 SelectionDAG &DAG) {
25666 MVT VT = Op.getSimpleValueType();
25667 unsigned NumBits = VT.getScalarSizeInBits();
25668 SDValue N0 = Op.getOperand(0);
25671 assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
25672 "Only scalar CTTZ requires custom lowering");
25674 // Issue a bsf (scan bits forward) which also sets EFLAGS.
25675 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
25676 Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
25678 // If src is zero (i.e. bsf sets ZF), returns NumBits.
25679 SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
25680 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
25682 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
25685 /// Break a 256-bit integer operation into two new 128-bit ones and then
25686 /// concatenate the result back.
25687 static SDValue split256IntArith(SDValue Op, SelectionDAG &DAG) {
25688 MVT VT = Op.getSimpleValueType();
25690 assert(VT.is256BitVector() && VT.isInteger() &&
25691 "Unsupported value type for operation");
25693 unsigned NumElems = VT.getVectorNumElements();
25696 // Extract the LHS vectors
25697 SDValue LHS = Op.getOperand(0);
25698 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
25699 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
25701 // Extract the RHS vectors
25702 SDValue RHS = Op.getOperand(1);
25703 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
25704 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
25706 MVT EltVT = VT.getVectorElementType();
25707 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
25709 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
25710 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
25711 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
25714 /// Break a 512-bit integer operation into two new 256-bit ones and then
25715 /// concatenate the result back.
25716 static SDValue split512IntArith(SDValue Op, SelectionDAG &DAG) {
25717 MVT VT = Op.getSimpleValueType();
25719 assert(VT.is512BitVector() && VT.isInteger() &&
25720 "Unsupported value type for operation");
25722 unsigned NumElems = VT.getVectorNumElements();
25725 // Extract the LHS vectors
25726 SDValue LHS = Op.getOperand(0);
25727 SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
25728 SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
25730 // Extract the RHS vectors
25731 SDValue RHS = Op.getOperand(1);
25732 SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
25733 SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
25735 MVT EltVT = VT.getVectorElementType();
25736 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
25738 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
25739 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
25740 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
25743 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
25744 const X86Subtarget &Subtarget) {
25745 MVT VT = Op.getSimpleValueType();
25746 if (VT == MVT::i16 || VT == MVT::i32)
25747 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
25749 if (VT.getScalarType() == MVT::i1)
25750 return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
25751 Op.getOperand(0), Op.getOperand(1));
25753 assert(Op.getSimpleValueType().is256BitVector() &&
25754 Op.getSimpleValueType().isInteger() &&
25755 "Only handle AVX 256-bit vector integer operation");
25756 return split256IntArith(Op, DAG);
25759 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
25760 const X86Subtarget &Subtarget) {
25761 MVT VT = Op.getSimpleValueType();
25762 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
25763 unsigned Opcode = Op.getOpcode();
25764 if (VT.getScalarType() == MVT::i1) {
25767 default: llvm_unreachable("Expected saturated arithmetic opcode");
25770 // *addsat i1 X, Y --> X | Y
25771 return DAG.getNode(ISD::OR, dl, VT, X, Y);
25774 // *subsat i1 X, Y --> X & ~Y
25775 return DAG.getNode(ISD::AND, dl, VT, X, DAG.getNOT(dl, Y, VT));
25779 if (VT.is128BitVector()) {
25780 // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
25781 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25782 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
25783 *DAG.getContext(), VT);
25785 if (Opcode == ISD::UADDSAT && !TLI.isOperationLegal(ISD::UMIN, VT)) {
25786 // uaddsat X, Y --> (X >u (X + Y)) ? -1 : X + Y
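// Unsigned addition wraps exactly when the truncated sum is smaller than
// either operand, so X >u (X + Y) detects overflow and selects all-ones.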
25787 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, X, Y);
25788 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Add, ISD::SETUGT);
25789 return DAG.getSelect(DL, VT, Cmp, DAG.getAllOnesConstant(DL, VT), Add);
25791 if (Opcode == ISD::USUBSAT && !TLI.isOperationLegal(ISD::UMAX, VT)) {
25792 // usubsat X, Y --> (X >u Y) ? X - Y : 0
25793 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
25794 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
25795 return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
25797 // Use default expansion.
25801 assert(Op.getSimpleValueType().is256BitVector() &&
25802 Op.getSimpleValueType().isInteger() &&
25803 "Only handle AVX 256-bit vector integer operation");
25804 return split256IntArith(Op, DAG);
25807 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
25808 SelectionDAG &DAG) {
25809 MVT VT = Op.getSimpleValueType();
25810 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
25811 // Since X86 does not have CMOV for 8-bit integer, we don't convert
25812 // 8-bit integer abs to NEG and CMOV.
25814 SDValue N0 = Op.getOperand(0);
25815 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
25816 DAG.getConstant(0, DL, VT), N0);
25817 SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_GE, DL, MVT::i8),
25818 SDValue(Neg.getNode(), 1)};
25819 return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
25822 // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
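// BLENDV selects per element on the sign bit of its first operand (here X
// itself), so negative elements take 0-X and non-negative elements keep X.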
25823 if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
25825 SDValue Src = Op.getOperand(0);
25827 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
25828 return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
25831 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
25832 assert(VT.isInteger() &&
25833 "Only handle AVX 256-bit vector integer operation");
25834 return Lower256IntUnary(Op, DAG);
25837 // Default to expand.
25841 static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
25842 MVT VT = Op.getSimpleValueType();
25844 // For AVX1 cases, split to use legal ops (everything but v4i64).
25845 if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
25846 return split256IntArith(Op, DAG);
25849 unsigned Opcode = Op.getOpcode();
25850 SDValue N0 = Op.getOperand(0);
25851 SDValue N1 = Op.getOperand(1);
25853 // For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
25854 // using the SMIN/SMAX instructions and flipping the signbit back.
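// This works because x <u y iff (x ^ 0x8000) <s (y ^ 0x8000): flipping the
// sign bit maps the unsigned ordering onto the signed ordering of i16.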
25855 if (VT == MVT::v8i16) {
25856 assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
25857 "Unexpected MIN/MAX opcode");
25858 SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
25859 N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
25860 N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
25861 Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
25862 SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
25863 return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
25866 // Else, expand to a compare/select.
25869 case ISD::SMIN: CC = ISD::CondCode::SETLT; break;
25870 case ISD::SMAX: CC = ISD::CondCode::SETGT; break;
25871 case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
25872 case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
25873 default: llvm_unreachable("Unknown MINMAX opcode");
25876 SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
25877 return DAG.getSelect(DL, VT, Cond, N0, N1);
25880 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
25881 SelectionDAG &DAG) {
25883 MVT VT = Op.getSimpleValueType();
25885 if (VT.getScalarType() == MVT::i1)
25886 return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
25888 // Decompose 256-bit ops into 128-bit ops.
25889 if (VT.is256BitVector() && !Subtarget.hasInt256())
25890 return split256IntArith(Op, DAG);
25892 SDValue A = Op.getOperand(0);
25893 SDValue B = Op.getOperand(1);
25895 // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
25896 // vector pairs, multiply and truncate.
25897 if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
25898 unsigned NumElts = VT.getVectorNumElements();
25900 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
25901 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
25902 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
25903 return DAG.getNode(
25904 ISD::TRUNCATE, dl, VT,
25905 DAG.getNode(ISD::MUL, dl, ExVT,
25906 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
25907 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
25910 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25912 // Extract the lo/hi parts and any-extend them to i16.
25913 // We're only going to keep the low byte of each result element of the
25914 // pmullw, so it doesn't matter what's in the high byte of each 16-bit
25915 // element.
25916 SDValue Undef = DAG.getUNDEF(VT);
25917 SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
25918 SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
25921 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
25922 // If B (the RHS) is a constant, manually unpackl/unpackh it.
25923 SmallVector<SDValue, 16> LoOps, HiOps;
25924 for (unsigned i = 0; i != NumElts; i += 16) {
25925 for (unsigned j = 0; j != 8; ++j) {
25926 LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
25928 HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
25933 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
25934 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
25936 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
25937 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
25940 // Multiply, mask the lower 8 bits of the lo/hi results and pack.
25941 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
25942 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
25943 RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
25944 RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
25945 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
25948 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
25949 if (VT == MVT::v4i32) {
25950 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
25951 "Should not custom lower when pmulld is available!");
25953 // Extract the odd parts.
25954 static const int UnpackMask[] = { 1, -1, 3, -1 };
25955 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
25956 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
25958 // Multiply the even parts.
25959 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
25960 DAG.getBitcast(MVT::v2i64, A),
25961 DAG.getBitcast(MVT::v2i64, B));
25962 // Now multiply odd parts.
25963 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
25964 DAG.getBitcast(MVT::v2i64, Aodds),
25965 DAG.getBitcast(MVT::v2i64, Bodds));
25967 Evens = DAG.getBitcast(VT, Evens);
25968 Odds = DAG.getBitcast(VT, Odds);
25970 // Merge the two vectors back together with a shuffle. This expands into 2
25971 // shuffles.
25972 static const int ShufMask[] = { 0, 4, 2, 6 };
25973 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
25976 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
25977 "Only know how to lower V2I64/V4I64/V8I64 multiply");
25978 assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
25980 // Ahi = psrlqi(a, 32);
25981 // Bhi = psrlqi(b, 32);
25983 // AloBlo = pmuludq(a, b);
25984 // AloBhi = pmuludq(a, Bhi);
25985 // AhiBlo = pmuludq(Ahi, b);
25987 // Hi = psllqi(AloBhi + AhiBlo, 32);
25988 // return AloBlo + Hi;
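// This is the standard 32x32->64 partial-product expansion: writing
// a = Ahi*2^32 + Alo and b = Bhi*2^32 + Blo,
//   a*b mod 2^64 = Alo*Blo + ((Alo*Bhi + Ahi*Blo) << 32)
// since the Ahi*Bhi term is entirely shifted out of the low 64 bits.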
25989 KnownBits AKnown = DAG.computeKnownBits(A);
25990 KnownBits BKnown = DAG.computeKnownBits(B);
25992 APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
25993 bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
25994 bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
25996 APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
25997 bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
25998 bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
26000 SDValue Zero = DAG.getConstant(0, dl, VT);
26002 // Only multiply lo/hi halves that aren't known to be zero.
26003 SDValue AloBlo = Zero;
26004 if (!ALoIsZero && !BLoIsZero)
26005 AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
26007 SDValue AloBhi = Zero;
26008 if (!ALoIsZero && !BHiIsZero) {
26009 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
26010 AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
26013 SDValue AhiBlo = Zero;
26014 if (!AHiIsZero && !BLoIsZero) {
26015 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
26016 AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
26019 SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
26020 Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
26022 return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
26025 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
26026 SelectionDAG &DAG) {
26028 MVT VT = Op.getSimpleValueType();
26029 bool IsSigned = Op->getOpcode() == ISD::MULHS;
26030 unsigned NumElts = VT.getVectorNumElements();
26031 SDValue A = Op.getOperand(0);
26032 SDValue B = Op.getOperand(1);
26034 // Decompose 256-bit ops into 128-bit ops.
26035 if (VT.is256BitVector() && !Subtarget.hasInt256())
26036 return split256IntArith(Op, DAG);
26038 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
26039 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
26040 (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
26041 (VT == MVT::v16i32 && Subtarget.hasAVX512()));
26043 // PMULxD operations multiply each even value (starting at 0) of LHS with
26044 // the related value of RHS and produce a widened result.
26045 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
26046 // => <2 x i64> <ae|cg>
26048 // In other words, to have all the results, we need to perform two PMULxD:
26049 // 1. one with the even values.
26050 // 2. one with the odd values.
26051 // To achieve #2, we need to place the odd values at an even position.
26053 // Place the odd value at an even position (basically, shift all values 1
26054 // step to the left):
26055 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1,
26056 9, -1, 11, -1, 13, -1, 15, -1};
26057 // <a|b|c|d> => <b|undef|d|undef>
26058 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
26059 makeArrayRef(&Mask[0], NumElts));
26060 // <e|f|g|h> => <f|undef|h|undef>
26061 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
26062 makeArrayRef(&Mask[0], NumElts));
26064 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
26065 // ints.
26066 MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
26067 unsigned Opcode =
26068 (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
26069 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
26070 // => <2 x i64> <ae|cg>
26071 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
26072 DAG.getBitcast(MulVT, A),
26073 DAG.getBitcast(MulVT, B)));
26074 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
26075 // => <2 x i64> <bf|dh>
26076 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
26077 DAG.getBitcast(MulVT, Odd0),
26078 DAG.getBitcast(MulVT, Odd1)));
26080 // Shuffle it back into the right order.
26081 SmallVector<int, 16> ShufMask(NumElts);
26082 for (int i = 0; i != (int)NumElts; ++i)
26083 ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
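// E.g. for v4i32 this produces the mask {1, 5, 3, 7}: take the high (odd) i32
// of each i64 product from Mul1/Mul2, restoring the original element order.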
26085 SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
26087 // If we have a signed multiply but no PMULDQ fix up the result of an
26088 // unsigned multiply.
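// This uses the identity mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0)
// - (b < 0 ? a : 0), implemented below with sign-compare masks.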
26089 if (IsSigned && !Subtarget.hasSSE41()) {
26090 SDValue Zero = DAG.getConstant(0, dl, VT);
26091 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
26092 DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
26093 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
26094 DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
26096 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
26097 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
26098 }
26100 return Res;
26101 }
26103 // Only i8 vectors should need custom lowering after this.
26104 assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
26105 (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
26106 "Unsupported vector type");
26108 // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
26109 // logical shift down the upper half and pack back to i8.
26111 // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
26112 // and then ashr/lshr the upper bits down to the lower bits before multiply.
26113 unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
26115 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
26116 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
26117 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
26118 SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
26119 SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
26120 SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
26121 Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
26122 return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
26125 // For signed 512-bit vectors, split into 256-bit vectors to allow the
26126 // sign-extension to occur.
26127 if (VT == MVT::v64i8 && IsSigned)
26128 return split512IntArith(Op, DAG);
26130 // Signed AVX2 implementation - extend xmm subvectors to ymm.
26131 if (VT == MVT::v32i8 && IsSigned) {
26132 MVT ExVT = MVT::v16i16;
26133 SDValue ALo = extract128BitVector(A, 0, DAG, dl);
26134 SDValue BLo = extract128BitVector(B, 0, DAG, dl);
26135 SDValue AHi = extract128BitVector(A, NumElts / 2, DAG, dl);
26136 SDValue BHi = extract128BitVector(B, NumElts / 2, DAG, dl);
26137 ALo = DAG.getNode(ExAVX, dl, ExVT, ALo);
26138 BLo = DAG.getNode(ExAVX, dl, ExVT, BLo);
26139 AHi = DAG.getNode(ExAVX, dl, ExVT, AHi);
26140 BHi = DAG.getNode(ExAVX, dl, ExVT, BHi);
26141 SDValue Lo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
26142 SDValue Hi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
26143 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Lo, 8, DAG);
26144 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Hi, 8, DAG);
26146 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
26147 // Shuffle lowering should turn this into PACKUS+PERMQ
26148 Lo = DAG.getBitcast(VT, Lo);
26149 Hi = DAG.getBitcast(VT, Hi);
26150 return DAG.getVectorShuffle(VT, dl, Lo, Hi,
26151 { 0, 2, 4, 6, 8, 10, 12, 14,
26152 16, 18, 20, 22, 24, 26, 28, 30,
26153 32, 34, 36, 38, 40, 42, 44, 46,
26154 48, 50, 52, 54, 56, 58, 60, 62});
26157 // For signed v16i8 and all unsigned vXi8 we will unpack the low and high
26158 // half of each 128 bit lane to widen to a vXi16 type. Do the multiplies,
26159 // shift the results and pack the half lane results back together.
26161 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26163 static const int PSHUFDMask[] = { 8, 9, 10, 11, 12, 13, 14, 15,
26164 -1, -1, -1, -1, -1, -1, -1, -1};
26166 // Extract the lo parts and zero/sign extend to i16.
26167 // Only use SSE4.1 instructions for signed v16i8 where using unpack requires
26168 // shifts to sign extend. Using unpack for unsigned only requires an xor to
26169 // create zeros and a copy due to tied register constraints pre-AVX. But using
26170 // zero_extend_vector_inreg would require an additional pshufd for the high
26171 // part.
26173 SDValue ALo, AHi;
26174 if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
26175 ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);
26177 AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
26178 AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
26179 } else if (IsSigned) {
26180 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
26181 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));
26183 ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
26184 AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
26185 } else {
26186 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
26187 DAG.getConstant(0, dl, VT)));
26188 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
26189 DAG.getConstant(0, dl, VT)));
26190 }
26192 SDValue BLo, BHi;
26193 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
26194 // If the RHS is a constant, manually unpackl/unpackh and extend.
26195 SmallVector<SDValue, 16> LoOps, HiOps;
26196 for (unsigned i = 0; i != NumElts; i += 16) {
26197 for (unsigned j = 0; j != 8; ++j) {
26198 SDValue LoOp = B.getOperand(i + j);
26199 SDValue HiOp = B.getOperand(i + j + 8);
26201 if (IsSigned) {
26202 LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
26203 HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
26204 } else {
26205 LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
26206 HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
26207 }
26209 LoOps.push_back(LoOp);
26210 HiOps.push_back(HiOp);
26211 }
26212 }
26214 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
26215 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
26216 } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
26217 BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);
26219 BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
26220 BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
26221 } else if (IsSigned) {
26222 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
26223 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));
26225 BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
26226 BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
26227 } else {
26228 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
26229 DAG.getConstant(0, dl, VT)));
26230 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
26231 DAG.getConstant(0, dl, VT)));
26234 // Multiply, lshr the upper 8bits to the lower 8bits of the lo/hi results and
26235 // pack back to vXi8.
26236 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
26237 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
26238 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
26239 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);
26241 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
26242 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
26245 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
26246 assert(Subtarget.isTargetWin64() && "Unexpected target");
26247 EVT VT = Op.getValueType();
26248 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
26249 "Unexpected return type for lowering");
26251 RTLIB::Libcall LC;
26252 bool isSigned;
26253 switch (Op->getOpcode()) {
26254 default: llvm_unreachable("Unexpected request for libcall!");
26255 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
26256 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
26257 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
26258 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
26259 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
26260 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
26261 }
26263 SDLoc dl(Op);
26264 SDValue InChain = DAG.getEntryNode();
26266 TargetLowering::ArgListTy Args;
26267 TargetLowering::ArgListEntry Entry;
26268 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
26269 EVT ArgVT = Op->getOperand(i).getValueType();
26270 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
26271 "Unexpected argument type for lowering");
26272 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
26273 Entry.Node = StackPtr;
26274 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
26275 MachinePointerInfo(), /* Alignment = */ 16);
26276 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
26277 Entry.Ty = PointerType::get(ArgTy,0);
26278 Entry.IsSExt = false;
26279 Entry.IsZExt = false;
26280 Args.push_back(Entry);
26283 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
26284 getPointerTy(DAG.getDataLayout()));
26286 TargetLowering::CallLoweringInfo CLI(DAG);
26287 CLI.setDebugLoc(dl)
26288 .setChain(InChain)
26289 .setLibCallee(
26290 getLibcallCallingConv(LC),
26291 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
26292 std::move(Args))
26293 .setInRegister()
26294 .setSExtResult(isSigned)
26295 .setZExtResult(!isSigned);
26297 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
26298 return DAG.getBitcast(VT, CallInfo.first);
26301 // Return true if the required (according to Opcode) shift-imm form is natively
26302 // supported by the Subtarget
26303 static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
26304 unsigned Opcode) {
26305 if (VT.getScalarSizeInBits() < 16)
26306 return false;
26308 if (VT.is512BitVector() && Subtarget.hasAVX512() &&
26309 (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
26310 return true;
26312 bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
26313 (VT.is256BitVector() && Subtarget.hasInt256());
26315 bool AShift = LShift && (Subtarget.hasAVX512() ||
26316 (VT != MVT::v2i64 && VT != MVT::v4i64));
26317 return (Opcode == ISD::SRA) ? AShift : LShift;
26318 }
26320 // The shift amount is a variable, but it is the same for all vector lanes.
26321 // These instructions are defined together with shift-immediate.
26322 static
26323 bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
26324 unsigned Opcode) {
26325 return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
26326 }
26328 // Return true if the required (according to Opcode) variable-shift form is
26329 // natively supported by the Subtarget
26330 static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
26331 unsigned Opcode) {
26333 if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
26334 return false;
26336 // vXi16 supported only on AVX-512, BWI
26337 if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
26338 return false;
26340 if (Subtarget.hasAVX512())
26341 return true;
26343 bool LShift = VT.is128BitVector() || VT.is256BitVector();
26344 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
26345 return (Opcode == ISD::SRA) ? AShift : LShift;
26348 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
26349 const X86Subtarget &Subtarget) {
26350 MVT VT = Op.getSimpleValueType();
26351 SDLoc dl(Op);
26352 SDValue R = Op.getOperand(0);
26353 SDValue Amt = Op.getOperand(1);
26354 unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
26356 auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
26357 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
26358 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
26359 SDValue Ex = DAG.getBitcast(ExVT, R);
26361 // ashr(R, 63) === cmp_slt(R, 0)
26362 if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
26363 assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
26364 "Unsupported PCMPGT op");
26365 return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
26368 if (ShiftAmt >= 32) {
26369 // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
26370 SDValue Upper =
26371 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
26372 SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
26373 ShiftAmt - 32, DAG);
26374 if (VT == MVT::v2i64)
26375 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
26376 if (VT == MVT::v4i64)
26377 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
26378 {9, 1, 11, 3, 13, 5, 15, 7});
26379 } else {
26380 // SRA upper i32, SRL whole i64 and select lower i32.
26381 SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
26382 ShiftAmt, DAG);
26383 SDValue Lower =
26384 getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
26385 Lower = DAG.getBitcast(ExVT, Lower);
26386 if (VT == MVT::v2i64)
26387 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
26388 if (VT == MVT::v4i64)
26389 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
26390 {8, 1, 10, 3, 12, 5, 14, 7});
26392 return DAG.getBitcast(VT, Ex);
26395 // Optimize shl/srl/sra with constant shift amount.
26396 APInt APIntShiftAmt;
26397 if (!X86::isConstantSplat(Amt, APIntShiftAmt))
26398 return SDValue();
26400 // If the shift amount is out of range, return undef.
26401 if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
26402 return DAG.getUNDEF(VT);
26404 uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
26406 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
26407 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
26409 // i64 SRA needs to be performed as partial shifts.
26410 if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
26411 (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
26412 Op.getOpcode() == ISD::SRA)
26413 return ArithmeticShiftRight64(ShiftAmt);
26415 if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
26416 VT == MVT::v64i8) {
26417 unsigned NumElts = VT.getVectorNumElements();
26418 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26420 // Simple i8 add case
26421 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
26422 return DAG.getNode(ISD::ADD, dl, VT, R, R);
26424 // ashr(R, 7) === cmp_slt(R, 0)
26425 if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
26426 SDValue Zeros = DAG.getConstant(0, dl, VT);
26427 if (VT.is512BitVector()) {
26428 assert(VT == MVT::v64i8 && "Unexpected element type!");
26429 SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
26430 return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
26432 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
26435 // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
26436 if (VT == MVT::v16i8 && Subtarget.hasXOP())
26437 return SDValue();
26439 if (Op.getOpcode() == ISD::SHL) {
26440 // Make a large shift.
26441 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
26442 ShiftAmt, DAG);
26443 SHL = DAG.getBitcast(VT, SHL);
26444 // Zero out the rightmost bits.
26445 APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
26446 return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
26448 if (Op.getOpcode() == ISD::SRL) {
26449 // Make a large shift.
26450 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
26451 ShiftAmt, DAG);
26452 SRL = DAG.getBitcast(VT, SRL);
26453 // Zero out the leftmost bits.
26454 return DAG.getNode(ISD::AND, dl, VT, SRL,
26455 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
26457 if (Op.getOpcode() == ISD::SRA) {
26458 // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
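// Mask is the shifted-in sign bit (128 >> Amt). E.g. for Amt == 3 and
// R == 0x80 (-128): lshr gives 0x10, xor 0x10 gives 0x00, and subtracting
// 0x10 gives 0xF0 (-16), the arithmetic-shift result.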
26459 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
26461 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
26462 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
26463 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
26464 return Res;
26465 }
26466 llvm_unreachable("Unknown shift opcode.");
26467 }
26469 return SDValue();
26470 }
26472 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
26473 const X86Subtarget &Subtarget) {
26474 MVT VT = Op.getSimpleValueType();
26475 SDLoc dl(Op);
26476 SDValue R = Op.getOperand(0);
26477 SDValue Amt = Op.getOperand(1);
26478 unsigned Opcode = Op.getOpcode();
26479 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
26480 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true);
26482 if (SDValue BaseShAmt = DAG.getSplatValue(Amt)) {
26483 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
26484 MVT EltVT = VT.getVectorElementType();
26485 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
26486 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
26487 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
26488 else if (EltVT.bitsLT(MVT::i32))
26489 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
26491 return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
26494 // vXi8 shifts - shift as v8i16 + mask result.
26495 if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
26496 (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
26497 VT == MVT::v64i8) &&
26498 !Subtarget.hasXOP()) {
26499 unsigned NumElts = VT.getVectorNumElements();
26500 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26501 if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
26502 unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
26503 unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
26504 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
26506 // Create the mask using vXi16 shifts. For shift-rights we need to move
26507 // the upper byte down before splatting the vXi8 mask.
26508 SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
26509 BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
26510 BaseShAmt, Subtarget, DAG);
26511 if (Opcode != ISD::SHL)
26512 BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
26513 8, DAG);
26514 BitMask = DAG.getBitcast(VT, BitMask);
26515 BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
26516 SmallVector<int, 64>(NumElts, 0));
26518 SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
26519 DAG.getBitcast(ExtVT, R), BaseShAmt,
26520 Subtarget, DAG);
26521 Res = DAG.getBitcast(VT, Res);
26522 Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
26524 if (Opcode == ISD::SRA) {
26525 // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
26526 // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
26527 SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
26528 SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
26529 BaseShAmt, Subtarget, DAG);
26530 SignMask = DAG.getBitcast(VT, SignMask);
26531 Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
26532 Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
26533 }
26534 return Res;
26535 }
26536 }
26539 // Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
26540 if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
26541 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
26542 Amt = Amt.getOperand(0);
26543 unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
26544 std::vector<SDValue> Vals(Ratio);
26545 for (unsigned i = 0; i != Ratio; ++i)
26546 Vals[i] = Amt.getOperand(i);
26547 for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
26548 for (unsigned j = 0; j != Ratio; ++j)
26549 if (Vals[j] != Amt.getOperand(i + j))
26550 return SDValue();
26551 }
26553 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
26554 return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
26555 }
26557 return SDValue();
26558 }
26559 // Convert a shift/rotate left amount to a multiplication scale factor.
26560 static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
26561 const X86Subtarget &Subtarget,
26562 SelectionDAG &DAG) {
26563 MVT VT = Amt.getSimpleValueType();
26564 if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
26565 (Subtarget.hasInt256() && VT == MVT::v16i16) ||
26566 (!Subtarget.hasAVX512() && VT == MVT::v16i8)))
26567 return SDValue();
26569 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
26570 SmallVector<SDValue, 8> Elts;
26571 MVT SVT = VT.getVectorElementType();
26572 unsigned SVTBits = SVT.getSizeInBits();
26573 APInt One(SVTBits, 1);
26574 unsigned NumElems = VT.getVectorNumElements();
26576 for (unsigned i = 0; i != NumElems; ++i) {
26577 SDValue Op = Amt->getOperand(i);
26578 if (Op->isUndef()) {
26579 Elts.push_back(Op);
26580 continue;
26581 }
26583 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
26584 APInt C(SVTBits, ND->getZExtValue());
26585 uint64_t ShAmt = C.getZExtValue();
26586 if (ShAmt >= SVTBits) {
26587 Elts.push_back(DAG.getUNDEF(SVT));
26588 continue;
26589 }
26590 Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
26591 }
26592 return DAG.getBuildVector(VT, dl, Elts);
26595 // If the target doesn't support variable shifts, use either FP conversion
26596 // or integer multiplication to avoid shifting each element individually.
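// For v4i32 the scale 2^Amt is built in FP: shifting Amt into the exponent
// field (bit 23) and adding the bias of 1.0f (0x3f800000) yields the IEEE-754
// encoding of the float 2^Amt, which FP_TO_SINT converts back to an integer.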
26597 if (VT == MVT::v4i32) {
26598 Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
26599 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
26600 DAG.getConstant(0x3f800000U, dl, VT));
26601 Amt = DAG.getBitcast(MVT::v4f32, Amt);
26602 return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
26605 // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
26606 if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
26607 SDValue Z = DAG.getConstant(0, dl, VT);
26608 SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
26609 SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
26610 Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
26611 Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
26612 if (Subtarget.hasSSE41())
26613 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
26615 return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
26616 DAG.getBitcast(VT, Hi),
26617 {0, 2, 4, 6, 8, 10, 12, 14});
26618 }
26620 return SDValue();
26621 }
26623 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
26624 SelectionDAG &DAG) {
26625 MVT VT = Op.getSimpleValueType();
26626 SDLoc dl(Op);
26627 SDValue R = Op.getOperand(0);
26628 SDValue Amt = Op.getOperand(1);
26629 unsigned EltSizeInBits = VT.getScalarSizeInBits();
26630 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
26632 unsigned Opc = Op.getOpcode();
26633 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
26634 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
26636 assert(VT.isVector() && "Custom lowering only for vector shifts!");
26637 assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
26639 if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
26640 return V;
26642 if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
26643 return V;
26645 if (SupportedVectorVarShift(VT, Subtarget, Opc))
26646 return Op;
26648 // XOP has 128-bit variable logical/arithmetic shifts.
26649 // +ve/-ve Amt = shift left/right.
26650 if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
26651 VT == MVT::v8i16 || VT == MVT::v16i8)) {
26652 if (Opc == ISD::SRL || Opc == ISD::SRA) {
26653 SDValue Zero = DAG.getConstant(0, dl, VT);
26654 Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
26656 if (Opc == ISD::SHL || Opc == ISD::SRL)
26657 return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
26658 if (Opc == ISD::SRA)
26659 return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
26662 // 2i64 vector logical shifts can efficiently avoid scalarization - do the
26663 // shifts per-lane and then shuffle the partial results back together.
26664 if (VT == MVT::v2i64 && Opc != ISD::SRA) {
26665 // Splat the shift amounts so the scalar shifts above will catch it.
26666 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
26667 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
26668 SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
26669 SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
26670 return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
26673 // i64 vector arithmetic shift can be emulated with the transform:
26674 // M = lshr(SIGN_MASK, Amt)
26675 // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
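// E.g. for Amt == 1 and R == -2 (0xFFFF...FE): lshr gives 0x7FFF...FF,
// M == 0x4000...00, xor gives 0x3FFF...FF and sub gives 0xFFFF...FF (-1),
// matching the arithmetic shift.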
26676 if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
26677 Opc == ISD::SRA) {
26678 SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
26679 SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
26680 R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
26681 R = DAG.getNode(ISD::XOR, dl, VT, R, M);
26682 R = DAG.getNode(ISD::SUB, dl, VT, R, M);
26683 return R;
26684 }
26686 // If possible, lower this shift as a sequence of two shifts by
26687 // constant plus a BLENDing shuffle instead of scalarizing it.
26689 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
26691 // Could be rewritten as:
26692 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
26694 // The advantage is that the two shifts from the example would be
26695 // lowered as X86ISD::VSRLI nodes in parallel before blending.
26696 if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
26697 (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
26698 SDValue Amt1, Amt2;
26699 unsigned NumElts = VT.getVectorNumElements();
26700 SmallVector<int, 8> ShuffleMask;
26701 for (unsigned i = 0; i != NumElts; ++i) {
26702 SDValue A = Amt->getOperand(i);
26703 if (A.isUndef()) {
26704 ShuffleMask.push_back(SM_SentinelUndef);
26705 continue;
26706 }
26707 if (!Amt1 || Amt1 == A) {
26708 ShuffleMask.push_back(i);
26709 Amt1 = A;
26710 continue;
26711 }
26712 if (!Amt2 || Amt2 == A) {
26713 ShuffleMask.push_back(i + NumElts);
26714 Amt2 = A;
26715 continue;
26716 }
26717 break;
26718 }
26720 // Only perform this blend if we can perform it without loading a mask.
26721 if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
26722 (VT != MVT::v16i16 ||
26723 is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
26724 (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
26725 canWidenShuffleElements(ShuffleMask))) {
26726 auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
26727 auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
26728 if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
26729 Cst2->getAPIntValue().ult(EltSizeInBits)) {
26730 SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
26731 Cst1->getZExtValue(), DAG);
26732 SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
26733 Cst2->getZExtValue(), DAG);
26734 return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
26739 // If possible, lower this packed shift into a vector multiply instead of
26740 // expanding it into a sequence of scalar shifts.
26741 if (Opc == ISD::SHL)
26742 if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
26743 return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
26745 // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
26746 // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
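// For 0 < c < 16, srl(x, c) == mulhu(x, 1 << (16 - c)): the 32-bit product
// x * 2^(16-c) is x << (16-c), whose high 16 bits are x >> c. The c == 0 case
// (where the scale would wrap to 0) is handled by the ZAmt select below.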
26747 if (Opc == ISD::SRL && ConstantAmt &&
26748 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
26749 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
26750 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
26751 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
26752 SDValue Zero = DAG.getConstant(0, dl, VT);
26753 SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
26754 SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
26755 return DAG.getSelect(dl, VT, ZAmt, R, Res);
26756 }
26757 }
26759 // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
26760 // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
26761 // TODO: Special case handling for shift by 0/1, really we can afford either
26762 // of these cases in pre-SSE41/XOP/AVX512 but not both.
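// As above, sra(x, c) == mulhs(x, 1 << (16 - c)) for 1 < c < 16. Shift-by-0
// must select the original value, and shift-by-1 is special-cased with VSRAI
// because the scale 1 << 15 is negative when interpreted as an i16.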
26763 if (Opc == ISD::SRA && ConstantAmt &&
26764 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
26765 ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
26766 !Subtarget.hasAVX512()) ||
26767 DAG.isKnownNeverZero(Amt))) {
26768 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
26769 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
26770 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
26771 SDValue Amt0 =
26772 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
26773 SDValue Amt1 =
26774 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
26775 SDValue Sra1 =
26776 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
26777 SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
26778 Res = DAG.getSelect(dl, VT, Amt0, R, Res);
26779 return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
26783 // v4i32 Non Uniform Shifts.
26784 // If the shift amount is constant we can shift each lane using the SSE2
26785 // immediate shifts, else we need to zero-extend each lane to the lower i64
26786 // and shift using the SSE2 variable shifts.
26787 // The separate results can then be blended together.
26788 if (VT == MVT::v4i32) {
26789 SDValue Amt0, Amt1, Amt2, Amt3;
26790 if (ConstantAmt) {
26791 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
26792 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
26793 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
26794 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
26795 } else {
26796 // The SSE2 shifts use the lower i64 as the same shift amount for
26797 // all lanes and the upper i64 is ignored. On AVX we're better off
26798 // just zero-extending, but for SSE just duplicating the top 16-bits is
26799 // cheaper and has the same effect for out of range values.
26800 if (Subtarget.hasAVX()) {
26801 SDValue Z = DAG.getConstant(0, dl, VT);
26802 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
26803 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
26804 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
26805 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
26806 } else {
26807 SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
26808 SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
26809 {4, 5, 6, 7, -1, -1, -1, -1});
26810 Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
26811 {0, 1, 1, 1, -1, -1, -1, -1});
26812 Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
26813 {2, 3, 3, 3, -1, -1, -1, -1});
26814 Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
26815 {0, 1, 1, 1, -1, -1, -1, -1});
26816 Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
26817 {2, 3, 3, 3, -1, -1, -1, -1});
26821 unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
26822 SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
26823 SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
26824 SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
26825 SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
26827 // Merge the shifted lane results optimally with/without PBLENDW.
26828 // TODO - ideally shuffle combining would handle this.
26829 if (Subtarget.hasSSE41()) {
26830 SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
26831 SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
26832 return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
26834 SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
26835 SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
26836 return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
26839 // It's worth extending once and using the vXi16/vXi32 shifts for smaller
26840 // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
26841 // make the existing SSE solution better.
26842 // NOTE: We honor the preferred vector width before promoting to 512-bits.
26843 if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
26844 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
26845 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
26846 (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
26847 (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
26848 assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
26849 "Unexpected vector type");
26850 MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
26851 MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
26852 unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
26853 R = DAG.getNode(ExtOpc, dl, ExtVT, R);
26854 Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
26855 return DAG.getNode(ISD::TRUNCATE, dl, VT,
26856 DAG.getNode(Opc, dl, ExtVT, R, Amt));
26859 // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
26860 // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
26861 if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
26862 (VT == MVT::v16i8 || VT == MVT::v64i8 ||
26863 (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
26864 !Subtarget.hasXOP()) {
26865 int NumElts = VT.getVectorNumElements();
26866 SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
26868 // Extend constant shift amount to vXi16 (it doesn't matter if the type
26869 // isn't legal).
26870 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
26871 Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
26872 Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
26873 Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
26874 assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
26875 "Constant build vector expected");
26877 if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
26878 R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
26879 : DAG.getZExtOrTrunc(R, dl, ExVT);
26880 R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
26881 R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
26882 return DAG.getZExtOrTrunc(R, dl, VT);
26885 SmallVector<SDValue, 16> LoAmt, HiAmt;
26886 for (int i = 0; i != NumElts; i += 16) {
26887 for (int j = 0; j != 8; ++j) {
26888 LoAmt.push_back(Amt.getOperand(i + j));
26889 HiAmt.push_back(Amt.getOperand(i + j + 8));
26893 MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
26894 SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
26895 SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
26897 SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
26898 SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
26899 LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
26900 HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
26901 LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
26902 HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
26903 LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
26904 HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
26905 return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
26908 if (VT == MVT::v16i8 ||
26909 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
26910 (VT == MVT::v64i8 && Subtarget.hasBWI())) {
26911 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
26913 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
26914 if (VT.is512BitVector()) {
26915 // On AVX512BW targets we make use of the fact that VSELECT lowers
26916 // to a masked blend which selects bytes based just on the sign bit
26917 // extracted to a mask.
26918 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
26919 V0 = DAG.getBitcast(VT, V0);
26920 V1 = DAG.getBitcast(VT, V1);
26921 Sel = DAG.getBitcast(VT, Sel);
26922 Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
26923 ISD::SETGT);
26924 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
26925 } else if (Subtarget.hasSSE41()) {
26926 // On SSE41 targets we make use of the fact that VSELECT lowers
26927 // to PBLENDVB which selects bytes based just on the sign bit.
26928 V0 = DAG.getBitcast(VT, V0);
26929 V1 = DAG.getBitcast(VT, V1);
26930 Sel = DAG.getBitcast(VT, Sel);
26931 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
26933 // On pre-SSE41 targets we test for the sign bit by comparing to
26934 // zero - a negative value will set all bits of the lanes to true
26935 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
26936 SDValue Z = DAG.getConstant(0, dl, SelVT);
26937 SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
26938 return DAG.getSelect(dl, SelVT, C, V0, V1);
26941 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
26942 // We can safely do this using i16 shifts as we're only interested in
26943 // the 3 lower bits of each byte.
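// After the << 5, bit 2 of each shift amount sits in the byte's sign bit, so
// the first select applies a shift by 4. Each ADD below doubles the mask,
// moving bit 1 (shift by 2) and then bit 0 (shift by 1) into the sign bit.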
26944 Amt = DAG.getBitcast(ExtVT, Amt);
26945 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
26946 Amt = DAG.getBitcast(VT, Amt);
26948 if (Opc == ISD::SHL || Opc == ISD::SRL) {
26949 // r = VSELECT(r, shift(r, 4), a);
26950 SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
26951 R = SignBitSelect(VT, Amt, M, R);
26954 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26956 // r = VSELECT(r, shift(r, 2), a);
26957 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
26958 R = SignBitSelect(VT, Amt, M, R);
26961 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26963 // return VSELECT(r, shift(r, 1), a);
26964 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
26965 R = SignBitSelect(VT, Amt, M, R);
26966 return R;
26967 }
26969 if (Opc == ISD::SRA) {
26970 // For SRA we need to unpack each byte to the higher byte of a i16 vector
26971 // so we can correctly sign extend. We don't care what happens to the
26972 // lower byte.
26973 SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
26974 SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
26975 SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
26976 SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
26977 ALo = DAG.getBitcast(ExtVT, ALo);
26978 AHi = DAG.getBitcast(ExtVT, AHi);
26979 RLo = DAG.getBitcast(ExtVT, RLo);
26980 RHi = DAG.getBitcast(ExtVT, RHi);
26982 // r = VSELECT(r, shift(r, 4), a);
26983 SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
26984 SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
26985 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26986 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26989 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
26990 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
26992 // r = VSELECT(r, shift(r, 2), a);
26993 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
26994 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
26995 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26996 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26999 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
27000 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
27002 // r = VSELECT(r, shift(r, 1), a);
27003 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
27004 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
27005 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
27006 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
27008 // Logical shift the result back to the lower byte, leaving a zero upper
27009 // byte meaning that we can safely pack with PACKUSWB.
27010 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
27011 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
27012 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
27016 if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
27017 MVT ExtVT = MVT::v8i32;
27018 SDValue Z = DAG.getConstant(0, dl, VT);
27019 SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
27020 SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
27021 SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
27022 SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
27023 ALo = DAG.getBitcast(ExtVT, ALo);
27024 AHi = DAG.getBitcast(ExtVT, AHi);
27025 RLo = DAG.getBitcast(ExtVT, RLo);
27026 RHi = DAG.getBitcast(ExtVT, RHi);
27027 SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
27028 SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
27029 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
27030 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
27031 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
27034 if (VT == MVT::v8i16) {
27035 // If we have a constant shift amount, the non-SSE41 path is best as
27036 // avoiding bitcasts make it easier to constant fold and reduce to PBLENDW.
27037 bool UseSSE41 = Subtarget.hasSSE41() &&
27038 !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27040 auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
27041 // On SSE41 targets we make use of the fact that VSELECT lowers
27042 // to PBLENDVB which selects bytes based just on the sign bit.
27043 if (UseSSE41) {
27044 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
27045 V0 = DAG.getBitcast(ExtVT, V0);
27046 V1 = DAG.getBitcast(ExtVT, V1);
27047 Sel = DAG.getBitcast(ExtVT, Sel);
27048 return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1));
27049 }
27050 // On pre-SSE41 targets we splat the sign bit - a negative value will
27051 // set all bits of the lanes to true and VSELECT uses that in
27052 // its OR(AND(V0,C),AND(V1,~C)) lowering.
27053 SDValue C =
27054 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
27055 return DAG.getSelect(dl, VT, C, V0, V1);
27056 };
27058 // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
27060 // On SSE41 targets we need to replicate the shift mask in both
27061 // bytes for PBLENDVB.
27062 if (UseSSE41)
27063 Amt = DAG.getNode(ISD::OR, dl, VT,
27064 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
27065 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
27066 else
27067 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
27070 // r = VSELECT(r, shift(r, 8), a);
27071 SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
27072 R = SignBitSelect(Amt, M, R);
27075 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27077 // r = VSELECT(r, shift(r, 4), a);
27078 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
27079 R = SignBitSelect(Amt, M, R);
27082 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27084 // r = VSELECT(r, shift(r, 2), a);
27085 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
27086 R = SignBitSelect(Amt, M, R);
27089 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27091 // return VSELECT(r, shift(r, 1), a);
27092 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
27093 R = SignBitSelect(Amt, M, R);
27094 return R;
27095 }
27097 // Decompose 256-bit shifts into 128-bit shifts.
27098 if (VT.is256BitVector())
27099 return split256IntArith(Op, DAG);
27101 return SDValue();
27102 }
27104 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
27105 SelectionDAG &DAG) {
27106 MVT VT = Op.getSimpleValueType();
27107 assert(VT.isVector() && "Custom lowering only for vector rotates!");
27108 SDLoc DL(Op);
27110 SDValue R = Op.getOperand(0);
27111 SDValue Amt = Op.getOperand(1);
27112 unsigned Opcode = Op.getOpcode();
27113 unsigned EltSizeInBits = VT.getScalarSizeInBits();
27114 int NumElts = VT.getVectorNumElements();
27116 // Check for constant splat rotation amount.
27117 APInt UndefElts;
27118 SmallVector<APInt, 32> EltBits;
27119 int CstSplatIndex = -1;
27120 if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits))
27121 for (int i = 0; i != NumElts; ++i)
27122 if (!UndefElts[i]) {
27123 if (CstSplatIndex < 0 || EltBits[i] == EltBits[CstSplatIndex]) {
27124 CstSplatIndex = i;
27125 continue;
27126 }
27127 CstSplatIndex = -1;
27128 break;
27129 }
27131 // AVX512 implicitly uses modulo rotation amounts.
27132 if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
27133 // Attempt to rotate by immediate.
27134 if (0 <= CstSplatIndex) {
27135 unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
27136 uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
27137 return DAG.getNode(Op, DL, VT, R,
27138 DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
27141 // Else, fall-back on VPROLV/VPRORV.
27142 return Op;
27143 }
27145 assert((Opcode == ISD::ROTL) && "Only ROTL supported");
27147 // XOP has 128-bit vector variable + immediate rotates.
27148 // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
27149 // XOP implicitly uses modulo rotation amounts.
27150 if (Subtarget.hasXOP()) {
27151 if (VT.is256BitVector())
27152 return split256IntArith(Op, DAG);
27153 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
27155 // Attempt to rotate by immediate.
27156 if (0 <= CstSplatIndex) {
27157 uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
27158 return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
27159 DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
27162 // Use general rotate by variable (per-element).
27163 return Op;
27164 }
27166 // Split 256-bit integers on pre-AVX2 targets.
27167 if (VT.is256BitVector() && !Subtarget.hasAVX2())
27168 return split256IntArith(Op, DAG);
27170 assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
27171 ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
27172 Subtarget.hasAVX2())) &&
27173 "Only vXi32/vXi16/vXi8 vector rotates supported");
27175 // Rotate by a uniform constant - expand back to shifts.
27176 if (0 <= CstSplatIndex)
27177 return SDValue();
27179 bool IsSplatAmt = DAG.isSplatValue(Amt);
27181 // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
27182 // the sign bit.
27183 if (EltSizeInBits == 8 && !IsSplatAmt) {
27184 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
27185 return SDValue();
27187 // We don't need ModuloAmt here as we just peek at individual bits.
27188 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
27190 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
27191 if (Subtarget.hasSSE41()) {
27192 // On SSE41 targets we make use of the fact that VSELECT lowers
27193 // to PBLENDVB which selects bytes based just on the sign bit.
27194 V0 = DAG.getBitcast(VT, V0);
27195 V1 = DAG.getBitcast(VT, V1);
27196 Sel = DAG.getBitcast(VT, Sel);
27197 return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1));
27199 // On pre-SSE41 targets we test for the sign bit by comparing to
27200 // zero - a negative value will set all bits of the lanes to true
27201 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
27202 SDValue Z = DAG.getConstant(0, DL, SelVT);
27203 SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
27204 return DAG.getSelect(DL, SelVT, C, V0, V1);
27207 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
27208 // We can safely do this using i16 shifts as we're only interested in
27209 // the 3 lower bits of each byte.
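// As in the variable byte-shift lowering above, each select keys off one bit
// of the rotate amount (4, then 2, then 1) via the byte's sign bit.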
27210 Amt = DAG.getBitcast(ExtVT, Amt);
27211 Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
27212 Amt = DAG.getBitcast(VT, Amt);
27214 // r = VSELECT(r, rot(r, 4), a);
27215 SDValue M;
27216 M = DAG.getNode(
27217 ISD::OR, DL, VT,
27218 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
27219 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
27220 R = SignBitSelect(VT, Amt, M, R);
27223 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
27225 // r = VSELECT(r, rot(r, 2), a);
27226 M = DAG.getNode(
27227 ISD::OR, DL, VT,
27228 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
27229 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
27230 R = SignBitSelect(VT, Amt, M, R);
27233 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
27235 // return VSELECT(r, rot(r, 1), a);
27236 M = DAG.getNode(
27237 ISD::OR, DL, VT,
27238 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
27239 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
27240 return SignBitSelect(VT, Amt, M, R);
27243 // ISD::ROT* uses modulo rotate amounts.
27244 Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
27245 DAG.getConstant(EltSizeInBits - 1, DL, VT));
27247 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27248 bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
27249 SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
27251 // Fallback for splats + all supported variable shifts.
27252 // Fallback for non-constant AVX2 vXi16 as well.
27253 if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
27254 SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
27255 AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
27256 SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
27257 SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
27258 return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
27261 // As with shifts, convert the rotation amount to a multiplication factor.
27262 SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
27263 assert(Scale && "Failed to convert ROTL amount to scale");
27265 // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
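// For 0 < c < 16, the 32-bit product x * 2^c holds x << c in its low half and
// x >> (16 - c) in its high half, so MUL | MULHU reassembles rotl(x, c).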
27266 if (EltSizeInBits == 16) {
27267 SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
27268 SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
27269 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
27272 // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
27273 // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
27274 // that can then be OR'd with the lower 32-bits.
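// I.e. per lane, PMULUDQ(x, 2^c) yields a 64-bit value whose low half is
// x << c and whose high half is x >> (32 - c); OR-ing the halves gives the
// rotate. Even lanes are handled directly, odd lanes via the shuffles below.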
27275 assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
27276 static const int OddMask[] = {1, -1, 3, -1};
27277 SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
27278 SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
27280 SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
27281 DAG.getBitcast(MVT::v2i64, R),
27282 DAG.getBitcast(MVT::v2i64, Scale));
27283 SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
27284 DAG.getBitcast(MVT::v2i64, R13),
27285 DAG.getBitcast(MVT::v2i64, Scale13));
27286 Res02 = DAG.getBitcast(VT, Res02);
27287 Res13 = DAG.getBitcast(VT, Res13);
27289 return DAG.getNode(ISD::OR, DL, VT,
27290 DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
27291 DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
27294 /// Returns true if the operand type is exactly twice the native width, and
27295 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
27296 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
27297 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
27298 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
27299 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
27301 if (OpWidth == 64)
27302 return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
27303 if (OpWidth == 128)
27304 return Subtarget.hasCmpxchg16b();
27306 return false;
27307 }
27309 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
27310 // TODO: In 32-bit mode, use FISTP when X87 is available?
27311 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
27312 Type *MemType = SI->getValueOperand()->getType();
27314 bool NoImplicitFloatOps =
27315 SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
27316 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
27317 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2())
27318 return false;
27320 return needsCmpXchgNb(MemType);
27323 // Note: this turns large loads into lock cmpxchg8b/16b.
27324 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
27325 TargetLowering::AtomicExpansionKind
27326 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
27327 Type *MemType = LI->getType();
27329 // If this a 64 bit atomic load on a 32-bit target and SSE2 is enabled, we
27330 // can use movq to do the load. If we have X87 we can load into an 80-bit
27331 // X87 register and store it to a stack temporary.
27332 bool NoImplicitFloatOps =
27333 LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
27334 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
27335 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
27336 (Subtarget.hasSSE2() || Subtarget.hasX87()))
27337 return AtomicExpansionKind::None;
27339 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
27340 : AtomicExpansionKind::None;
27343 TargetLowering::AtomicExpansionKind
27344 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
27345 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
27346 Type *MemType = AI->getType();
27348 // If the operand is too big, we must see if cmpxchg8/16b is available
27349 // and default to library calls otherwise.
27350 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
27351 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
27352 : AtomicExpansionKind::None;
27355 AtomicRMWInst::BinOp Op = AI->getOperation();
27358 llvm_unreachable("Unknown atomic operation");
27359 case AtomicRMWInst::Xchg:
27360 case AtomicRMWInst::Add:
27361 case AtomicRMWInst::Sub:
27362 // It's better to use xadd, xsub or xchg for these in all cases.
27363 return AtomicExpansionKind::None;
27364 case AtomicRMWInst::Or:
27365 case AtomicRMWInst::And:
27366 case AtomicRMWInst::Xor:
27367 // If the atomicrmw's result isn't actually used, we can just add a "lock"
27368 // prefix to a normal instruction for these operations.
27369 return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
27370 : AtomicExpansionKind::None;
27371 case AtomicRMWInst::Nand:
27372 case AtomicRMWInst::Max:
27373 case AtomicRMWInst::Min:
27374 case AtomicRMWInst::UMax:
27375 case AtomicRMWInst::UMin:
27376 case AtomicRMWInst::FAdd:
27377 case AtomicRMWInst::FSub:
27378 // These always require a non-trivial set of data operations on x86. We must
27379 // use a cmpxchg loop.
27380 return AtomicExpansionKind::CmpXChg;
27381 }
27382 }
27384 LoadInst *
27385 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
27386 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
27387 Type *MemType = AI->getType();
27388 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
27389 // there is no benefit in turning such RMWs into loads, and it is actually
27390 // harmful as it introduces a mfence.
27391 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
27392 return nullptr;
27394 // If this is a canonical idempotent atomicrmw w/no uses, we have a better
27395 // lowering available in lowerAtomicArith.
27396 // TODO: push more cases through this path.
27397 if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
27398 if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
27399 AI->use_empty())
27400 return nullptr;
27402 auto Builder = IRBuilder<>(AI);
27403 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
27404 auto SSID = AI->getSyncScopeID();
27405 // We must restrict the ordering to avoid generating loads with Release or
27406 // ReleaseAcquire orderings.
27407 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
27409 // Before the load we need a fence. Here is an example lifted from
27410 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
27411 // is required:
27412 // Thread 0:
27413 // x.store(1, relaxed);
27414 // r1 = y.fetch_add(0, release);
27415 // Thread 1:
27416 // y.fetch_add(42, acquire);
27417 // r2 = x.load(relaxed);
27418 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
27419 // lowered to just a load without a fence. A mfence flushes the store buffer,
27420 // making the optimization clearly correct.
27421 // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
27422 // otherwise, we might be able to be more aggressive on relaxed idempotent
27423 // rmw. In practice, they do not look useful, so we don't try to be
27424 // especially clever.
27425 if (SSID == SyncScope::SingleThread)
27426 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
27427 // the IR level, so we must wrap it in an intrinsic.
27428 return nullptr;
27430 if (!Subtarget.hasMFence())
27431 // FIXME: it might make sense to use a locked operation here but on a
27432 // different cache-line to prevent cache-line bouncing. In practice it
27433 // is probably a small win, and x86 processors without mfence are rare
27434 // enough that we do not bother.
27435 return nullptr;
27437 Function *MFence =
27438 llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
27439 Builder.CreateCall(MFence, {});
27441 // Finally we can emit the atomic load.
27442 LoadInst *Loaded =
27443 Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
27444 AI->getType()->getPrimitiveSizeInBits());
27445 Loaded->setAtomic(Order, SSID);
27446 AI->replaceAllUsesWith(Loaded);
27447 AI->eraseFromParent();
27449 return Loaded;
27450 }
27451 bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
27452 if (!SI.isUnordered())
27453 return false;
27454 return ExperimentalUnorderedISEL;
27455 }
27456 bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
27457 if (!LI.isUnordered())
27458 return false;
27459 return ExperimentalUnorderedISEL;
27460 }
27463 /// Emit a locked operation on a stack location which does not change any
27464 /// memory location, but does involve a lock prefix. Location is chosen to be
27465 /// a) very likely accessed only by a single thread to minimize cache traffic,
27466 /// and b) definitely dereferenceable. Returns the new Chain result.
27467 static SDValue emitLockedStackOp(SelectionDAG &DAG,
27468 const X86Subtarget &Subtarget,
27469 SDValue Chain, SDLoc DL) {
27470 // Implementation notes:
27471 // 1) LOCK prefix creates a full read/write reordering barrier for memory
27472 // operations issued by the current processor. As such, the location
27473 // referenced is not relevant for the ordering properties of the instruction.
27474 // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
27475 // 8.2.3.9 Loads and Stores Are Not Reordered with Locked Instructions
27476 // 2) Using an immediate operand appears to be the best encoding choice
27477 // here since it doesn't require an extra register.
27478 // 3) OR appears to be very slightly faster than ADD. (Though, the difference
27479 // is small enough it might just be measurement noise.)
27480 // 4) When choosing offsets, there are several contributing factors:
27481 // a) If there's no redzone, we default to TOS. (We could allocate a cache
27482 // line aligned stack object to improve this case.)
27483 // b) To minimize our chances of introducing a false dependence, we prefer
27484 // to offset the stack usage from TOS slightly.
27485 // c) To minimize concerns about cross thread stack usage - in particular,
27486 // the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
27487 // captures state in the TOS frame and accesses it from many threads -
27488 // we want to use an offset such that the offset is in a distinct cache
27489 // line from the TOS frame.
27491 // For a general discussion of the tradeoffs and benchmark results, see:
27492 // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
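// For example (illustrative, not taken from the original source): on x86-64
// with a 128-byte red zone the node built below encodes roughly
// "lock orl $0, -64(%rsp)"; without a red zone the displacement is 0, i.e. the
// locked operation targets the top of the stack.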
27494 auto &MF = DAG.getMachineFunction();
27495 auto &TFL = *Subtarget.getFrameLowering();
27496 const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
27498 if (Subtarget.is64Bit()) {
27499 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
27501 DAG.getRegister(X86::RSP, MVT::i64), // Base
27502 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
27503 DAG.getRegister(0, MVT::i64), // Index
27504 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
27505 DAG.getRegister(0, MVT::i16), // Segment.
27508 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
27510 return SDValue(Res, 1);
27513 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
27515 DAG.getRegister(X86::ESP, MVT::i32), // Base
27516 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
27517 DAG.getRegister(0, MVT::i32), // Index
27518 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
27519 DAG.getRegister(0, MVT::i16), // Segment.
27523 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
27525 return SDValue(Res, 1);
27528 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
27529 SelectionDAG &DAG) {
27531 AtomicOrdering FenceOrdering =
27532 static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
27533 SyncScope::ID FenceSSID =
27534 static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
27536 // The only fence that needs an instruction is a sequentially-consistent
27537 // cross-thread fence.
27538 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
27539 FenceSSID == SyncScope::System) {
27540 if (Subtarget.hasMFence())
27541 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
27543 SDValue Chain = Op.getOperand(0);
27544 return emitLockedStackOp(DAG, Subtarget, Chain, dl);
27547 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
27548 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
27551 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
27552 SelectionDAG &DAG) {
27553 MVT T = Op.getSimpleValueType();
27557 switch(T.SimpleTy) {
27558 default: llvm_unreachable("Invalid value type!");
27559 case MVT::i8: Reg = X86::AL; size = 1; break;
27560 case MVT::i16: Reg = X86::AX; size = 2; break;
27561 case MVT::i32: Reg = X86::EAX; size = 4; break;
27563 assert(Subtarget.is64Bit() && "Node not type legal!");
27564 Reg = X86::RAX; size = 8;
27567 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
27568 Op.getOperand(2), SDValue());
27569 SDValue Ops[] = { cpIn.getValue(0),
27572 DAG.getTargetConstant(size, DL, MVT::i8),
27573 cpIn.getValue(1) };
27574 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
27575 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
27576 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
27580 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
27581 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
27582 MVT::i32, cpOut.getValue(2));
27583 SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
27585 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
27586 cpOut, Success, EFLAGS.getValue(1));
27589 // Create MOVMSKB, taking into account whether we need to split for AVX1.
27590 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
27591 const X86Subtarget &Subtarget) {
27592 MVT InVT = V.getSimpleValueType();
27594 if (InVT == MVT::v64i8) {
27596 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
27597 Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
27598 Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
27599 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
27600 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
27601 Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
27602 DAG.getConstant(32, DL, MVT::i8));
27603 return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
27605 if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
27607 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
27608 Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
27609 Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
27610 Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
27611 DAG.getConstant(16, DL, MVT::i8));
27612 return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
27615 return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
27618 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
27619 SelectionDAG &DAG) {
27620 SDValue Src = Op.getOperand(0);
27621 MVT SrcVT = Src.getSimpleValueType();
27622 MVT DstVT = Op.getSimpleValueType();
27624 // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
27625 // half to v32i1 and concatenating the result.
27626 if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
27627 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
27628 assert(Subtarget.hasBWI() && "Expected BWI target");
27630 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
27631 DAG.getIntPtrConstant(0, dl));
27632 Lo = DAG.getBitcast(MVT::v32i1, Lo);
27633 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
27634 DAG.getIntPtrConstant(1, dl));
27635 Hi = DAG.getBitcast(MVT::v32i1, Hi);
27636 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
27639 // Custom splitting for BWI types when AVX512F is available but BWI isn't.
27640 if ((SrcVT == MVT::v32i16 || SrcVT == MVT::v64i8) && DstVT.isVector() &&
27641 DAG.getTargetLoweringInfo().isTypeLegal(DstVT)) {
27644 std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
27645 MVT CastVT = DstVT.getHalfNumVectorElementsVT();
27646 Lo = DAG.getBitcast(CastVT, Lo);
27647 Hi = DAG.getBitcast(CastVT, Hi);
27648 return DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
27651 // Use MOVMSK for vector to scalar conversion to prevent scalarization.
27652 if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
27653 assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
27654 MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
27656 SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
27657 V = getPMOVMSKB(DL, V, DAG, Subtarget);
27658 return DAG.getZExtOrTrunc(V, DL, DstVT);
27661 assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
27662 SrcVT == MVT::i64) && "Unexpected VT!");
27664 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
27665 if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
27666 !(DstVT == MVT::x86mmx && SrcVT.isVector()))
27667 // This conversion needs to be expanded.
27671 if (SrcVT.isVector()) {
27672 // Widen the input vector in the case of MVT::v2i32.
27673 // Example: from MVT::v2i32 to MVT::v4i32.
27674 MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
27675 SrcVT.getVectorNumElements() * 2);
27676 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
27677 DAG.getUNDEF(SrcVT));
27679 assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
27680 "Unexpected source type in LowerBITCAST");
27681 Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
27684 MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
27685 Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
27687 if (DstVT == MVT::x86mmx)
27688 return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
27690 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
27691 DAG.getIntPtrConstant(0, dl));
27694 /// Compute the horizontal sum of bytes in V for the elements of VT.
27696 /// Requires V to be a byte vector and VT to be an integer vector type with
27697 /// wider elements than V's type. The width of the elements of VT determines
27698 /// how many bytes of V are summed horizontally to produce each element of the result.
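/// For example, if V is a v16i8 vector of per-byte pop counts and VT is v4i32,
/// each i32 element of the result is the sum of the 4 bytes that make up the
/// corresponding 32-bit lane of V.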
27700 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
27701 const X86Subtarget &Subtarget,
27702 SelectionDAG &DAG) {
27704 MVT ByteVecVT = V.getSimpleValueType();
27705 MVT EltVT = VT.getVectorElementType();
27706 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
27707 "Expected value to have byte element type.");
27708 assert(EltVT != MVT::i8 &&
27709 "Horizontal byte sum only makes sense for wider elements!");
27710 unsigned VecSize = VT.getSizeInBits();
27711 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
27713 // The PSADBW instruction horizontally adds all bytes and leaves the result in
27714 // i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
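// For example, running PSADBW against zero on a lane whose 8 bytes hold the
// per-byte counts [1,0,2,1,1,3,0,2] yields the i64 value 10 for that lane.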
27715 if (EltVT == MVT::i64) {
27716 SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
27717 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
27718 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
27719 return DAG.getBitcast(VT, V);
27722 if (EltVT == MVT::i32) {
27723 // We unpack the low half and high half into i32s interleaved with zeros so
27724 // that we can use PSADBW to horizontally sum them. The most useful part of
27725 // this is that it lines up the results of two PSADBW instructions to be
27726 // two v2i64 vectors which concatenated are the 4 population counts. We can
27727 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
27728 SDValue Zeros = DAG.getConstant(0, DL, VT);
27729 SDValue V32 = DAG.getBitcast(VT, V);
27730 SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
27731 SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
27733 // Do the horizontal sums into two v2i64s.
27734 Zeros = DAG.getConstant(0, DL, ByteVecVT);
27735 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
27736 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
27737 DAG.getBitcast(ByteVecVT, Low), Zeros);
27738 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
27739 DAG.getBitcast(ByteVecVT, High), Zeros);
27741 // Merge them together.
27742 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
27743 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
27744 DAG.getBitcast(ShortVecVT, Low),
27745 DAG.getBitcast(ShortVecVT, High));
27747 return DAG.getBitcast(VT, V);
27750 // The only element type left is i16.
27751 assert(EltVT == MVT::i16 && "Unknown how to handle type");
27753 // To obtain the pop count for each i16 element starting from the pop counts
27754 // of its i8 halves, shift the i16s left by 8, sum as i8s, and then shift the
27755 // i16s right by 8. It is important to shift as i16s because an i8 vector
27756 // shift isn't directly supported.
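// Worked example: an i16 element 0x0F03 has per-byte pop counts [2, 4], i.e.
// the vXi8 view of V holds 0x0402 in that lane. Shifting left by 8 gives
// 0x0200, the i8 add gives 0x0602, and the final right shift by 8 leaves
// 0x0006 = popcount(0x0F03).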
27757 SDValue ShifterV = DAG.getConstant(8, DL, VT);
27758 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
27759 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
27760 DAG.getBitcast(ByteVecVT, V));
27761 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
27764 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
27765 const X86Subtarget &Subtarget,
27766 SelectionDAG &DAG) {
27767 MVT VT = Op.getSimpleValueType();
27768 MVT EltVT = VT.getVectorElementType();
27769 int NumElts = VT.getVectorNumElements();
27771 assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
27773 // Implement a lookup table in register by using an algorithm based on:
27774 // http://wm.ite.pl/articles/sse-popcount.html
27776 // The general idea is that each nibble of every byte in the input vector is
27777 // an index into an in-register pre-computed pop count table. We split the
27778 // input vector into two new ones: (1) a vector with only the shifted-right
27779 // higher nibbles of each byte and (2) a vector with the lower nibbles (and
27780 // the higher ones masked out) of each byte. PSHUFB is used separately with
27781 // both to index the in-register table. Next, both are added and the result
27782 // is an i8 vector where each element contains the pop count for its input byte.
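// Worked example: for the input byte 0xB3 = 0b10110011, the high nibble 0xB
// indexes LUT[0xB] = 3 and the low nibble 0x3 indexes LUT[0x3] = 2, so the
// summed result is 5 = popcount(0xB3).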
27783 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
27784 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
27785 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
27786 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
27788 SmallVector<SDValue, 64> LUTVec;
27789 for (int i = 0; i < NumElts; ++i)
27790 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
27791 SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
27792 SDValue M0F = DAG.getConstant(0x0F, DL, VT);
27795 SDValue FourV = DAG.getConstant(4, DL, VT);
27796 SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
27799 SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
27801 // The input vector is used as the shuffle mask that index elements into the
27802 // LUT. After counting low and high nibbles, add the vector to obtain the
27803 // final pop count per i8 element.
27804 SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
27805 SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
27806 return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
27809 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
27810 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
27811 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
27812 SelectionDAG &DAG) {
27813 MVT VT = Op.getSimpleValueType();
27814 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
27815 "Unknown CTPOP type to handle");
27816 SDLoc DL(Op.getNode());
27817 SDValue Op0 = Op.getOperand(0);
27819 // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
27820 if (Subtarget.hasVPOPCNTDQ()) {
27821 unsigned NumElems = VT.getVectorNumElements();
27822 assert((VT.getVectorElementType() == MVT::i8 ||
27823 VT.getVectorElementType() == MVT::i16) && "Unexpected type");
27824 if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
27825 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
27826 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
27827 Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
27828 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
27832 // Decompose 256-bit ops into smaller 128-bit ops.
27833 if (VT.is256BitVector() && !Subtarget.hasInt256())
27834 return Lower256IntUnary(Op, DAG);
27836 // Decompose 512-bit ops into smaller 256-bit ops.
27837 if (VT.is512BitVector() && !Subtarget.hasBWI())
27838 return Lower512IntUnary(Op, DAG);
27840 // For element types greater than i8, do vXi8 pop counts and a bytesum.
27841 if (VT.getScalarType() != MVT::i8) {
27842 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
27843 SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
27844 SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
27845 return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
27848 // We can't use the fast LUT approach, so fall back on LegalizeDAG.
27849 if (!Subtarget.hasSSSE3())
27852 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
27855 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
27856 SelectionDAG &DAG) {
27857 assert(Op.getSimpleValueType().isVector() &&
27858 "We only do custom lowering for vector population count.");
27859 return LowerVectorCTPOP(Op, Subtarget, DAG);
27862 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
27863 MVT VT = Op.getSimpleValueType();
27864 SDValue In = Op.getOperand(0);
27867 // For scalars, it's still beneficial to transfer to/from the SIMD unit to
27868 // perform the BITREVERSE.
27869 if (!VT.isVector()) {
27870 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
27871 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
27872 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
27873 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
27874 DAG.getIntPtrConstant(0, DL));
27877 int NumElts = VT.getVectorNumElements();
27878 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
27880 // Decompose 256-bit ops into smaller 128-bit ops.
27881 if (VT.is256BitVector())
27882 return Lower256IntUnary(Op, DAG);
27884 assert(VT.is128BitVector() &&
27885 "Only 128-bit vector bitreverse lowering supported.");
27887 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
27888 // perform the BSWAP in the shuffle.
27889 // It's best to shuffle using the second operand as this will implicitly allow
27890 // memory folding for multiple vectors.
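// For example, with a v4i32 input (ScalarSizeInBytes == 4), element 0 produces
// the permute bytes 0x53, 0x52, 0x51, 0x50: source bytes 19..16 of the second
// operand (a byte swap of that i32) with the bit-reverse op in bits 7:5, which
// together reverse all 32 bits of the element.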
27891 SmallVector<SDValue, 16> MaskElts;
27892 for (int i = 0; i != NumElts; ++i) {
27893 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
27894 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
27895 int PermuteByte = SourceByte | (2 << 5);
27896 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
27900 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
27901 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
27902 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
27904 return DAG.getBitcast(VT, Res);
27907 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
27908 SelectionDAG &DAG) {
27909 MVT VT = Op.getSimpleValueType();
27911 if (Subtarget.hasXOP() && !VT.is512BitVector())
27912 return LowerBITREVERSE_XOP(Op, DAG);
27914 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
27916 SDValue In = Op.getOperand(0);
27919 // Split v8i64/v16i32 without BWI so that we can still use the PSHUFB lowering.
27921 if (VT == MVT::v8i64 || VT == MVT::v16i32) {
27922 assert(!Subtarget.hasBWI() && "BWI should Expand BITREVERSE");
27923 return Lower512IntUnary(Op, DAG);
27926 unsigned NumElts = VT.getVectorNumElements();
27927 assert(VT.getScalarType() == MVT::i8 &&
27928 "Only byte vector BITREVERSE supported");
27930 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
27931 if (VT.is256BitVector() && !Subtarget.hasInt256())
27932 return Lower256IntUnary(Op, DAG);
27934 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into its
27935 // two nibbles, and a PSHUFB lookup is used to find the bit-reverse of each
27936 // 0-15 value (moved to the other nibble).
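// Worked example: for the input byte 0x2D = 0b00101101, the low nibble 0xD
// maps to LoLUT[0xD] = 0xB0 and the high nibble 0x2 maps to HiLUT[0x2] = 0x04;
// ORing them gives 0xB4 = 0b10110100, which is 0x2D with its bits reversed.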
27937 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
27938 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
27939 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
27941 const int LoLUT[16] = {
27942 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
27943 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
27944 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
27945 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
27946 const int HiLUT[16] = {
27947 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
27948 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
27949 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
27950 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
27952 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
27953 for (unsigned i = 0; i < NumElts; ++i) {
27954 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
27955 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
27958 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
27959 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
27960 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
27961 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
27962 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
27965 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
27966 const X86Subtarget &Subtarget) {
27967 unsigned NewOpc = 0;
27968 switch (N->getOpcode()) {
27969 case ISD::ATOMIC_LOAD_ADD:
27970 NewOpc = X86ISD::LADD;
27972 case ISD::ATOMIC_LOAD_SUB:
27973 NewOpc = X86ISD::LSUB;
27975 case ISD::ATOMIC_LOAD_OR:
27976 NewOpc = X86ISD::LOR;
27978 case ISD::ATOMIC_LOAD_XOR:
27979 NewOpc = X86ISD::LXOR;
27981 case ISD::ATOMIC_LOAD_AND:
27982 NewOpc = X86ISD::LAND;
27985 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
27988 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
27990 return DAG.getMemIntrinsicNode(
27991 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
27992 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
27993 /*MemVT=*/N->getSimpleValueType(0), MMO);
27996 /// Lower atomic_load_ops into LOCK-prefixed operations.
27997 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
27998 const X86Subtarget &Subtarget) {
27999 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
28000 SDValue Chain = N->getOperand(0);
28001 SDValue LHS = N->getOperand(1);
28002 SDValue RHS = N->getOperand(2);
28003 unsigned Opc = N->getOpcode();
28004 MVT VT = N->getSimpleValueType(0);
28007 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
28008 // can only be lowered when the result is unused. They should have already
28009 // been transformed into a cmpxchg loop in AtomicExpand.
28010 if (N->hasAnyUseOfValue(0)) {
28011 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
28012 // select LXADD if LOCK_SUB can't be selected.
28013 if (Opc == ISD::ATOMIC_LOAD_SUB) {
28014 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
28015 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
28016 RHS, AN->getMemOperand());
28018 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
28019 "Used AtomicRMW ops other than Add should have been expanded!");
28023 // Specialized lowering for the canonical form of an idempotent atomicrmw.
28024 // The core idea here is that since the memory location isn't actually
28025 // changing, all we need is a lowering for the *ordering* impact of the
28026 // atomicrmw. As such, we can choose a different operation and memory
28027 // location to minimize the impact on other code.
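// For example (illustrative): "atomicrmw or i32* %p, i32 0 seq_cst" with an
// unused result does not change memory, so only its ordering has to be
// honoured; we can satisfy seq_cst with a locked no-op on the stack (see
// emitLockedStackOp) rather than touching %p at all.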
28028 if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
28029 // On X86, the only ordering which actually requires an instruction is
28030 // seq_cst that isn't SingleThread; everything else just needs to be preserved
28031 // during codegen and then dropped. Note that we expect (but don't assume)
28032 // that orderings other than seq_cst and acq_rel have been canonicalized to
28033 // a store or load.
28034 if (AN->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
28035 AN->getSyncScopeID() == SyncScope::System) {
28036 // Prefer a locked operation against a stack location to minimize cache
28037 // traffic. This assumes that stack locations are very likely to be
28038 // accessed only by the owning thread.
28039 SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
28040 assert(!N->hasAnyUseOfValue(0));
28041 // NOTE: The getUNDEF is needed to give something for the unused result 0.
28042 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28043 DAG.getUNDEF(VT), NewChain);
28045 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
28046 SDValue NewChain = DAG.getNode(X86ISD::MEMBARRIER, DL, MVT::Other, Chain);
28047 assert(!N->hasAnyUseOfValue(0));
28048 // NOTE: The getUNDEF is needed to give something for the unused result 0.
28049 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28050 DAG.getUNDEF(VT), NewChain);
28053 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
28054 // RAUW the chain, but don't worry about the result, as it's unused.
28055 assert(!N->hasAnyUseOfValue(0));
28056 // NOTE: The getUNDEF is needed to give something for the unused result 0.
28057 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28058 DAG.getUNDEF(VT), LockOp.getValue(1));
28061 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
28062 const X86Subtarget &Subtarget) {
28063 auto *Node = cast<AtomicSDNode>(Op.getNode());
28065 EVT VT = Node->getMemoryVT();
28067 bool IsSeqCst = Node->getOrdering() == AtomicOrdering::SequentiallyConsistent;
28068 bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
28070 // If this store is not sequentially consistent and the type is legal
28071 // we can just keep it.
28072 if (!IsSeqCst && IsTypeLegal)
28075 if (VT == MVT::i64 && !IsTypeLegal) {
28076 // For illegal i64 atomic_stores, we can try to use MOVQ if SSE2 is enabled.
28077 // FIXME: Use movlps with SSE1.
28078 // FIXME: Use fist with X87.
28079 bool NoImplicitFloatOps =
28080 DAG.getMachineFunction().getFunction().hasFnAttribute(
28081 Attribute::NoImplicitFloat);
28082 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
28083 Subtarget.hasSSE2()) {
28084 SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
28085 Node->getOperand(2));
28086 SDVTList Tys = DAG.getVTList(MVT::Other);
28087 SDValue Ops[] = { Node->getChain(), SclToVec, Node->getBasePtr() };
28088 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys,
28090 Node->getMemOperand());
28092 // If this is a sequentially consistent store, also emit an appropriate
28095 Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
28101 // Convert seq_cst store -> xchg
28102 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
28103 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
28104 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
28105 Node->getMemoryVT(),
28106 Node->getOperand(0),
28107 Node->getOperand(1), Node->getOperand(2),
28108 Node->getMemOperand());
28109 return Swap.getValue(1);
28112 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
28113 SDNode *N = Op.getNode();
28114 MVT VT = N->getSimpleValueType(0);
28116 // Let legalize expand this if it isn't a legal type yet.
28117 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
28120 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
28123 // Set the carry flag.
28124 SDValue Carry = Op.getOperand(2);
28125 EVT CarryVT = Carry.getValueType();
28126 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
28127 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
28128 Carry, DAG.getConstant(NegOne, DL, CarryVT));
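// The ADD of all-ones (-1) above sets EFLAGS.CF exactly when the incoming
// carry operand is nonzero (the addition wraps) and clears it when the operand
// is zero, so the flags result feeds the carry-in of the ADC/SBB built below.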
28130 unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
28131 SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
28132 Op.getOperand(1), Carry.getValue(1));
28134 SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
28135 if (N->getValueType(1) == MVT::i1)
28136 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
28138 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
28141 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
28142 SelectionDAG &DAG) {
28143 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
28145 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
28146 // which returns the values as { float, float } (in XMM0) or
28147 // { double, double } (which is returned in XMM0, XMM1).
28149 SDValue Arg = Op.getOperand(0);
28150 EVT ArgVT = Arg.getValueType();
28151 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
28153 TargetLowering::ArgListTy Args;
28154 TargetLowering::ArgListEntry Entry;
28158 Entry.IsSExt = false;
28159 Entry.IsZExt = false;
28160 Args.push_back(Entry);
28162 bool isF64 = ArgVT == MVT::f64;
28163 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
28164 // the small struct {f32, f32} is returned in (eax, edx). For f64,
28165 // the results are returned via SRet in memory.
28166 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28167 RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
28168 const char *LibcallName = TLI.getLibcallName(LC);
28170 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
28172 Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
28173 : (Type *)VectorType::get(ArgTy, 4);
28175 TargetLowering::CallLoweringInfo CLI(DAG);
28176 CLI.setDebugLoc(dl)
28177 .setChain(DAG.getEntryNode())
28178 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
28180 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
28183 // Returned in xmm0 and xmm1.
28184 return CallResult.first;
28186 // Returned in bits 0:31 and 32:64 xmm0.
28187 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
28188 CallResult.first, DAG.getIntPtrConstant(0, dl));
28189 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
28190 CallResult.first, DAG.getIntPtrConstant(1, dl));
28191 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
28192 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
28195 /// Widen a vector input to a vector of NVT. The
28196 /// input vector must have the same element type as NVT.
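/// For example, widening a v2i64 value to v8i64 inserts it at element 0 of an
/// undef (or zero, when FillWithZeroes is set) v8i64 via INSERT_SUBVECTOR;
/// constant build_vectors are instead widened by appending fill elements.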
28197 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
28198 bool FillWithZeroes = false) {
28199 // Check if InOp already has the right width.
28200 MVT InVT = InOp.getSimpleValueType();
28204 if (InOp.isUndef())
28205 return DAG.getUNDEF(NVT);
28207 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
28208 "input and widen element type must match");
28210 unsigned InNumElts = InVT.getVectorNumElements();
28211 unsigned WidenNumElts = NVT.getVectorNumElements();
28212 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
28213 "Unexpected request for vector widening");
28216 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
28217 InOp.getNumOperands() == 2) {
28218 SDValue N1 = InOp.getOperand(1);
28219 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
28221 InOp = InOp.getOperand(0);
28222 InVT = InOp.getSimpleValueType();
28223 InNumElts = InVT.getVectorNumElements();
28226 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
28227 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
28228 SmallVector<SDValue, 16> Ops;
28229 for (unsigned i = 0; i < InNumElts; ++i)
28230 Ops.push_back(InOp.getOperand(i));
28232 EVT EltVT = InOp.getOperand(0).getValueType();
28234 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
28235 DAG.getUNDEF(EltVT);
28236 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
28237 Ops.push_back(FillVal);
28238 return DAG.getBuildVector(NVT, dl, Ops);
28240 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
28242 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
28243 InOp, DAG.getIntPtrConstant(0, dl));
28246 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
28247 SelectionDAG &DAG) {
28248 assert(Subtarget.hasAVX512() &&
28249 "MGATHER/MSCATTER are supported on AVX-512 arch only");
28251 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
28252 SDValue Src = N->getValue();
28253 MVT VT = Src.getSimpleValueType();
28254 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
28257 SDValue Scale = N->getScale();
28258 SDValue Index = N->getIndex();
28259 SDValue Mask = N->getMask();
28260 SDValue Chain = N->getChain();
28261 SDValue BasePtr = N->getBasePtr();
28263 if (VT == MVT::v2f32 || VT == MVT::v2i32) {
28264 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
28265 // If the index is v2i64 and we have VLX we can use xmm for data and index.
28266 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
28267 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28268 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
28269 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
28270 SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
28271 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
28272 SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
28273 VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
28274 return SDValue(NewScatter.getNode(), 1);
28279 MVT IndexVT = Index.getSimpleValueType();
28280 MVT MaskVT = Mask.getSimpleValueType();
28282 // If the index is v2i32, we're being called by type legalization and we
28283 // should just let the default handling take care of it.
28284 if (IndexVT == MVT::v2i32)
28287 // If we don't have VLX and neither the passthru nor the index is 512 bits,
28288 // we need to widen until one is.
28289 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
28290 !Index.getSimpleValueType().is512BitVector()) {
28291 // Determine how much we need to widen by to get a 512-bit type.
28292 unsigned Factor = std::min(512/VT.getSizeInBits(),
28293 512/IndexVT.getSizeInBits());
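// For example, scattering v4f32 data (128 bits) with a v4i64 index (256 bits)
// gives Factor = min(4, 2) = 2, so both are widened to 8 elements: v8f32 for
// the data and v8i64 (512 bits) for the index.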
28294 unsigned NumElts = VT.getVectorNumElements() * Factor;
28296 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
28297 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
28298 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
28300 Src = ExtendToType(Src, VT, DAG);
28301 Index = ExtendToType(Index, IndexVT, DAG);
28302 Mask = ExtendToType(Mask, MaskVT, DAG, true);
28305 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
28306 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
28307 SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
28308 VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
28309 return SDValue(NewScatter.getNode(), 1);
28312 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
28313 SelectionDAG &DAG) {
28315 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
28316 MVT VT = Op.getSimpleValueType();
28317 MVT ScalarVT = VT.getScalarType();
28318 SDValue Mask = N->getMask();
28319 MVT MaskVT = Mask.getSimpleValueType();
28320 SDValue PassThru = N->getPassThru();
28323 // Handle AVX masked loads which don't support passthru other than 0.
28324 if (MaskVT.getVectorElementType() != MVT::i1) {
28325 // We also allow undef in the isel pattern.
28326 if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
28329 SDValue NewLoad = DAG.getMaskedLoad(
28330 VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
28331 getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
28332 N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
28333 N->isExpandingLoad());
28335 SDValue Select = DAG.getNode(ISD::VSELECT, dl, MaskVT, Mask, NewLoad,
28337 return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
28340 assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
28341 "Expanding masked load is supported on AVX-512 target only!");
28343 assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
28344 "Expanding masked load is supported for 32 and 64-bit types only!");
28346 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
28347 "Cannot lower masked load op.");
28349 assert((ScalarVT.getSizeInBits() >= 32 ||
28350 (Subtarget.hasBWI() &&
28351 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
28352 "Unsupported masked load op.");
28354 // This operation is legal for targets with VLX, but without
28355 // VLX the vector should be widened to 512 bits.
28356 unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
28357 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
28358 PassThru = ExtendToType(PassThru, WideDataVT, DAG);
28360 // Mask element has to be i1.
28361 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
28362 "Unexpected mask type");
28364 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
28366 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
28367 SDValue NewLoad = DAG.getMaskedLoad(
28368 WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
28369 PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
28370 N->getExtensionType(), N->isExpandingLoad());
28372 SDValue Exract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
28373 NewLoad.getValue(0),
28374 DAG.getIntPtrConstant(0, dl));
28375 SDValue RetOps[] = {Exract, NewLoad.getValue(1)};
28376 return DAG.getMergeValues(RetOps, dl);
28379 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
28380 SelectionDAG &DAG) {
28381 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
28382 SDValue DataToStore = N->getValue();
28383 MVT VT = DataToStore.getSimpleValueType();
28384 MVT ScalarVT = VT.getScalarType();
28385 SDValue Mask = N->getMask();
28388 assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
28389 "Compressing masked store is supported on AVX-512 target only!");
28391 assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
28392 "Compressing masked store is supported for 32 and 64-bit types only!");
28394 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
28395 "Cannot lower masked store op.");
28397 assert((ScalarVT.getSizeInBits() >= 32 ||
28398 (Subtarget.hasBWI() &&
28399 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
28400 "Unsupported masked store op.");
28402 // This operation is legal for targets with VLX, but without
28403 // VLX the vector should be widened to 512 bits.
28404 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
28405 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
28407 // Mask element has to be i1.
28408 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
28409 "Unexpected mask type");
28411 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
28413 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
28414 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
28415 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
28416 N->getOffset(), Mask, N->getMemoryVT(),
28417 N->getMemOperand(), N->getAddressingMode(),
28418 N->isTruncatingStore(), N->isCompressingStore());
28421 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
28422 SelectionDAG &DAG) {
28423 assert(Subtarget.hasAVX2() &&
28424 "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
28426 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
28428 MVT VT = Op.getSimpleValueType();
28429 SDValue Index = N->getIndex();
28430 SDValue Mask = N->getMask();
28431 SDValue PassThru = N->getPassThru();
28432 MVT IndexVT = Index.getSimpleValueType();
28433 MVT MaskVT = Mask.getSimpleValueType();
28435 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
28437 // If the index is v2i32, we're being called by type legalization.
28438 if (IndexVT == MVT::v2i32)
28441 // If we don't have VLX and neither the passthru nor the index is 512 bits,
28442 // we need to widen until one is.
28444 if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
28445 !IndexVT.is512BitVector()) {
28446 // Determine how much we need to widen by to get a 512-bit type.
28447 unsigned Factor = std::min(512/VT.getSizeInBits(),
28448 512/IndexVT.getSizeInBits());
28450 unsigned NumElts = VT.getVectorNumElements() * Factor;
28452 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
28453 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
28454 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
28456 PassThru = ExtendToType(PassThru, VT, DAG);
28457 Index = ExtendToType(Index, IndexVT, DAG);
28458 Mask = ExtendToType(Mask, MaskVT, DAG, true);
28461 SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
28463 SDValue NewGather = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
28464 DAG.getVTList(VT, MaskVT, MVT::Other), Ops, dl, N->getMemoryVT(),
28465 N->getMemOperand());
28466 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
28467 NewGather, DAG.getIntPtrConstant(0, dl));
28468 return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
28471 static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
28473 SDValue Src = Op.getOperand(0);
28474 MVT DstVT = Op.getSimpleValueType();
28476 AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
28477 unsigned SrcAS = N->getSrcAddressSpace();
28479 assert(SrcAS != N->getDestAddressSpace() &&
28480 "addrspacecast must be between different address spaces");
28482 if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
28483 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
28484 } else if (DstVT == MVT::i64) {
28485 Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
28486 } else if (DstVT == MVT::i32) {
28487 Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
28489 report_fatal_error("Bad address space in addrspacecast");
28494 SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
28495 SelectionDAG &DAG) const {
28496 // TODO: Eventually, the lowering of these nodes should be informed by or
28497 // deferred to the GC strategy for the function in which they appear. For
28498 // now, however, they must be lowered to something. Since they are logically
28499 // no-ops in the case of a null GC strategy (or a GC strategy which does not
28500 // require special handling for these nodes), lower them as literal NOOPs for the time being.
28502 SmallVector<SDValue, 2> Ops;
28504 Ops.push_back(Op.getOperand(0));
28505 if (Op->getGluedNode())
28506 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
28509 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
28510 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
28515 SDValue X86TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG,
28516 RTLIB::Libcall Call) const {
28518 bool IsStrict = Op->isStrictFPOpcode();
28519 unsigned Offset = IsStrict ? 1 : 0;
28520 SmallVector<SDValue, 2> Ops(Op->op_begin() + Offset, Op->op_end());
28523 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
28524 MakeLibCallOptions CallOptions;
28525 std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, Call, MVT::f128, Ops,
28526 CallOptions, dl, Chain);
28529 return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
28534 /// Provide custom lowering hooks for some operations.
28535 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
28536 switch (Op.getOpcode()) {
28537 default: llvm_unreachable("Should not custom lower this!");
28538 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
28539 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
28540 return LowerCMP_SWAP(Op, Subtarget, DAG);
28541 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
28542 case ISD::ATOMIC_LOAD_ADD:
28543 case ISD::ATOMIC_LOAD_SUB:
28544 case ISD::ATOMIC_LOAD_OR:
28545 case ISD::ATOMIC_LOAD_XOR:
28546 case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
28547 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG, Subtarget);
28548 case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
28549 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
28550 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
28551 case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
28552 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
28553 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
28554 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
28555 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
28556 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
28557 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
28558 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
28559 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
28560 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
28561 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
28562 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
28563 case ISD::SHL_PARTS:
28564 case ISD::SRA_PARTS:
28565 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
28567 case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
28568 case ISD::STRICT_SINT_TO_FP:
28569 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
28570 case ISD::STRICT_UINT_TO_FP:
28571 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
28572 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
28573 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
28574 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
28575 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
28576 case ISD::ZERO_EXTEND_VECTOR_INREG:
28577 case ISD::SIGN_EXTEND_VECTOR_INREG:
28578 return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
28579 case ISD::FP_TO_SINT:
28580 case ISD::STRICT_FP_TO_SINT:
28581 case ISD::FP_TO_UINT:
28582 case ISD::STRICT_FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
28583 case ISD::FP_EXTEND:
28584 case ISD::STRICT_FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
28585 case ISD::FP_ROUND:
28586 case ISD::STRICT_FP_ROUND: return LowerFP_ROUND(Op, DAG);
28587 case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
28588 case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
28590 case ISD::FSUB: return lowerFaddFsub(Op, DAG);
28592 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
28593 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
28594 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
28596 case ISD::STRICT_FSETCC:
28597 case ISD::STRICT_FSETCCS: return LowerSETCC(Op, DAG);
28598 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
28599 case ISD::SELECT: return LowerSELECT(Op, DAG);
28600 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
28601 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
28602 case ISD::VASTART: return LowerVASTART(Op, DAG);
28603 case ISD::VAARG: return LowerVAARG(Op, DAG);
28604 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
28605 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
28606 case ISD::INTRINSIC_VOID:
28607 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
28608 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
28609 case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
28610 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
28611 case ISD::FRAME_TO_ARGS_OFFSET:
28612 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
28613 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
28614 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
28615 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
28616 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
28617 case ISD::EH_SJLJ_SETUP_DISPATCH:
28618 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
28619 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
28620 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
28621 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
28623 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
28625 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
28626 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
28628 case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
28630 case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
28633 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
28639 case ISD::UMULO: return LowerXALUO(Op, DAG);
28640 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
28641 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
28642 case ISD::ADDCARRY:
28643 case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
28645 case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
28649 case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
28653 case ISD::UMIN: return LowerMINMAX(Op, DAG);
28654 case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
28655 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
28656 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
28657 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
28658 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
28659 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
28660 case ISD::GC_TRANSITION_START:
28661 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION(Op, DAG);
28662 case ISD::ADDRSPACECAST:
28663 return LowerADDRSPACECAST(Op, DAG);
28667 /// Places new result values for the node in Results (their number
28668 /// and types must exactly match those of the original return values of
28669 /// the node), or leaves Results empty, which indicates that the node is not
28670 /// to be custom lowered after all.
28671 void X86TargetLowering::LowerOperationWrapper(SDNode *N,
28672 SmallVectorImpl<SDValue> &Results,
28673 SelectionDAG &DAG) const {
28674 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
28676 if (!Res.getNode())
28679 // If the original node has one result, take the return value from
28680 // LowerOperation as is. It might not be result number 0.
28681 if (N->getNumValues() == 1) {
28682 Results.push_back(Res);
28686 // If the original node has multiple results, then the return node should
28687 // have the same number of results.
28688 assert((N->getNumValues() == Res->getNumValues()) &&
28689 "Lowering returned the wrong number of results!");
28691 // Place the new result values based on the result number of N.
28692 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
28693 Results.push_back(Res.getValue(I));
28696 /// Replace a node with an illegal result type with a new node built out of
28698 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
28699 SmallVectorImpl<SDValue>&Results,
28700 SelectionDAG &DAG) const {
28702 switch (N->getOpcode()) {
28705 dbgs() << "ReplaceNodeResults: ";
28708 llvm_unreachable("Do not know how to custom type legalize this operation!");
28710 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
28711 // Use a v2i64 if possible.
28712 bool NoImplicitFloatOps =
28713 DAG.getMachineFunction().getFunction().hasFnAttribute(
28714 Attribute::NoImplicitFloat);
28715 if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
28717 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
28718 Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
28719 // The bit count fits in 32 bits; extract it as an i32 and then zero
28720 // extend to i64. Otherwise we end up extracting bits 63:32 separately.
28721 Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
28722 Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
28723 DAG.getIntPtrConstant(0, dl));
28724 Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
28725 Results.push_back(Wide);
28730 EVT VT = N->getValueType(0);
28731 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28732 VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
28733 // Pre-promote these to vXi16 to avoid op legalization thinking all 16
28734 // elements are needed.
28735 MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
28736 SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
28737 SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
28738 SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
28739 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
28740 unsigned NumConcats = 16 / VT.getVectorNumElements();
28741 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
28742 ConcatOps[0] = Res;
28743 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
28744 Results.push_back(Res);
28747 case X86ISD::VPMADDWD:
28748 case X86ISD::AVG: {
28749 // Legalize types for X86ISD::AVG/VPMADDWD by widening.
28750 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28752 EVT VT = N->getValueType(0);
28753 EVT InVT = N->getOperand(0).getValueType();
28754 assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
28755 "Expected a VT that divides into 128 bits.");
28756 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28757 "Unexpected type action!");
28758 unsigned NumConcat = 128 / InVT.getSizeInBits();
28760 EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
28761 InVT.getVectorElementType(),
28762 NumConcat * InVT.getVectorNumElements());
28763 EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
28764 VT.getVectorElementType(),
28765 NumConcat * VT.getVectorNumElements());
28767 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
28768 Ops[0] = N->getOperand(0);
28769 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
28770 Ops[0] = N->getOperand(1);
28771 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
28773 SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
28774 Results.push_back(Res);
28778 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28779 assert(N->getValueType(0) == MVT::i64 &&
28780 "Unexpected type (!= i64) on ABS.");
28781 MVT HalfT = MVT::i32;
28782 SDValue Lo, Hi, Tmp;
28783 SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
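// Expand abs(x) on i64 as (x + sign) ^ sign with sign = x >> 63, computed on
// 32-bit halves: the sign word is added to both halves (propagating the carry)
// and then XORed back out of each half.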
28785 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
28786 DAG.getConstant(0, dl, HalfT));
28787 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
28788 DAG.getConstant(1, dl, HalfT));
28790 ISD::SRA, dl, HalfT, Hi,
28791 DAG.getConstant(HalfT.getSizeInBits() - 1, dl,
28792 TLI.getShiftAmountTy(HalfT, DAG.getDataLayout())));
28793 Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
28794 Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
28795 SDValue(Lo.getNode(), 1));
28796 Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
28797 Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
28798 Results.push_back(Lo);
28799 Results.push_back(Hi);
28802 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
28803 case X86ISD::FMINC:
28805 case X86ISD::FMAXC:
28806 case X86ISD::FMAX: {
28807 EVT VT = N->getValueType(0);
28808 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
28809 SDValue UNDEF = DAG.getUNDEF(VT);
28810 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
28811 N->getOperand(0), UNDEF);
28812 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
28813 N->getOperand(1), UNDEF);
28814 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
28821 EVT VT = N->getValueType(0);
28822 if (VT.isVector()) {
28823 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28824 "Unexpected type action!");
28825 // If the RHS is a constant splat vector, we can widen this and let the
28826 // division/remainder-by-constant optimization handle it.
28827 // TODO: Can we do something for non-splat?
28829 if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
28830 unsigned NumConcats = 128 / VT.getSizeInBits();
28831 SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
28832 Ops0[0] = N->getOperand(0);
28833 EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
28834 SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
28835 SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
28836 SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
28837 Results.push_back(Res);
28845 case ISD::UDIVREM: {
28846 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
28847 Results.push_back(V);
28850 case ISD::TRUNCATE: {
28851 MVT VT = N->getSimpleValueType(0);
28852 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
28855 // The generic legalizer will try to widen the input type to the same
28856 // number of elements as the widened result type. But this isn't always
28857 // the best thing so do some custom legalization to avoid some cases.
28858 MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
28859 SDValue In = N->getOperand(0);
28860 EVT InVT = In.getValueType();
28862 unsigned InBits = InVT.getSizeInBits();
28863 if (128 % InBits == 0) {
28864 // 128-bit and smaller inputs should avoid the truncate altogether and
28865 // just use a build_vector that will become a shuffle.
28866 // TODO: Widen and use a shuffle directly?
28867 MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
28868 EVT EltVT = VT.getVectorElementType();
28869 unsigned WidenNumElts = WidenVT.getVectorNumElements();
28870 SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
28871 // Use the original element count so we don't do more scalar opts than necessary.
28873 unsigned MinElts = VT.getVectorNumElements();
28874 for (unsigned i=0; i < MinElts; ++i) {
28875 SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
28876 DAG.getIntPtrConstant(i, dl));
28877 Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
28879 Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
28882 // With AVX512 there are some cases that can use a target specific
28883 // truncate node to go from 256/512 to less than 128 with zeros in the
28884 // upper elements of the 128 bit result.
28885 if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
28886 // We can use VTRUNC directly if for 256 bits with VLX or for any 512.
28887 if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
28888 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
28891 // There's one case we can widen to 512 bits and use VTRUNC.
28892 if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
28893 In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
28894 DAG.getUNDEF(MVT::v4i64));
28895 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
28899 if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
28900 getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
28901 isTypeLegal(MVT::v4i64)) {
28902 // Input needs to be split and the output needs to be widened. Let's use
28903 // two VTRUNCs, and shuffle their results together into the wider type.
28904 SDValue Lo, Hi;
28905 std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
28907 Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
28908 Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
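// Each VTRUNC leaves its 4 truncated bytes in the low lanes of a v16i8, so
// the shuffle below packs the two low quarters into the first 8 bytes of the
// widened result and leaves the remaining bytes undefined.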
28909 SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
28910 { 0, 1, 2, 3, 16, 17, 18, 19,
28911 -1, -1, -1, -1, -1, -1, -1, -1 });
28912 Results.push_back(Res);
28918 case ISD::ANY_EXTEND:
28919 // Right now, only MVT::v8i8 has Custom action for an illegal type.
28920 // It's intended to custom handle the input type.
28921 assert(N->getValueType(0) == MVT::v8i8 &&
28922 "Do not know how to legalize this Node");
28924 case ISD::SIGN_EXTEND:
28925 case ISD::ZERO_EXTEND: {
28926 EVT VT = N->getValueType(0);
28927 SDValue In = N->getOperand(0);
28928 EVT InVT = In.getValueType();
28929 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
28930 (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
28931 assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
28932 "Unexpected type action!");
28933 assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
28934 // Custom split this so we can extend i8/i16->i32 invec. This is better since
28935 // sign_extend_inreg i8/i16->i64 requires an extend to i32 using sra, followed
28936 // by an extend from i32 to i64 using pcmpgt. By custom splitting we allow the
28937 // sra from the extend to i32 to be shared by the split.
28938 In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
28940 // Fill a vector with sign bits for each element.
28941 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
28942 SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
28944 // Create an unpackl and unpackh to interleave the sign bits then bitcast
28945 // to v2i64.
28946 SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
28948 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
28949 SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
28951 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
28953 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
28954 Results.push_back(Res);
28958 if (VT == MVT::v16i32 || VT == MVT::v8i64) {
28959 if (!InVT.is128BitVector()) {
28960 // Not a 128 bit vector, but maybe type legalization will promote
28961 // it to 128 bits.
28962 if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
28964 InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
28965 if (!InVT.is128BitVector())
28968 // Promote the input to 128 bits. Type legalization will turn this into
28969 // zext_inreg/sext_inreg.
28970 In = DAG.getNode(N->getOpcode(), dl, InVT, In);
28973 // Perform custom splitting instead of the two stage extend we would get
28974 // by default.
28975 EVT LoVT, HiVT;
28976 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
28977 assert(isTypeLegal(LoVT) && "Split VT not legal?");
28979 SDValue Lo = getExtendInVec(N->getOpcode(), dl, LoVT, In, DAG);
28981 // We need to shift the input over by half the number of elements.
28982 unsigned NumElts = InVT.getVectorNumElements();
28983 unsigned HalfNumElts = NumElts / 2;
28984 SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
28985 for (unsigned i = 0; i != HalfNumElts; ++i)
28986 ShufMask[i] = i + HalfNumElts;
28988 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
28989 Hi = getExtendInVec(N->getOpcode(), dl, HiVT, Hi, DAG);
28991 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
28992 Results.push_back(Res);
28996 case ISD::FP_TO_SINT:
28997 case ISD::STRICT_FP_TO_SINT:
28998 case ISD::FP_TO_UINT:
28999 case ISD::STRICT_FP_TO_UINT: {
29000 bool IsStrict = N->isStrictFPOpcode();
29001 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
29002 N->getOpcode() == ISD::STRICT_FP_TO_SINT;
29003 EVT VT = N->getValueType(0);
29004 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29005 EVT SrcVT = Src.getValueType();
29007 if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
29008 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29009 "Unexpected type action!");
29011 // Try to create a 128 bit vector, but don't exceed a 32 bit element.
29012 unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
29013 MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
29014 VT.getVectorNumElements());
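// For example, a v4i8 result is first produced as v4i32, then truncated back
// to v4i8 and finally widened to a v16i8 vector below.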
29018 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
29019 {N->getOperand(0), Src});
29020 Chain = Res.getValue(1);
29022 Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
29024 // Preserve what we know about the size of the original result. Except
29025 // when the result is v2i32 since we can't widen the assert.
29026 if (PromoteVT != MVT::v2i32)
29027 Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext,
29028 dl, PromoteVT, Res,
29029 DAG.getValueType(VT.getVectorElementType()));
29031 // Truncate back to the original width.
29032 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
29034 // Now widen to 128 bits.
29035 unsigned NumConcats = 128 / VT.getSizeInBits();
29036 MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
29037 VT.getVectorNumElements() * NumConcats);
29038 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
29039 ConcatOps[0] = Res;
29040 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
29041 Results.push_back(Res);
29043 Results.push_back(Chain);
29048 if (VT == MVT::v2i32) {
29049 assert((IsSigned || Subtarget.hasAVX512()) &&
29050 "Can only handle signed conversion without AVX512");
29051 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
29052 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29053 "Unexpected type action!");
29054 if (Src.getValueType() == MVT::v2f64) {
29057 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
29059 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
29061 // If we have VLX we can emit a target specific FP_TO_UINT node.
29062 if (!IsSigned && !Subtarget.hasVLX()) {
29063 // Otherwise we can defer to the generic legalizer which will widen
29064 // the input as well. This will be further widened during op
29065 // legalization to v8i32<-v8f64.
29066 // For strict nodes we'll need to widen ourselves.
29067 // FIXME: Fix the type legalizer to safely widen strict nodes?
29070 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
29071 DAG.getConstantFP(0.0, dl, MVT::v2f64));
29072 Opc = N->getOpcode();
29077 Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
29078 {N->getOperand(0), Src});
29079 Chain = Res.getValue(1);
29081 Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
29083 Results.push_back(Res);
29085 Results.push_back(Chain);
29089 // Custom widen strict v2f32->v2i32 by padding with zeros.
29090 // FIXME: Should generic type legalizer do this?
29091 if (Src.getValueType() == MVT::v2f32 && IsStrict) {
29092 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
29093 DAG.getConstantFP(0.0, dl, MVT::v2f32));
29094 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
29095 {N->getOperand(0), Src});
29096 Results.push_back(Res);
29097 Results.push_back(Res.getValue(1));
29101 // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
29102 // so early out here.
29106 assert(!VT.isVector() && "Vectors should have been handled above!");
29108 if (Subtarget.hasDQI() && VT == MVT::i64 &&
29109 (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
29110 assert(!Subtarget.is64Bit() && "i64 should be legal");
29111 unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
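// Without VLX only the 512-bit forms of these conversions are legal, so the
// operation has to be widened to 8 elements even for a single scalar.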
29112 // If we use a 128-bit result we might need to use a target specific node.
29113 unsigned SrcElts =
29114 std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
29115 MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
29116 MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
29117 unsigned Opc = N->getOpcode();
29118 if (NumElts != SrcElts) {
29120 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
29122 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
29125 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
29126 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
29127 DAG.getConstantFP(0.0, dl, VecInVT), Src,
29131 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
29132 Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
29133 Chain = Res.getValue(1);
29135 Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
29136 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
29137 Results.push_back(Res);
29139 Results.push_back(Chain);
29144 if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
29145 Results.push_back(V);
29147 Results.push_back(Chain);
29151 case ISD::SINT_TO_FP:
29152 case ISD::STRICT_SINT_TO_FP:
29153 case ISD::UINT_TO_FP:
29154 case ISD::STRICT_UINT_TO_FP: {
29155 bool IsStrict = N->isStrictFPOpcode();
29156 bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
29157 N->getOpcode() == ISD::STRICT_SINT_TO_FP;
29158 EVT VT = N->getValueType(0);
29159 if (VT != MVT::v2f32)
29161 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29162 EVT SrcVT = Src.getValueType();
29163 if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
29165 unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
29166 : X86ISD::STRICT_CVTUI2P;
29167 SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
29168 {N->getOperand(0), Src});
29169 Results.push_back(Res);
29170 Results.push_back(Res.getValue(1));
29172 unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
29173 Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
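// Without AVX512, lower unsigned v2i64 -> v2f32 with a round-to-odd trick:
// elements with the sign bit set are halved as (x >> 1) | (x & 1), converted
// with the signed scalar converter, and then doubled. Rounding the halving
// to odd keeps the final single-precision result correctly rounded; the
// select on the (shuffled) sign mask picks between the fast and slow paths.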
29177 if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
29178 Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
29179 SDValue Zero = DAG.getConstant(0, dl, SrcVT);
29180 SDValue One = DAG.getConstant(1, dl, SrcVT);
29181 SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
29182 DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
29183 DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
29184 SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
29185 SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
29186 SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
29187 for (int i = 0; i != 2; ++i) {
29188 SDValue Src = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
29189 SignSrc, DAG.getIntPtrConstant(i, dl));
29192 DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
29193 {N->getOperand(0), Src});
29195 SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Src);
29197 SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
29198 SDValue Slow, Chain;
29200 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
29201 SignCvts[0].getValue(1), SignCvts[1].getValue(1));
29202 Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
29203 {Chain, SignCvt, SignCvt});
29204 Chain = Slow.getValue(1);
29206 Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
29208 IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
29210 DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
29211 SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
29212 Results.push_back(Cvt);
29214 Results.push_back(Chain);
29218 if (SrcVT != MVT::v2i32)
29221 if (IsSigned || Subtarget.hasAVX512()) {
29225 // Custom widen strict v2i32->v2f32 to avoid scalarization.
29226 // FIXME: Should generic type legalizer do this?
29227 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
29228 DAG.getConstant(0, dl, MVT::v2i32));
29229 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
29230 {N->getOperand(0), Src});
29231 Results.push_back(Res);
29232 Results.push_back(Res.getValue(1));
29236 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
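// Lower unsigned v2i32 -> v2f32 with the double-precision bias trick:
// zero-extend each lane to i64 and OR in the bit pattern of 2^52
// (0x4330000000000000) so that, reinterpreted as a double, each lane equals
// 2^52 + x. Subtracting 2^52 then yields x exactly, and the v2f64 result is
// rounded down to v2f32.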
29237 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
29239 DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
29240 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
29241 DAG.getBitcast(MVT::v2i64, VBias));
29242 Or = DAG.getBitcast(MVT::v2f64, Or);
29244 SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
29245 {N->getOperand(0), Or, VBias});
29246 SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
29247 {MVT::v4f32, MVT::Other},
29248 {Sub.getValue(1), Sub});
29249 Results.push_back(Res);
29250 Results.push_back(Res.getValue(1));
29252 // TODO: Are there any fast-math-flags to propagate here?
29253 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
29254 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
29258 case ISD::STRICT_FP_ROUND:
29259 case ISD::FP_ROUND: {
29260 bool IsStrict = N->isStrictFPOpcode();
29261 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29262 if (!isTypeLegal(Src.getValueType()))
29266 V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {MVT::v4f32, MVT::Other},
29267 {N->getOperand(0), N->getOperand(1)});
29269 V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
29270 Results.push_back(V);
29272 Results.push_back(V.getValue(1));
29275 case ISD::FP_EXTEND: {
29276 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
29277 // No other ValueType for FP_EXTEND should reach this point.
29278 assert(N->getValueType(0) == MVT::v2f32 &&
29279 "Do not know how to legalize this Node");
29282 case ISD::INTRINSIC_W_CHAIN: {
29283 unsigned IntNo = N->getConstantOperandVal(1);
29285 default : llvm_unreachable("Do not know how to custom type "
29286 "legalize this intrinsic operation!");
29287 case Intrinsic::x86_rdtsc:
29288 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
29290 case Intrinsic::x86_rdtscp:
29291 return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
29293 case Intrinsic::x86_rdpmc:
29294 expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
29297 case Intrinsic::x86_xgetbv:
29298 expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
29303 case ISD::READCYCLECOUNTER: {
29304 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
29306 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
29307 EVT T = N->getValueType(0);
29308 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
29309 bool Regs64bit = T == MVT::i128;
29310 assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
29311 "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
29312 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
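// CMPXCHG8B/CMPXCHG16B expect the comparand in EDX:EAX (RDX:RAX), the new
// value in ECX:EBX (RCX:RBX), and report success in ZF, so copy the operand
// halves into those fixed registers below.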
29313 SDValue cpInL, cpInH;
29314 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
29315 DAG.getConstant(0, dl, HalfT));
29316 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
29317 DAG.getConstant(1, dl, HalfT));
29318 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
29319 Regs64bit ? X86::RAX : X86::EAX,
29321 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
29322 Regs64bit ? X86::RDX : X86::EDX,
29323 cpInH, cpInL.getValue(1));
29324 SDValue swapInL, swapInH;
29325 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
29326 DAG.getConstant(0, dl, HalfT));
29327 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
29328 DAG.getConstant(1, dl, HalfT));
29330 DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
29331 swapInH, cpInH.getValue(1));
29332 // If the current function needs the base pointer, RBX,
29333 // we shouldn't use cmpxchg directly.
29334 // Lowering that instruction clobbers RBX, and since RBX
29335 // would then be a reserved register, the register allocator
29336 // will not make sure its value is properly saved and
29337 // restored around this live-range.
29338 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
29340 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
29341 Register BasePtr = TRI->getBaseRegister();
29342 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
29343 if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
29344 (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
29345 // ISel prefers the LCMPXCHG64 variant.
29346 // If that assert breaks, that means it is not the case anymore,
29347 // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
29348 // not just EBX. This is a matter of accepting i64 input for that
29349 // pseudo, and restoring into the register of the right width
29350 // in the expand pseudo. Everything else should just work.
29351 assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
29352 "Saving only half of the RBX");
29353 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
29354 : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
29355 SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
29356 Regs64bit ? X86::RBX : X86::EBX,
29357 HalfT, swapInH.getValue(1));
29358 SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
29360 /*Glue*/ RBXSave.getValue(2)};
29361 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
29364 Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
29365 swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
29366 Regs64bit ? X86::RBX : X86::EBX, swapInL,
29367 swapInH.getValue(1));
29368 SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
29369 swapInL.getValue(1)};
29370 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
29372 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
29373 Regs64bit ? X86::RAX : X86::EAX,
29374 HalfT, Result.getValue(1));
29375 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
29376 Regs64bit ? X86::RDX : X86::EDX,
29377 HalfT, cpOutL.getValue(2));
29378 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
29380 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
29381 MVT::i32, cpOutH.getValue(2));
29382 SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
29383 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
29385 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
29386 Results.push_back(Success);
29387 Results.push_back(EFLAGS.getValue(1));
29390 case ISD::ATOMIC_LOAD: {
29391 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
29392 bool NoImplicitFloatOps =
29393 DAG.getMachineFunction().getFunction().hasFnAttribute(
29394 Attribute::NoImplicitFloat);
29395 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
29396 auto *Node = cast<AtomicSDNode>(N);
29397 if (Subtarget.hasSSE2()) {
29398 // Use a VZEXT_LOAD which will be selected as MOVQ. Then extract the
29399 // lower 64-bits.
29400 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
29401 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
29402 SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
29403 MVT::i64, Node->getMemOperand());
29404 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
29405 DAG.getIntPtrConstant(0, dl));
29406 Results.push_back(Res);
29407 Results.push_back(Ld.getValue(1));
29410 if (Subtarget.hasX87()) {
29411 // First load this into an 80-bit X87 register. This will put the whole
29412 // integer into the significand.
29413 // FIXME: Do we need to glue? See FIXME comment in BuildFILD.
29414 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other, MVT::Glue);
29415 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
29416 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD_FLAG,
29417 dl, Tys, Ops, MVT::i64,
29418 Node->getMemOperand());
29419 SDValue Chain = Result.getValue(1);
29420 SDValue InFlag = Result.getValue(2);
29422 // Now store the X87 register to a stack temporary and convert to i64.
29423 // This store is not atomic and doesn't need to be.
29424 // FIXME: We don't need a stack temporary if the result of the load
29425 // is already being stored. We could just directly store there.
29426 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
29427 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
29428 MachinePointerInfo MPI =
29429 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
29430 SDValue StoreOps[] = { Chain, Result, StackPtr, InFlag };
29431 Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, dl,
29432 DAG.getVTList(MVT::Other), StoreOps,
29433 MVT::i64, MPI, 0 /*Align*/,
29434 MachineMemOperand::MOStore);
29436 // Finally load the value back from the stack temporary and return it.
29437 // This load is not atomic and doesn't need to be.
29438 // This load will be further type legalized.
29439 Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
29440 Results.push_back(Result);
29441 Results.push_back(Result.getValue(1));
29445 // TODO: Use MOVLPS when SSE1 is available?
29446 // Delegate to generic TypeLegalization. Situations we can really handle
29447 // should have already been dealt with by AtomicExpandPass.cpp.
29450 case ISD::ATOMIC_SWAP:
29451 case ISD::ATOMIC_LOAD_ADD:
29452 case ISD::ATOMIC_LOAD_SUB:
29453 case ISD::ATOMIC_LOAD_AND:
29454 case ISD::ATOMIC_LOAD_OR:
29455 case ISD::ATOMIC_LOAD_XOR:
29456 case ISD::ATOMIC_LOAD_NAND:
29457 case ISD::ATOMIC_LOAD_MIN:
29458 case ISD::ATOMIC_LOAD_MAX:
29459 case ISD::ATOMIC_LOAD_UMIN:
29460 case ISD::ATOMIC_LOAD_UMAX:
29461 // Delegate to generic TypeLegalization. Situations we can really handle
29462 // should have already been dealt with by AtomicExpandPass.cpp.
29465 case ISD::BITCAST: {
29466 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
29467 EVT DstVT = N->getValueType(0);
29468 EVT SrcVT = N->getOperand(0).getValueType();
29470 // If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit target
29471 // we can split using the k-register rather than memory.
29472 if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
29473 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
29475 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
29476 Lo = DAG.getBitcast(MVT::i32, Lo);
29477 Hi = DAG.getBitcast(MVT::i32, Hi);
29478 SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
29479 Results.push_back(Res);
29483 // Custom splitting for BWI types when AVX512F is available but BWI isn't.
29484 if ((DstVT == MVT::v32i16 || DstVT == MVT::v64i8) &&
29485 SrcVT.isVector() && isTypeLegal(SrcVT)) {
29487 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
29488 MVT CastVT = (DstVT == MVT::v32i16) ? MVT::v16i16 : MVT::v32i8;
29489 Lo = DAG.getBitcast(CastVT, Lo);
29490 Hi = DAG.getBitcast(CastVT, Hi);
29491 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
29492 Results.push_back(Res);
29496 if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
29497 assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
29498 "Unexpected type action!");
29499 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
29500 SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, WideVT, N->getOperand(0));
29501 Results.push_back(Res);
29507 case ISD::MGATHER: {
29508 EVT VT = N->getValueType(0);
29509 if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
29510 (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
29511 auto *Gather = cast<MaskedGatherSDNode>(N);
29512 SDValue Index = Gather->getIndex();
29513 if (Index.getValueType() != MVT::v2i64)
29515 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29516 "Unexpected type action!");
29517 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
29518 SDValue Mask = Gather->getMask();
29519 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
29520 SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
29521 Gather->getPassThru(),
29523 if (!Subtarget.hasVLX()) {
29524 // We need to widen the mask, but the instruction will only use 2
29525 // of its elements. So we can use undef.
29526 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
29527 DAG.getUNDEF(MVT::v2i1));
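// Without VLX the gather reads its mask from the sign bit of each full-width
// vector element, hence the sign-extension of the i1 mask to v4i32.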
29528 Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
29530 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
29531 Gather->getBasePtr(), Index, Gather->getScale() };
29532 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
29533 DAG.getVTList(WideVT, Mask.getValueType(), MVT::Other), Ops, dl,
29534 Gather->getMemoryVT(), Gather->getMemOperand());
29535 Results.push_back(Res);
29536 Results.push_back(Res.getValue(2));
29542 // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
29543 // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
29544 // cast since type legalization will try to use an i64 load.
29545 MVT VT = N->getSimpleValueType(0);
29546 assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
29547 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29548 "Unexpected type action!");
29549 if (!ISD::isNON_EXTLoad(N))
29551 auto *Ld = cast<LoadSDNode>(N);
29552 if (Subtarget.hasSSE2()) {
29553 MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
29554 SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
29555 Ld->getPointerInfo(), Ld->getAlignment(),
29556 Ld->getMemOperand()->getFlags());
29557 SDValue Chain = Res.getValue(1);
29558 MVT VecVT = MVT::getVectorVT(LdVT, 2);
29559 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
29560 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
29561 Res = DAG.getBitcast(WideVT, Res);
29562 Results.push_back(Res);
29563 Results.push_back(Chain);
29566 assert(Subtarget.hasSSE1() && "Expected SSE");
29567 SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
29568 SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
29569 SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
29570 MVT::i64, Ld->getMemOperand());
29571 Results.push_back(Res);
29572 Results.push_back(Res.getValue(1));
29575 case ISD::ADDRSPACECAST: {
29576 SDValue Src = N->getOperand(0);
29577 EVT DstVT = N->getValueType(0);
29578 AddrSpaceCastSDNode *CastN = cast<AddrSpaceCastSDNode>(N);
29579 unsigned SrcAS = CastN->getSrcAddressSpace();
29581 assert(SrcAS != CastN->getDestAddressSpace() &&
29582 "addrspacecast must be between different address spaces");
29585 if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64)
29586 Res = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
29587 else if (DstVT == MVT::i64)
29588 Res = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
29589 else if (DstVT == MVT::i32)
29590 Res = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
29592 report_fatal_error("Unrecognized addrspacecast type legalization");
29594 Results.push_back(Res);
29600 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
29601 switch ((X86ISD::NodeType)Opcode) {
29602 case X86ISD::FIRST_NUMBER: break;
29603 case X86ISD::BSF: return "X86ISD::BSF";
29604 case X86ISD::BSR: return "X86ISD::BSR";
29605 case X86ISD::SHLD: return "X86ISD::SHLD";
29606 case X86ISD::SHRD: return "X86ISD::SHRD";
29607 case X86ISD::FAND: return "X86ISD::FAND";
29608 case X86ISD::FANDN: return "X86ISD::FANDN";
29609 case X86ISD::FOR: return "X86ISD::FOR";
29610 case X86ISD::FXOR: return "X86ISD::FXOR";
29611 case X86ISD::FILD: return "X86ISD::FILD";
29612 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
29613 case X86ISD::FIST: return "X86ISD::FIST";
29614 case X86ISD::FP_TO_INT_IN_MEM: return "X86ISD::FP_TO_INT_IN_MEM";
29615 case X86ISD::FLD: return "X86ISD::FLD";
29616 case X86ISD::FST: return "X86ISD::FST";
29617 case X86ISD::CALL: return "X86ISD::CALL";
29618 case X86ISD::BT: return "X86ISD::BT";
29619 case X86ISD::CMP: return "X86ISD::CMP";
29620 case X86ISD::STRICT_FCMP: return "X86ISD::STRICT_FCMP";
29621 case X86ISD::STRICT_FCMPS: return "X86ISD::STRICT_FCMPS";
29622 case X86ISD::COMI: return "X86ISD::COMI";
29623 case X86ISD::UCOMI: return "X86ISD::UCOMI";
29624 case X86ISD::CMPM: return "X86ISD::CMPM";
29625 case X86ISD::STRICT_CMPM: return "X86ISD::STRICT_CMPM";
29626 case X86ISD::CMPM_SAE: return "X86ISD::CMPM_SAE";
29627 case X86ISD::SETCC: return "X86ISD::SETCC";
29628 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
29629 case X86ISD::FSETCC: return "X86ISD::FSETCC";
29630 case X86ISD::FSETCCM: return "X86ISD::FSETCCM";
29631 case X86ISD::FSETCCM_SAE: return "X86ISD::FSETCCM_SAE";
29632 case X86ISD::CMOV: return "X86ISD::CMOV";
29633 case X86ISD::BRCOND: return "X86ISD::BRCOND";
29634 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
29635 case X86ISD::IRET: return "X86ISD::IRET";
29636 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
29637 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
29638 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
29639 case X86ISD::Wrapper: return "X86ISD::Wrapper";
29640 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
29641 case X86ISD::MOVQ2DQ: return "X86ISD::MOVQ2DQ";
29642 case X86ISD::MOVDQ2Q: return "X86ISD::MOVDQ2Q";
29643 case X86ISD::MMX_MOVD2W: return "X86ISD::MMX_MOVD2W";
29644 case X86ISD::MMX_MOVW2D: return "X86ISD::MMX_MOVW2D";
29645 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
29646 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
29647 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
29648 case X86ISD::PINSRB: return "X86ISD::PINSRB";
29649 case X86ISD::PINSRW: return "X86ISD::PINSRW";
29650 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
29651 case X86ISD::ANDNP: return "X86ISD::ANDNP";
29652 case X86ISD::BLENDI: return "X86ISD::BLENDI";
29653 case X86ISD::BLENDV: return "X86ISD::BLENDV";
29654 case X86ISD::HADD: return "X86ISD::HADD";
29655 case X86ISD::HSUB: return "X86ISD::HSUB";
29656 case X86ISD::FHADD: return "X86ISD::FHADD";
29657 case X86ISD::FHSUB: return "X86ISD::FHSUB";
29658 case X86ISD::CONFLICT: return "X86ISD::CONFLICT";
29659 case X86ISD::FMAX: return "X86ISD::FMAX";
29660 case X86ISD::FMAXS: return "X86ISD::FMAXS";
29661 case X86ISD::FMAX_SAE: return "X86ISD::FMAX_SAE";
29662 case X86ISD::FMAXS_SAE: return "X86ISD::FMAXS_SAE";
29663 case X86ISD::FMIN: return "X86ISD::FMIN";
29664 case X86ISD::FMINS: return "X86ISD::FMINS";
29665 case X86ISD::FMIN_SAE: return "X86ISD::FMIN_SAE";
29666 case X86ISD::FMINS_SAE: return "X86ISD::FMINS_SAE";
29667 case X86ISD::FMAXC: return "X86ISD::FMAXC";
29668 case X86ISD::FMINC: return "X86ISD::FMINC";
29669 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
29670 case X86ISD::FRCP: return "X86ISD::FRCP";
29671 case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
29672 case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
29673 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
29674 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
29675 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
29676 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
29677 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
29678 case X86ISD::EH_SJLJ_SETUP_DISPATCH:
29679 return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
29680 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
29681 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
29682 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
29683 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
29684 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
29685 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
29686 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
29687 case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
29688 return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
29689 case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
29690 return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
29691 case X86ISD::LADD: return "X86ISD::LADD";
29692 case X86ISD::LSUB: return "X86ISD::LSUB";
29693 case X86ISD::LOR: return "X86ISD::LOR";
29694 case X86ISD::LXOR: return "X86ISD::LXOR";
29695 case X86ISD::LAND: return "X86ISD::LAND";
29696 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
29697 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
29698 case X86ISD::VEXTRACT_STORE: return "X86ISD::VEXTRACT_STORE";
29699 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
29700 case X86ISD::VTRUNCS: return "X86ISD::VTRUNCS";
29701 case X86ISD::VTRUNCUS: return "X86ISD::VTRUNCUS";
29702 case X86ISD::VMTRUNC: return "X86ISD::VMTRUNC";
29703 case X86ISD::VMTRUNCS: return "X86ISD::VMTRUNCS";
29704 case X86ISD::VMTRUNCUS: return "X86ISD::VMTRUNCUS";
29705 case X86ISD::VTRUNCSTORES: return "X86ISD::VTRUNCSTORES";
29706 case X86ISD::VTRUNCSTOREUS: return "X86ISD::VTRUNCSTOREUS";
29707 case X86ISD::VMTRUNCSTORES: return "X86ISD::VMTRUNCSTORES";
29708 case X86ISD::VMTRUNCSTOREUS: return "X86ISD::VMTRUNCSTOREUS";
29709 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
29710 case X86ISD::STRICT_VFPEXT: return "X86ISD::STRICT_VFPEXT";
29711 case X86ISD::VFPEXT_SAE: return "X86ISD::VFPEXT_SAE";
29712 case X86ISD::VFPEXTS: return "X86ISD::VFPEXTS";
29713 case X86ISD::VFPEXTS_SAE: return "X86ISD::VFPEXTS_SAE";
29714 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
29715 case X86ISD::STRICT_VFPROUND: return "X86ISD::STRICT_VFPROUND";
29716 case X86ISD::VMFPROUND: return "X86ISD::VMFPROUND";
29717 case X86ISD::VFPROUND_RND: return "X86ISD::VFPROUND_RND";
29718 case X86ISD::VFPROUNDS: return "X86ISD::VFPROUNDS";
29719 case X86ISD::VFPROUNDS_RND: return "X86ISD::VFPROUNDS_RND";
29720 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
29721 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
29722 case X86ISD::VSHL: return "X86ISD::VSHL";
29723 case X86ISD::VSRL: return "X86ISD::VSRL";
29724 case X86ISD::VSRA: return "X86ISD::VSRA";
29725 case X86ISD::VSHLI: return "X86ISD::VSHLI";
29726 case X86ISD::VSRLI: return "X86ISD::VSRLI";
29727 case X86ISD::VSRAI: return "X86ISD::VSRAI";
29728 case X86ISD::VSHLV: return "X86ISD::VSHLV";
29729 case X86ISD::VSRLV: return "X86ISD::VSRLV";
29730 case X86ISD::VSRAV: return "X86ISD::VSRAV";
29731 case X86ISD::VROTLI: return "X86ISD::VROTLI";
29732 case X86ISD::VROTRI: return "X86ISD::VROTRI";
29733 case X86ISD::VPPERM: return "X86ISD::VPPERM";
29734 case X86ISD::CMPP: return "X86ISD::CMPP";
29735 case X86ISD::STRICT_CMPP: return "X86ISD::STRICT_CMPP";
29736 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
29737 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
29738 case X86ISD::PHMINPOS: return "X86ISD::PHMINPOS";
29739 case X86ISD::ADD: return "X86ISD::ADD";
29740 case X86ISD::SUB: return "X86ISD::SUB";
29741 case X86ISD::ADC: return "X86ISD::ADC";
29742 case X86ISD::SBB: return "X86ISD::SBB";
29743 case X86ISD::SMUL: return "X86ISD::SMUL";
29744 case X86ISD::UMUL: return "X86ISD::UMUL";
29745 case X86ISD::OR: return "X86ISD::OR";
29746 case X86ISD::XOR: return "X86ISD::XOR";
29747 case X86ISD::AND: return "X86ISD::AND";
29748 case X86ISD::BEXTR: return "X86ISD::BEXTR";
29749 case X86ISD::BZHI: return "X86ISD::BZHI";
29750 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
29751 case X86ISD::MOVMSK: return "X86ISD::MOVMSK";
29752 case X86ISD::PTEST: return "X86ISD::PTEST";
29753 case X86ISD::TESTP: return "X86ISD::TESTP";
29754 case X86ISD::KORTEST: return "X86ISD::KORTEST";
29755 case X86ISD::KTEST: return "X86ISD::KTEST";
29756 case X86ISD::KADD: return "X86ISD::KADD";
29757 case X86ISD::KSHIFTL: return "X86ISD::KSHIFTL";
29758 case X86ISD::KSHIFTR: return "X86ISD::KSHIFTR";
29759 case X86ISD::PACKSS: return "X86ISD::PACKSS";
29760 case X86ISD::PACKUS: return "X86ISD::PACKUS";
29761 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
29762 case X86ISD::VALIGN: return "X86ISD::VALIGN";
29763 case X86ISD::VSHLD: return "X86ISD::VSHLD";
29764 case X86ISD::VSHRD: return "X86ISD::VSHRD";
29765 case X86ISD::VSHLDV: return "X86ISD::VSHLDV";
29766 case X86ISD::VSHRDV: return "X86ISD::VSHRDV";
29767 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
29768 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
29769 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
29770 case X86ISD::SHUFP: return "X86ISD::SHUFP";
29771 case X86ISD::SHUF128: return "X86ISD::SHUF128";
29772 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
29773 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
29774 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
29775 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
29776 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
29777 case X86ISD::MOVSD: return "X86ISD::MOVSD";
29778 case X86ISD::MOVSS: return "X86ISD::MOVSS";
29779 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
29780 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
29781 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
29782 case X86ISD::VBROADCAST_LOAD: return "X86ISD::VBROADCAST_LOAD";
29783 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
29784 case X86ISD::SUBV_BROADCAST: return "X86ISD::SUBV_BROADCAST";
29785 case X86ISD::VPERMILPV: return "X86ISD::VPERMILPV";
29786 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
29787 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
29788 case X86ISD::VPERMV: return "X86ISD::VPERMV";
29789 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
29790 case X86ISD::VPERMI: return "X86ISD::VPERMI";
29791 case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
29792 case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
29793 case X86ISD::VFIXUPIMM_SAE: return "X86ISD::VFIXUPIMM_SAE";
29794 case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS";
29795 case X86ISD::VFIXUPIMMS_SAE: return "X86ISD::VFIXUPIMMS_SAE";
29796 case X86ISD::VRANGE: return "X86ISD::VRANGE";
29797 case X86ISD::VRANGE_SAE: return "X86ISD::VRANGE_SAE";
29798 case X86ISD::VRANGES: return "X86ISD::VRANGES";
29799 case X86ISD::VRANGES_SAE: return "X86ISD::VRANGES_SAE";
29800 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
29801 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
29802 case X86ISD::PSADBW: return "X86ISD::PSADBW";
29803 case X86ISD::DBPSADBW: return "X86ISD::DBPSADBW";
29804 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
29805 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
29806 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
29807 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
29808 case X86ISD::MFENCE: return "X86ISD::MFENCE";
29809 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
29810 case X86ISD::SAHF: return "X86ISD::SAHF";
29811 case X86ISD::RDRAND: return "X86ISD::RDRAND";
29812 case X86ISD::RDSEED: return "X86ISD::RDSEED";
29813 case X86ISD::RDPKRU: return "X86ISD::RDPKRU";
29814 case X86ISD::WRPKRU: return "X86ISD::WRPKRU";
29815 case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW";
29816 case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD";
29817 case X86ISD::VPSHA: return "X86ISD::VPSHA";
29818 case X86ISD::VPSHL: return "X86ISD::VPSHL";
29819 case X86ISD::VPCOM: return "X86ISD::VPCOM";
29820 case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
29821 case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2";
29822 case X86ISD::FMSUB: return "X86ISD::FMSUB";
29823 case X86ISD::FNMADD: return "X86ISD::FNMADD";
29824 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
29825 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
29826 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
29827 case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND";
29828 case X86ISD::FNMADD_RND: return "X86ISD::FNMADD_RND";
29829 case X86ISD::FMSUB_RND: return "X86ISD::FMSUB_RND";
29830 case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
29831 case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
29832 case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
29833 case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
29834 case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
29835 case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
29836 case X86ISD::STRICT_VRNDSCALE: return "X86ISD::STRICT_VRNDSCALE";
29837 case X86ISD::VRNDSCALE_SAE: return "X86ISD::VRNDSCALE_SAE";
29838 case X86ISD::VRNDSCALES: return "X86ISD::VRNDSCALES";
29839 case X86ISD::VRNDSCALES_SAE: return "X86ISD::VRNDSCALES_SAE";
29840 case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
29841 case X86ISD::VREDUCE_SAE: return "X86ISD::VREDUCE_SAE";
29842 case X86ISD::VREDUCES: return "X86ISD::VREDUCES";
29843 case X86ISD::VREDUCES_SAE: return "X86ISD::VREDUCES_SAE";
29844 case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
29845 case X86ISD::VGETMANT_SAE: return "X86ISD::VGETMANT_SAE";
29846 case X86ISD::VGETMANTS: return "X86ISD::VGETMANTS";
29847 case X86ISD::VGETMANTS_SAE: return "X86ISD::VGETMANTS_SAE";
29848 case X86ISD::PCMPESTR: return "X86ISD::PCMPESTR";
29849 case X86ISD::PCMPISTR: return "X86ISD::PCMPISTR";
29850 case X86ISD::XTEST: return "X86ISD::XTEST";
29851 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
29852 case X86ISD::EXPAND: return "X86ISD::EXPAND";
29853 case X86ISD::SELECTS: return "X86ISD::SELECTS";
29854 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
29855 case X86ISD::RCP14: return "X86ISD::RCP14";
29856 case X86ISD::RCP14S: return "X86ISD::RCP14S";
29857 case X86ISD::RCP28: return "X86ISD::RCP28";
29858 case X86ISD::RCP28_SAE: return "X86ISD::RCP28_SAE";
29859 case X86ISD::RCP28S: return "X86ISD::RCP28S";
29860 case X86ISD::RCP28S_SAE: return "X86ISD::RCP28S_SAE";
29861 case X86ISD::EXP2: return "X86ISD::EXP2";
29862 case X86ISD::EXP2_SAE: return "X86ISD::EXP2_SAE";
29863 case X86ISD::RSQRT14: return "X86ISD::RSQRT14";
29864 case X86ISD::RSQRT14S: return "X86ISD::RSQRT14S";
29865 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
29866 case X86ISD::RSQRT28_SAE: return "X86ISD::RSQRT28_SAE";
29867 case X86ISD::RSQRT28S: return "X86ISD::RSQRT28S";
29868 case X86ISD::RSQRT28S_SAE: return "X86ISD::RSQRT28S_SAE";
29869 case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
29870 case X86ISD::FADDS: return "X86ISD::FADDS";
29871 case X86ISD::FADDS_RND: return "X86ISD::FADDS_RND";
29872 case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
29873 case X86ISD::FSUBS: return "X86ISD::FSUBS";
29874 case X86ISD::FSUBS_RND: return "X86ISD::FSUBS_RND";
29875 case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
29876 case X86ISD::FMULS: return "X86ISD::FMULS";
29877 case X86ISD::FMULS_RND: return "X86ISD::FMULS_RND";
29878 case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
29879 case X86ISD::FDIVS: return "X86ISD::FDIVS";
29880 case X86ISD::FDIVS_RND: return "X86ISD::FDIVS_RND";
29881 case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
29882 case X86ISD::FSQRTS: return "X86ISD::FSQRTS";
29883 case X86ISD::FSQRTS_RND: return "X86ISD::FSQRTS_RND";
29884 case X86ISD::FGETEXP: return "X86ISD::FGETEXP";
29885 case X86ISD::FGETEXP_SAE: return "X86ISD::FGETEXP_SAE";
29886 case X86ISD::FGETEXPS: return "X86ISD::FGETEXPS";
29887 case X86ISD::FGETEXPS_SAE: return "X86ISD::FGETEXPS_SAE";
29888 case X86ISD::SCALEF: return "X86ISD::SCALEF";
29889 case X86ISD::SCALEF_RND: return "X86ISD::SCALEF_RND";
29890 case X86ISD::SCALEFS: return "X86ISD::SCALEFS";
29891 case X86ISD::SCALEFS_RND: return "X86ISD::SCALEFS_RND";
29892 case X86ISD::AVG: return "X86ISD::AVG";
29893 case X86ISD::MULHRS: return "X86ISD::MULHRS";
29894 case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND";
29895 case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND";
29896 case X86ISD::CVTTP2SI: return "X86ISD::CVTTP2SI";
29897 case X86ISD::CVTTP2UI: return "X86ISD::CVTTP2UI";
29898 case X86ISD::STRICT_CVTTP2SI: return "X86ISD::STRICT_CVTTP2SI";
29899 case X86ISD::STRICT_CVTTP2UI: return "X86ISD::STRICT_CVTTP2UI";
29900 case X86ISD::MCVTTP2SI: return "X86ISD::MCVTTP2SI";
29901 case X86ISD::MCVTTP2UI: return "X86ISD::MCVTTP2UI";
29902 case X86ISD::CVTTP2SI_SAE: return "X86ISD::CVTTP2SI_SAE";
29903 case X86ISD::CVTTP2UI_SAE: return "X86ISD::CVTTP2UI_SAE";
29904 case X86ISD::CVTTS2SI: return "X86ISD::CVTTS2SI";
29905 case X86ISD::CVTTS2UI: return "X86ISD::CVTTS2UI";
29906 case X86ISD::CVTTS2SI_SAE: return "X86ISD::CVTTS2SI_SAE";
29907 case X86ISD::CVTTS2UI_SAE: return "X86ISD::CVTTS2UI_SAE";
29908 case X86ISD::CVTSI2P: return "X86ISD::CVTSI2P";
29909 case X86ISD::CVTUI2P: return "X86ISD::CVTUI2P";
29910 case X86ISD::STRICT_CVTSI2P: return "X86ISD::STRICT_CVTSI2P";
29911 case X86ISD::STRICT_CVTUI2P: return "X86ISD::STRICT_CVTUI2P";
29912 case X86ISD::MCVTSI2P: return "X86ISD::MCVTSI2P";
29913 case X86ISD::MCVTUI2P: return "X86ISD::MCVTUI2P";
29914 case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
29915 case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
29916 case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT";
29917 case X86ISD::SCALAR_SINT_TO_FP: return "X86ISD::SCALAR_SINT_TO_FP";
29918 case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
29919 case X86ISD::SCALAR_UINT_TO_FP: return "X86ISD::SCALAR_UINT_TO_FP";
29920 case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
29921 case X86ISD::CVTPS2PH: return "X86ISD::CVTPS2PH";
29922 case X86ISD::MCVTPS2PH: return "X86ISD::MCVTPS2PH";
29923 case X86ISD::CVTPH2PS: return "X86ISD::CVTPH2PS";
29924 case X86ISD::CVTPH2PS_SAE: return "X86ISD::CVTPH2PS_SAE";
29925 case X86ISD::CVTP2SI: return "X86ISD::CVTP2SI";
29926 case X86ISD::CVTP2UI: return "X86ISD::CVTP2UI";
29927 case X86ISD::MCVTP2SI: return "X86ISD::MCVTP2SI";
29928 case X86ISD::MCVTP2UI: return "X86ISD::MCVTP2UI";
29929 case X86ISD::CVTP2SI_RND: return "X86ISD::CVTP2SI_RND";
29930 case X86ISD::CVTP2UI_RND: return "X86ISD::CVTP2UI_RND";
29931 case X86ISD::CVTS2SI: return "X86ISD::CVTS2SI";
29932 case X86ISD::CVTS2UI: return "X86ISD::CVTS2UI";
29933 case X86ISD::CVTS2SI_RND: return "X86ISD::CVTS2SI_RND";
29934 case X86ISD::CVTS2UI_RND: return "X86ISD::CVTS2UI_RND";
29935 case X86ISD::CVTNE2PS2BF16: return "X86ISD::CVTNE2PS2BF16";
29936 case X86ISD::CVTNEPS2BF16: return "X86ISD::CVTNEPS2BF16";
29937 case X86ISD::MCVTNEPS2BF16: return "X86ISD::MCVTNEPS2BF16";
29938 case X86ISD::DPBF16PS: return "X86ISD::DPBF16PS";
29939 case X86ISD::LWPINS: return "X86ISD::LWPINS";
29940 case X86ISD::MGATHER: return "X86ISD::MGATHER";
29941 case X86ISD::MSCATTER: return "X86ISD::MSCATTER";
29942 case X86ISD::VPDPBUSD: return "X86ISD::VPDPBUSD";
29943 case X86ISD::VPDPBUSDS: return "X86ISD::VPDPBUSDS";
29944 case X86ISD::VPDPWSSD: return "X86ISD::VPDPWSSD";
29945 case X86ISD::VPDPWSSDS: return "X86ISD::VPDPWSSDS";
29946 case X86ISD::VPSHUFBITQMB: return "X86ISD::VPSHUFBITQMB";
29947 case X86ISD::GF2P8MULB: return "X86ISD::GF2P8MULB";
29948 case X86ISD::GF2P8AFFINEQB: return "X86ISD::GF2P8AFFINEQB";
29949 case X86ISD::GF2P8AFFINEINVQB: return "X86ISD::GF2P8AFFINEINVQB";
29950 case X86ISD::NT_CALL: return "X86ISD::NT_CALL";
29951 case X86ISD::NT_BRIND: return "X86ISD::NT_BRIND";
29952 case X86ISD::UMWAIT: return "X86ISD::UMWAIT";
29953 case X86ISD::TPAUSE: return "X86ISD::TPAUSE";
29954 case X86ISD::ENQCMD: return "X86ISD::ENQCMD";
29955 case X86ISD::ENQCMDS: return "X86ISD::ENQCMDS";
29956 case X86ISD::VP2INTERSECT: return "X86ISD::VP2INTERSECT";
29961 /// Return true if the addressing mode represented by AM is legal for this
29962 /// target, for a load/store of the specified type.
29963 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
29964 const AddrMode &AM, Type *Ty,
29966 Instruction *I) const {
29967 // X86 supports extremely general addressing modes.
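// The general form is [BaseReg + IndexReg*Scale + Disp32], optionally with a
// global symbol folded into the displacement; the checks below reject
// combinations the encoding cannot express.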
29968 CodeModel::Model M = getTargetMachine().getCodeModel();
29970 // X86 allows a sign-extended 32-bit immediate field as a displacement.
29971 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
29975 unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
29977 // If a reference to this global requires an extra load, we can't fold it.
29978 if (isGlobalStubReference(GVFlags))
29981 // If BaseGV requires a register for the PIC base, we cannot also have a
29982 // BaseReg specified.
29983 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
29986 // If lower 4G is not available, then we must use rip-relative addressing.
29987 if ((M != CodeModel::Small || isPositionIndependent()) &&
29988 Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
29992 switch (AM.Scale) {
29998 // These scales always work.
30003 // These scales are formed with basereg+scalereg. Only accept if there is
30004 // no basereg yet.
30008 default: // Other stuff never works.
30015 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
30016 unsigned Bits = Ty->getScalarSizeInBits();
30018 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
30019 // particularly cheaper than those without.
30023 // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
30024 if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 &&
30025 (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
30028 // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
30029 // shifts just as cheap as scalar ones.
30030 if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
30033 // AVX512BW has shifts such as vpsllvw.
30034 if (Subtarget.hasBWI() && Bits == 16)
30037 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
30038 // fully general vector.
30042 bool X86TargetLowering::isBinOp(unsigned Opcode) const {
30044 // These are non-commutative binops.
30045 // TODO: Add more X86ISD opcodes once we have test coverage.
30046 case X86ISD::ANDNP:
30047 case X86ISD::PCMPGT:
30050 case X86ISD::FANDN:
30054 return TargetLoweringBase::isBinOp(Opcode);
30057 bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
30059 // TODO: Add more X86ISD opcodes once we have test coverage.
30060 case X86ISD::PCMPEQ:
30061 case X86ISD::PMULDQ:
30062 case X86ISD::PMULUDQ:
30063 case X86ISD::FMAXC:
30064 case X86ISD::FMINC:
30071 return TargetLoweringBase::isCommutativeBinOp(Opcode);
30074 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
30075 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
30077 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
30078 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
30079 return NumBits1 > NumBits2;
30082 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
30083 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
30086 if (!isTypeLegal(EVT::getEVT(Ty1)))
30089 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
30091 // Assuming the caller doesn't have a zeroext or signext return parameter,
30092 // truncation all the way down to i1 is valid.
30096 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
30097 return isInt<32>(Imm);
30100 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
30101 // Can also use sub to handle negated immediates.
30102 return isInt<32>(Imm);
30105 bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
30106 return isInt<32>(Imm);
30109 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
30110 if (!VT1.isInteger() || !VT2.isInteger())
30112 unsigned NumBits1 = VT1.getSizeInBits();
30113 unsigned NumBits2 = VT2.getSizeInBits();
30114 return NumBits1 > NumBits2;
30117 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
30118 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
30119 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
30122 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
30123 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
30124 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
30127 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
30128 EVT VT1 = Val.getValueType();
30129 if (isZExtFree(VT1, VT2))
30132 if (Val.getOpcode() != ISD::LOAD)
30135 if (!VT1.isSimple() || !VT1.isInteger() ||
30136 !VT2.isSimple() || !VT2.isInteger())
30139 switch (VT1.getSimpleVT().SimpleTy) {
30144 // X86 has 8, 16, and 32-bit zero-extending loads.
30151 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
30152 if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
30155 EVT SrcVT = ExtVal.getOperand(0).getValueType();
30157 // There is no extending load for vXi1.
30158 if (SrcVT.getScalarType() == MVT::i1)
30164 bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
30166 if (!Subtarget.hasAnyFMA())
30169 VT = VT.getScalarType();
30171 if (!VT.isSimple())
30174 switch (VT.getSimpleVT().SimpleTy) {
30185 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
30186 // i16 instructions are longer (0x66 prefix) and potentially slower.
30187 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
30190 /// Targets can use this to indicate that they only support *some*
30191 /// VECTOR_SHUFFLE operations, those with specific masks.
30192 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
30193 /// are assumed to be legal.
30194 bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
30195 if (!VT.isSimple())
30198 // Not for i1 vectors
30199 if (VT.getSimpleVT().getScalarType() == MVT::i1)
30202 // Very little shuffling can be done for 64-bit vectors right now.
30203 if (VT.getSimpleVT().getSizeInBits() == 64)
30206 // We only care that the types being shuffled are legal. The lowering can
30207 // handle any possible shuffle mask that results.
30208 return isTypeLegal(VT.getSimpleVT());
30211 bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
30213 // Don't convert an 'and' into a shuffle that we don't directly support.
30214 // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
30215 if (!Subtarget.hasAVX2())
30216 if (VT == MVT::v32i8 || VT == MVT::v16i16)
30219 // Just delegate to the generic legality, clear masks aren't special.
30220 return isShuffleMaskLegal(Mask, VT);
30223 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
30224 // If the subtarget is using retpolines, we must not generate jump tables.
30225 if (Subtarget.useRetpolineIndirectBranches())
30228 // Otherwise, fallback on the generic logic.
30229 return TargetLowering::areJTsAllowed(Fn);
30232 //===----------------------------------------------------------------------===//
30233 // X86 Scheduler Hooks
30234 //===----------------------------------------------------------------------===//
30236 /// Utility function to emit xbegin specifying the start of an RTM region.
30237 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
30238 const TargetInstrInfo *TII) {
30239 DebugLoc DL = MI.getDebugLoc();
30241 const BasicBlock *BB = MBB->getBasicBlock();
30242 MachineFunction::iterator I = ++MBB->getIterator();
30244 // For the v = xbegin(), we generate
30245 //
30246 // thisMBB:
30247 //  xbegin fallBB
30248 //
30249 // mainBB:
30250 //  s0 = -1
30251 //
30252 // fallBB:
30253 //  eax = # XABORT_DEF
30254 //  s1 = eax
30255 //
30256 // sinkMBB:
30257 //  v = phi(s0/mainBB, s1/fallBB)
30259 MachineBasicBlock *thisMBB = MBB;
30260 MachineFunction *MF = MBB->getParent();
30261 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
30262 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
30263 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
30264 MF->insert(I, mainMBB);
30265 MF->insert(I, fallMBB);
30266 MF->insert(I, sinkMBB);
30268 // Transfer the remainder of BB and its successor edges to sinkMBB.
30269 sinkMBB->splice(sinkMBB->begin(), MBB,
30270 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
30271 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
30273 MachineRegisterInfo &MRI = MF->getRegInfo();
30274 Register DstReg = MI.getOperand(0).getReg();
30275 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
30276 Register mainDstReg = MRI.createVirtualRegister(RC);
30277 Register fallDstReg = MRI.createVirtualRegister(RC);
30281 // # fallthrough to mainMBB
30282 // # abort to fallMBB
30283 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
30284 thisMBB->addSuccessor(mainMBB);
30285 thisMBB->addSuccessor(fallMBB);
30288 // mainDstReg := -1
30289 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
30290 BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
30291 mainMBB->addSuccessor(sinkMBB);
30294 // ; pseudo instruction to model hardware's definition from XABORT
30295 // EAX := XABORT_DEF
30296 // fallDstReg := EAX
30297 BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
30298 BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
30300 fallMBB->addSuccessor(sinkMBB);
30303 // DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
30304 BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
30305 .addReg(mainDstReg).addMBB(mainMBB)
30306 .addReg(fallDstReg).addMBB(fallMBB);
30308 MI.eraseFromParent();
30314 MachineBasicBlock *
30315 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
30316 MachineBasicBlock *MBB) const {
30317 // Emit va_arg instruction on X86-64.
30319 // Operands to this pseudo-instruction:
30320 // 0 ) Output : destination address (reg)
30321 // 1-5) Input : va_list address (addr, i64mem)
30322 // 6 ) ArgSize : Size (in bytes) of vararg type
30323 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
30324 // 8 ) Align : Alignment of type
30325 // 9 ) EFLAGS (implicit-def)
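// For example, va_arg(ap, double) on SysV x86-64 would typically be selected
// to a VAARG_64 with ArgSize = 8, ArgMode = 2 (use fp_offset) and Align = 8.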
30327 assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
30328 static_assert(X86::AddrNumOperands == 5,
30329 "VAARG_64 assumes 5 address operands");
30331 Register DestReg = MI.getOperand(0).getReg();
30332 MachineOperand &Base = MI.getOperand(1);
30333 MachineOperand &Scale = MI.getOperand(2);
30334 MachineOperand &Index = MI.getOperand(3);
30335 MachineOperand &Disp = MI.getOperand(4);
30336 MachineOperand &Segment = MI.getOperand(5);
30337 unsigned ArgSize = MI.getOperand(6).getImm();
30338 unsigned ArgMode = MI.getOperand(7).getImm();
30339 unsigned Align = MI.getOperand(8).getImm();
30341 MachineFunction *MF = MBB->getParent();
30343 // Memory Reference
30344 assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
30346 MachineMemOperand *OldMMO = MI.memoperands().front();
30348 // Clone the MMO into two separate MMOs for loading and storing
30349 MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
30350 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
30351 MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
30352 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
30354 // Machine Information
30355 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30356 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
30357 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
30358 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
30359 DebugLoc DL = MI.getDebugLoc();
30361 // struct va_list {
30362 //   i32   gp_offset
30363 //   i32   fp_offset
30364 //   i64   overflow_area (address)
30365 //   i64   reg_save_area (address)
30366 // }
30367 // sizeof(va_list) = 24
30368 // alignment(va_list) = 8
30370 unsigned TotalNumIntRegs = 6;
30371 unsigned TotalNumXMMRegs = 8;
30372 bool UseGPOffset = (ArgMode == 1);
30373 bool UseFPOffset = (ArgMode == 2);
30374 unsigned MaxOffset = TotalNumIntRegs * 8 +
30375 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
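// E.g. in the fp_offset case this bound is 6 * 8 + 8 * 16 = 176, the end of
// the register save area (48 bytes of GPR slots followed by 128 bytes of XMM
// slots).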
30377 /* Align ArgSize to a multiple of 8 */
30378 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
30379 bool NeedsAlign = (Align > 8);
30381 MachineBasicBlock *thisMBB = MBB;
30382 MachineBasicBlock *overflowMBB;
30383 MachineBasicBlock *offsetMBB;
30384 MachineBasicBlock *endMBB;
30386 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
30387 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
30388 unsigned OffsetReg = 0;
30390 if (!UseGPOffset && !UseFPOffset) {
30391 // If we only pull from the overflow region, we don't create a branch.
30392 // We don't need to alter control flow.
30393 OffsetDestReg = 0; // unused
30394 OverflowDestReg = DestReg;
30396 offsetMBB = nullptr;
30397 overflowMBB = thisMBB;
30398 endMBB = thisMBB;
30399 } else {
30400 // First emit code to check if gp_offset (or fp_offset) is below the bound.
30401 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
30402 // If not, pull from overflow_area. (branch to overflowMBB)
30404 //          thisMBB
30405 //         /       \
30406 //        /         \
30407 //   offsetMBB   overflowMBB
30408 //        \         /
30409 //         \       /
30410 //          endMBB
30412 // Registers for the PHI in endMBB
30413 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
30414 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
30416 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
30417 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30418 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30419 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30421 MachineFunction::iterator MBBIter = ++MBB->getIterator();
30423 // Insert the new basic blocks
30424 MF->insert(MBBIter, offsetMBB);
30425 MF->insert(MBBIter, overflowMBB);
30426 MF->insert(MBBIter, endMBB);
30428 // Transfer the remainder of MBB and its successor edges to endMBB.
30429 endMBB->splice(endMBB->begin(), thisMBB,
30430 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
30431 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
30433 // Make offsetMBB and overflowMBB successors of thisMBB
30434 thisMBB->addSuccessor(offsetMBB);
30435 thisMBB->addSuccessor(overflowMBB);
30437 // endMBB is a successor of both offsetMBB and overflowMBB
30438 offsetMBB->addSuccessor(endMBB);
30439 overflowMBB->addSuccessor(endMBB);
30441 // Load the offset value into a register
30442 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
30443 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
30444 .add(Base)
30445 .add(Scale)
30446 .add(Index)
30447 .addDisp(Disp, UseFPOffset ? 4 : 0)
30448 .add(Segment)
30449 .setMemRefs(LoadOnlyMMO);
30451 // Check if there is enough room left to pull this argument.
30452 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
30453 .addReg(OffsetReg)
30454 .addImm(MaxOffset + 8 - ArgSizeA8);
30456 // Branch to "overflowMBB" if offset >= max
30457 // Fall through to "offsetMBB" otherwise
30458 BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
30459 .addMBB(overflowMBB).addImm(X86::COND_AE);
30462 // In offsetMBB, emit code to use the reg_save_area.
30463 if (offsetMBB) {
30464 assert(OffsetReg != 0);
30466 // Read the reg_save_area address.
30467 Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
30468 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
30469 .add(Base)
30470 .add(Scale)
30471 .add(Index)
30472 .addDisp(Disp, 16)
30473 .add(Segment)
30474 .setMemRefs(LoadOnlyMMO);
30476 // Zero-extend the offset
30477 Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
30478 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
30479 .addImm(0)
30480 .addReg(OffsetReg)
30481 .addImm(X86::sub_32bit);
30483 // Add the offset to the reg_save_area to get the final address.
30484 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
30485 .addReg(OffsetReg64)
30486 .addReg(RegSaveReg);
30488 // Compute the offset for the next argument
30489 Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
30490 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
30491 .addReg(OffsetReg)
30492 .addImm(UseFPOffset ? 16 : 8);
30494 // Store it back into the va_list.
30495 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
30496 .add(Base)
30497 .add(Scale)
30498 .add(Index)
30499 .addDisp(Disp, UseFPOffset ? 4 : 0)
30500 .add(Segment)
30501 .addReg(NextOffsetReg)
30502 .setMemRefs(StoreOnlyMMO);
30505 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
30506 .addMBB(endMBB);
30507 }
30510 // Emit code to use overflow area
30513 // Load the overflow_area address into a register.
30514 Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
30515 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
30516 .add(Base)
30517 .add(Scale)
30518 .add(Index)
30519 .addDisp(Disp, 8)
30520 .add(Segment)
30521 .setMemRefs(LoadOnlyMMO);
30523 // If we need to align it, do so. Otherwise, just copy the address
30524 // to OverflowDestReg.
30525 if (NeedsAlign) {
30526 // Align the overflow address
30527 assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
30528 Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
30530 // aligned_addr = (addr + (align-1)) & ~(align-1)
30531 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
30532 .addReg(OverflowAddrReg)
30533 .addImm(Align - 1);
30535 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
30536 .addReg(TmpReg)
30537 .addImm(~(uint64_t)(Align-1));
30538 } else {
30539 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
30540 .addReg(OverflowAddrReg);
30541 }
30543 // Compute the next overflow address after this argument.
30544 // (the overflow address should be kept 8-byte aligned)
30545 Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
30546 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
30547 .addReg(OverflowDestReg)
30548 .addImm(ArgSizeA8);
30550 // Store the new overflow address.
30551 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
30552 .add(Base)
30553 .add(Scale)
30554 .add(Index)
30555 .addDisp(Disp, 8)
30556 .add(Segment)
30557 .addReg(NextAddrReg)
30558 .setMemRefs(StoreOnlyMMO);
30560 // If we branched, emit the PHI to the front of endMBB.
30561 if (offsetMBB) {
30562 BuildMI(*endMBB, endMBB->begin(), DL,
30563 TII->get(X86::PHI), DestReg)
30564 .addReg(OffsetDestReg).addMBB(offsetMBB)
30565 .addReg(OverflowDestReg).addMBB(overflowMBB);
30566 }
30568 // Erase the pseudo instruction
30569 MI.eraseFromParent();
30570 return endMBB;
30571 }
30574 MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
30575 MachineInstr &MI, MachineBasicBlock *MBB) const {
30576 // Emit code to save XMM registers to the stack. The ABI says that the
30577 // number of registers to save is given in %al, so it is theoretically
30578 // possible to do an indirect jump trick to avoid saving all of them.
30579 // However, this code takes a simpler approach and just executes all
30580 // of the stores if %al is non-zero. It's less code, and it's probably
30581 // easier on the hardware branch predictor, and stores aren't all that
30582 // expensive anyway.
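// The emitted code is roughly (illustrative):
//   testb %al, %al
//   je    .Lend
//   movaps %xmm0, <VarArgsFPOffset +  0>(frame)
//   ...
//   movaps %xmm7, <VarArgsFPOffset + 112>(frame)
// .Lend: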
30584 // Create the new basic blocks. One block contains all the XMM stores,
30585 // and one block is the final destination regardless of whether any
30586 // stores were performed.
30587 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
30588 MachineFunction *F = MBB->getParent();
30589 MachineFunction::iterator MBBIter = ++MBB->getIterator();
30590 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
30591 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
30592 F->insert(MBBIter, XMMSaveMBB);
30593 F->insert(MBBIter, EndMBB);
30595 // Transfer the remainder of MBB and its successor edges to EndMBB.
30596 EndMBB->splice(EndMBB->begin(), MBB,
30597 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
30598 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
30600 // The original block will now fall through to the XMM save block.
30601 MBB->addSuccessor(XMMSaveMBB);
30602 // The XMMSaveMBB will fall through to the end block.
30603 XMMSaveMBB->addSuccessor(EndMBB);
30605 // Now add the instructions.
30606 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30607 DebugLoc DL = MI.getDebugLoc();
30609 Register CountReg = MI.getOperand(0).getReg();
30610 int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
30611 int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
30613 if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
30614 // If %al is 0, branch around the XMM save block.
30615 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
30616 BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(EndMBB).addImm(X86::COND_E);
30617 MBB->addSuccessor(EndMBB);
30618 }
30620 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
30621 // that was just emitted, but clearly shouldn't be "saved".
30622 assert((MI.getNumOperands() <= 3 ||
30623 !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
30624 MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
30625 "Expected last argument to be EFLAGS");
30626 unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
30627 // In the XMM save block, save all the XMM argument registers.
30628 for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
30629 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
30630 MachineMemOperand *MMO = F->getMachineMemOperand(
30631 MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
30632 MachineMemOperand::MOStore,
30633 /*Size=*/16, /*Align=*/16);
30634 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
30635 .addFrameIndex(RegSaveFrameIndex)
30636 .addImm(/*Scale=*/1)
30637 .addReg(/*IndexReg=*/0)
30638 .addImm(/*Disp=*/Offset)
30639 .addReg(/*Segment=*/0)
30640 .addReg(MI.getOperand(i).getReg())
30641 .addMemOperand(MMO);
30642 }
30644 MI.eraseFromParent(); // The pseudo instruction is gone now.
30645 return EndMBB;
30646 }
30649 // The EFLAGS operand of SelectItr might be missing a kill marker
30650 // because there were multiple uses of EFLAGS, and ISel didn't know
30651 // which to mark. Figure out whether SelectItr should have had a
30652 // kill marker, and set it if it should. Returns the correct kill
30653 // marker value.
30654 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
30655 MachineBasicBlock* BB,
30656 const TargetRegisterInfo* TRI) {
30657 // Scan forward through BB for a use/def of EFLAGS.
30658 MachineBasicBlock::iterator miI(std::next(SelectItr));
30659 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
30660 const MachineInstr& mi = *miI;
30661 if (mi.readsRegister(X86::EFLAGS))
30662 return false;
30663 if (mi.definesRegister(X86::EFLAGS))
30664 break; // Should have kill-flag - update below.
30667 // If we hit the end of the block, check whether EFLAGS is live into a
30668 // successor.
30669 if (miI == BB->end()) {
30670 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
30671 sEnd = BB->succ_end();
30672 sItr != sEnd; ++sItr) {
30673 MachineBasicBlock* succ = *sItr;
30674 if (succ->isLiveIn(X86::EFLAGS))
30675 return false;
30676 }
30677 }
30679 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
30680 // out. SelectMI should have a kill flag on EFLAGS.
30681 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
30682 return true;
30683 }
30685 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
30686 // together with other CMOV pseudo-opcodes into a single basic-block with
30687 // conditional jump around it.
30688 static bool isCMOVPseudo(MachineInstr &MI) {
30689 switch (MI.getOpcode()) {
30690 case X86::CMOV_FR32:
30691 case X86::CMOV_FR32X:
30692 case X86::CMOV_FR64:
30693 case X86::CMOV_FR64X:
30694 case X86::CMOV_GR8:
30695 case X86::CMOV_GR16:
30696 case X86::CMOV_GR32:
30697 case X86::CMOV_RFP32:
30698 case X86::CMOV_RFP64:
30699 case X86::CMOV_RFP80:
30700 case X86::CMOV_VR128:
30701 case X86::CMOV_VR128X:
30702 case X86::CMOV_VR256:
30703 case X86::CMOV_VR256X:
30704 case X86::CMOV_VR512:
30705 case X86::CMOV_VK2:
30706 case X86::CMOV_VK4:
30707 case X86::CMOV_VK8:
30708 case X86::CMOV_VK16:
30709 case X86::CMOV_VK32:
30710 case X86::CMOV_VK64:
30711 return true;
30713 default:
30714 return false;
30715 }
30716 }
30718 // Helper function, which inserts PHI functions into SinkMBB:
30719 // %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
30720 // where %FalseValue(i) and %TrueValue(i) are taken from the consequent CMOVs
30721 // in [MIItBegin, MIItEnd) range. It returns the last MachineInstrBuilder for
30722 // the last PHI function inserted.
30723 static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
30724 MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
30725 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
30726 MachineBasicBlock *SinkMBB) {
30727 MachineFunction *MF = TrueMBB->getParent();
30728 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
30729 DebugLoc DL = MIItBegin->getDebugLoc();
30731 X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
30732 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
30734 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
30736 // As we are creating the PHIs, we have to be careful if there is more than
30737 // one. Later CMOVs may reference the results of earlier CMOVs, but later
30738 // PHIs have to reference the individual true/false inputs from earlier PHIs.
30739 // That also means that PHI construction must work forward from earlier to
30740 // later, and that the code must maintain a mapping from each earlier PHI's
30741 // destination register to the registers that went into that PHI.
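// For example, given
//   t2 = CMOV cond t1, f1
//   t3 = CMOV cond t2, f2
// the PHI emitted for t3 must use t1 (looked up through RegRewriteTable) in
// place of t2, because t2 is only defined by the PHI emitted just above it in
// this same sink block.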
30742 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
30743 MachineInstrBuilder MIB;
30745 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
30746 Register DestReg = MIIt->getOperand(0).getReg();
30747 Register Op1Reg = MIIt->getOperand(1).getReg();
30748 Register Op2Reg = MIIt->getOperand(2).getReg();
30750 // If this CMOV we are generating is the opposite condition from
30751 // the jump we generated, then we have to swap the operands for the
30752 // PHI that is going to be generated.
30753 if (MIIt->getOperand(3).getImm() == OppCC)
30754 std::swap(Op1Reg, Op2Reg);
30756 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
30757 Op1Reg = RegRewriteTable[Op1Reg].first;
30759 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
30760 Op2Reg = RegRewriteTable[Op2Reg].second;
30762 MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
30763 .addReg(Op1Reg)
30764 .addMBB(FalseMBB)
30765 .addReg(Op2Reg)
30766 .addMBB(TrueMBB);
30768 // Add this PHI to the rewrite table.
30769 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
30770 }
30772 return MIB;
30773 }
30775 // Lower cascaded selects in form of (SecondCmov (FirstCMOV F, T, cc1), T, cc2).
30776 MachineBasicBlock *
30777 X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
30778 MachineInstr &SecondCascadedCMOV,
30779 MachineBasicBlock *ThisMBB) const {
30780 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30781 DebugLoc DL = FirstCMOV.getDebugLoc();
30783 // We lower cascaded CMOVs such as
30785 // (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
30787 // to two successive branches.
30789 // Without this, we would add a PHI between the two jumps, which ends up
30790 // creating a few copies all around. For instance, for
30792 // (sitofp (zext (fcmp une)))
30794 // we would generate:
30796 // ucomiss %xmm1, %xmm0
30797 // movss <1.0f>, %xmm0
30798 // movaps %xmm0, %xmm1
30799 // jne .LBB5_2
30800 // xorps %xmm1, %xmm1
30801 // .LBB5_2:
30802 // jp .LBB5_4
30803 // movaps %xmm1, %xmm0
30804 // .LBB5_4:
30805 // retq
30807 // because this custom-inserter would have generated:
30819 // A: X = ...; Y = ...
30820 // B: empty
30821 // C: Z = PHI [X, A], [Y, B]
30822 // D: empty
30823 // E: PHI [X, C], [Z, D]
30825 // If we lower both CMOVs in a single step, we can instead generate:
30837 // A: X = ...; Y = ...
30838 // D: empty
30839 // E: PHI [X, A], [X, C], [Y, D]
30841 // Which, in our sitofp/fcmp example, gives us something like:
30843 // ucomiss %xmm1, %xmm0
30844 // movss <1.0f>, %xmm0
30845 // jne .LBB5_4
30846 // jp .LBB5_4
30847 // xorps %xmm0, %xmm0
30848 // .LBB5_4:
30849 // retq
30852 // We lower cascaded CMOV into two successive branches to the same block.
30853 // EFLAGS is used by both, so mark it as live in the second.
30854 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
30855 MachineFunction *F = ThisMBB->getParent();
30856 MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
30857 MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
30858 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
30860 MachineFunction::iterator It = ++ThisMBB->getIterator();
30861 F->insert(It, FirstInsertedMBB);
30862 F->insert(It, SecondInsertedMBB);
30863 F->insert(It, SinkMBB);
30865 // For a cascaded CMOV, we lower it to two successive branches to
30866 // the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
30867 // the FirstInsertedMBB.
30868 FirstInsertedMBB->addLiveIn(X86::EFLAGS);
30870 // If the EFLAGS register isn't dead in the terminator, then claim that it's
30871 // live into the sink and copy blocks.
30872 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
30873 if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
30874 !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
30875 SecondInsertedMBB->addLiveIn(X86::EFLAGS);
30876 SinkMBB->addLiveIn(X86::EFLAGS);
30877 }
30879 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
30880 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
30881 std::next(MachineBasicBlock::iterator(FirstCMOV)),
30882 ThisMBB->end());
30883 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
30885 // Fallthrough block for ThisMBB.
30886 ThisMBB->addSuccessor(FirstInsertedMBB);
30887 // The true block target of the first branch is always SinkMBB.
30888 ThisMBB->addSuccessor(SinkMBB);
30889 // Fallthrough block for FirstInsertedMBB.
30890 FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
30891 // The true block for the branch of FirstInsertedMBB.
30892 FirstInsertedMBB->addSuccessor(SinkMBB);
30893 // This is fallthrough.
30894 SecondInsertedMBB->addSuccessor(SinkMBB);
30896 // Create the conditional branch instructions.
30897 X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
30898 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
30900 X86::CondCode SecondCC =
30901 X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
30902 BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(SecondCC);
30905 // %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
30906 Register DestReg = FirstCMOV.getOperand(0).getReg();
30907 Register Op1Reg = FirstCMOV.getOperand(1).getReg();
30908 Register Op2Reg = FirstCMOV.getOperand(2).getReg();
30909 MachineInstrBuilder MIB =
30910 BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
30911 .addReg(Op1Reg)
30912 .addMBB(SecondInsertedMBB)
30913 .addReg(Op2Reg)
30914 .addMBB(ThisMBB);
30916 // FirstInsertedMBB also flows into SinkMBB and provides the same incoming
30917 // value as ThisMBB (the True operand of the SELECT_CC/CMOV nodes).
30918 MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
30919 // Copy the PHI result to the register defined by the second CMOV.
30920 BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
30921 TII->get(TargetOpcode::COPY),
30922 SecondCascadedCMOV.getOperand(0).getReg())
30923 .addReg(FirstCMOV.getOperand(0).getReg());
30925 // Now remove the CMOVs.
30926 FirstCMOV.eraseFromParent();
30927 SecondCascadedCMOV.eraseFromParent();
30929 return SinkMBB;
30930 }
30932 MachineBasicBlock *
30933 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
30934 MachineBasicBlock *ThisMBB) const {
30935 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30936 DebugLoc DL = MI.getDebugLoc();
30938 // To "insert" a SELECT_CC instruction, we actually have to insert the
30939 // diamond control-flow pattern. The incoming instruction knows the
30940 // destination vreg to set, the condition code register to branch on, the
30941 // true/false values to select between and a branch opcode to use.
30944 // ThisMBB:
30945 //   TrueVal = ...
30946 // cmpTY ccX, r1, r2
30947 //   bCC SinkMBB
30948 // fallthrough --> FalseMBB
30950 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
30951 // as described above, by inserting a BB, and then making a PHI at the join
30952 // point to select the true and false operands of the CMOV in the PHI.
30954 // The code also handles two different cases of multiple CMOV opcodes
30955 // in a row.
30957 // Case 1:
30958 // In this case, there are multiple CMOVs in a row, all of which are based on
30959 // the same condition setting (or the exact opposite condition setting).
30960 // In this case we can lower all the CMOVs using a single inserted BB, and
30961 // then make a number of PHIs at the join point to model the CMOVs. The only
30962 // trickiness here is that in a case like:
30964 // t2 = CMOV cond1 t1, f1
30965 // t3 = CMOV cond1 t2, f2
30967 // when rewriting this into PHIs, we have to perform some renaming on the
30968 // temps since you cannot have a PHI operand refer to a PHI result earlier
30969 // in the same block. The "simple" but wrong lowering would be:
30971 // t2 = PHI t1(BB1), f1(BB2)
30972 // t3 = PHI t2(BB1), f2(BB2)
30974 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
30975 // renaming is to note that on the path through BB1, t2 is really just a
30976 // copy of t1, and do that renaming, properly generating:
30978 // t2 = PHI t1(BB1), f1(BB2)
30979 // t3 = PHI t1(BB1), f2(BB2)
30981 // Case 2:
30982 // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
30983 // function - EmitLoweredCascadedSelect.
30985 X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
30986 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
30987 MachineInstr *LastCMOV = &MI;
30988 MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
30990 // Check for case 1, where there are multiple CMOVs with the same condition
30991 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
30992 // number of jumps the most.
30994 if (isCMOVPseudo(MI)) {
30995 // See if we have a string of CMOVS with the same condition. Skip over
30996 // intervening debug insts.
30997 while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
30998 (NextMIIt->getOperand(3).getImm() == CC ||
30999 NextMIIt->getOperand(3).getImm() == OppCC)) {
31000 LastCMOV = &*NextMIIt;
31001 ++NextMIIt;
31002 NextMIIt = skipDebugInstructionsForward(NextMIIt, ThisMBB->end());
31003 }
31004 }
31006 // This checks for case 2, but only do this if we didn't already find
31007 // case 1, as indicated by LastCMOV == MI.
31008 if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
31009 NextMIIt->getOpcode() == MI.getOpcode() &&
31010 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
31011 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
31012 NextMIIt->getOperand(1).isKill()) {
31013 return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
31014 }
31016 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
31017 MachineFunction *F = ThisMBB->getParent();
31018 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
31019 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
31021 MachineFunction::iterator It = ++ThisMBB->getIterator();
31022 F->insert(It, FalseMBB);
31023 F->insert(It, SinkMBB);
31025 // If the EFLAGS register isn't dead in the terminator, then claim that it's
31026 // live into the sink and copy blocks.
31027 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
31028 if (!LastCMOV->killsRegister(X86::EFLAGS) &&
31029 !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
31030 FalseMBB->addLiveIn(X86::EFLAGS);
31031 SinkMBB->addLiveIn(X86::EFLAGS);
31032 }
31034 // Transfer any debug instructions inside the CMOV sequence to the sunk block.
31035 auto DbgEnd = MachineBasicBlock::iterator(LastCMOV);
31036 auto DbgIt = MachineBasicBlock::iterator(MI);
31037 while (DbgIt != DbgEnd) {
31038 auto Next = std::next(DbgIt);
31039 if (DbgIt->isDebugInstr())
31040 SinkMBB->push_back(DbgIt->removeFromParent());
31041 DbgIt = Next;
31042 }
31044 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
31045 SinkMBB->splice(SinkMBB->end(), ThisMBB,
31046 std::next(MachineBasicBlock::iterator(LastCMOV)),
31047 ThisMBB->end());
31048 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
31050 // Fallthrough block for ThisMBB.
31051 ThisMBB->addSuccessor(FalseMBB);
31052 // The true block target of the first (or only) branch is always a SinkMBB.
31053 ThisMBB->addSuccessor(SinkMBB);
31054 // Fallthrough block for FalseMBB.
31055 FalseMBB->addSuccessor(SinkMBB);
31057 // Create the conditional branch instruction.
31058 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
31061 // %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
31063 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
31064 MachineBasicBlock::iterator MIItEnd =
31065 std::next(MachineBasicBlock::iterator(LastCMOV));
31066 createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
31068 // Now remove the CMOV(s).
31069 ThisMBB->erase(MIItBegin, MIItEnd);
31071 return SinkMBB;
31072 }
31074 MachineBasicBlock *
31075 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
31076 MachineBasicBlock *BB) const {
31077 MachineFunction *MF = BB->getParent();
31078 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31079 DebugLoc DL = MI.getDebugLoc();
31080 const BasicBlock *LLVM_BB = BB->getBasicBlock();
31082 assert(MF->shouldSplitStack());
31084 const bool Is64Bit = Subtarget.is64Bit();
31085 const bool IsLP64 = Subtarget.isTarget64BitLP64();
31087 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
31088 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
31090 // BB:
31091 // ... [Till the alloca]
31092 // If stacklet is not large enough, jump to mallocMBB
31094 // bumpMBB:
31095 // Allocate by subtracting from RSP
31096 // Jump to continueMBB
31098 // mallocMBB:
31099 // Allocate by call to runtime
31102 // continueMBB:
31103 // [rest of original BB]
31106 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31107 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31108 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31110 MachineRegisterInfo &MRI = MF->getRegInfo();
31111 const TargetRegisterClass *AddrRegClass =
31112 getRegClassFor(getPointerTy(MF->getDataLayout()));
31114 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
31115 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
31116 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
31117 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
31118 sizeVReg = MI.getOperand(1).getReg(),
31119 physSPReg =
31120 IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
31122 MachineFunction::iterator MBBIter = ++BB->getIterator();
31124 MF->insert(MBBIter, bumpMBB);
31125 MF->insert(MBBIter, mallocMBB);
31126 MF->insert(MBBIter, continueMBB);
31128 continueMBB->splice(continueMBB->begin(), BB,
31129 std::next(MachineBasicBlock::iterator(MI)), BB->end());
31130 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
31132 // Add code to the main basic block to check if the stack limit has been hit,
31133 // and if so, jump to mallocMBB otherwise to bumpMBB.
31134 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
31135 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
31136 .addReg(tmpSPVReg).addReg(sizeVReg);
31137 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
31138 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
31139 .addReg(SPLimitVReg);
31140 BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
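// In pseudo assembly the check above is roughly (LP64 case, illustrative):
//   new_sp = rsp - size
//   cmp    %fs:0x70, new_sp      ; stacklet limit vs. proposed SP
//   jg     mallocMBB             ; limit above new SP -> not enough room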
31142 // bumpMBB simply decreases the stack pointer, since we know the current
31143 // stacklet has enough space.
31144 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
31145 .addReg(SPLimitVReg);
31146 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
31147 .addReg(SPLimitVReg);
31148 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
31150 // Calls into a routine in libgcc to allocate more space from the heap.
31151 const uint32_t *RegMask =
31152 Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
31153 if (IsLP64) {
31154 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
31155 .addReg(sizeVReg);
31156 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
31157 .addExternalSymbol("__morestack_allocate_stack_space")
31158 .addRegMask(RegMask)
31159 .addReg(X86::RDI, RegState::Implicit)
31160 .addReg(X86::RAX, RegState::ImplicitDefine);
31161 } else if (Is64Bit) {
31162 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
31163 .addReg(sizeVReg);
31164 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
31165 .addExternalSymbol("__morestack_allocate_stack_space")
31166 .addRegMask(RegMask)
31167 .addReg(X86::EDI, RegState::Implicit)
31168 .addReg(X86::EAX, RegState::ImplicitDefine);
31169 } else {
31170 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
31171 .addImm(16);
31172 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
31173 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
31174 .addExternalSymbol("__morestack_allocate_stack_space")
31175 .addRegMask(RegMask)
31176 .addReg(X86::EAX, RegState::ImplicitDefine);
31180 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
31181 .addImm(16);
31182 }
31183 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
31184 .addReg(IsLP64 ? X86::RAX : X86::EAX);
31185 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
31187 // Set up the CFG correctly.
31188 BB->addSuccessor(bumpMBB);
31189 BB->addSuccessor(mallocMBB);
31190 mallocMBB->addSuccessor(continueMBB);
31191 bumpMBB->addSuccessor(continueMBB);
31193 // Take care of the PHI nodes.
31194 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
31195 MI.getOperand(0).getReg())
31196 .addReg(mallocPtrVReg)
31197 .addMBB(mallocMBB)
31198 .addReg(bumpSPPtrVReg)
31199 .addMBB(bumpMBB);
31201 // Delete the original pseudo instruction.
31202 MI.eraseFromParent();
31205 return continueMBB;
31208 MachineBasicBlock *
31209 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
31210 MachineBasicBlock *BB) const {
31211 MachineFunction *MF = BB->getParent();
31212 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
31213 MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
31214 DebugLoc DL = MI.getDebugLoc();
31216 assert(!isAsynchronousEHPersonality(
31217 classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
31218 "SEH does not use catchret!");
31220 // Only 32-bit EH needs to worry about manually restoring stack pointers.
31221 if (!Subtarget.is32Bit())
31222 return BB;
31224 // C++ EH creates a new target block to hold the restore code, and wires up
31225 // the new block to the return destination with a normal JMP_4.
31226 MachineBasicBlock *RestoreMBB =
31227 MF->CreateMachineBasicBlock(BB->getBasicBlock());
31228 assert(BB->succ_size() == 1);
31229 MF->insert(std::next(BB->getIterator()), RestoreMBB);
31230 RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
31231 BB->addSuccessor(RestoreMBB);
31232 MI.getOperand(0).setMBB(RestoreMBB);
31234 auto RestoreMBBI = RestoreMBB->begin();
31235 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
31236 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
31237 return BB;
31238 }
31240 MachineBasicBlock *
31241 X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
31242 MachineBasicBlock *BB) const {
31243 MachineFunction *MF = BB->getParent();
31244 const Constant *PerFn = MF->getFunction().getPersonalityFn();
31245 bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
31246 // Only 32-bit SEH requires special handling for catchpad.
31247 if (IsSEH && Subtarget.is32Bit()) {
31248 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
31249 DebugLoc DL = MI.getDebugLoc();
31250 BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
31251 }
31252 MI.eraseFromParent();
31253 return BB;
31254 }
31256 MachineBasicBlock *
31257 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
31258 MachineBasicBlock *BB) const {
31259 // So, here we replace TLSADDR with the sequence:
31260 // adjust_stackdown -> TLSADDR -> adjust_stackup.
31261 // We need this because TLSADDR is lowered into calls
31262 // inside MC; therefore, without the two markers, shrink-wrapping
31263 // may push the prologue/epilogue past them.
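// For example, on x86-64 general-dynamic TLS the TLSADDR pseudo is emitted by
// MC as something like
//   leaq sym@TLSGD(%rip), %rdi
//   callq __tls_get_addr@PLT
// i.e. a real call, which is why the call-frame markers must bracket it.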
31264 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
31265 DebugLoc DL = MI.getDebugLoc();
31266 MachineFunction &MF = *BB->getParent();
31268 // Emit CALLSEQ_START right before the instruction.
31269 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
31270 MachineInstrBuilder CallseqStart =
31271 BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
31272 BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
31274 // Emit CALLSEQ_END right after the instruction.
31275 // We don't call erase from parent because we want to keep the
31276 // original instruction around.
31277 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
31278 MachineInstrBuilder CallseqEnd =
31279 BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
31280 BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
31282 return BB;
31283 }
31285 MachineBasicBlock *
31286 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
31287 MachineBasicBlock *BB) const {
31288 // This is pretty easy. We're taking the value that we received from
31289 // our load from the relocation, sticking it in either RDI (x86-64)
31290 // or EAX and doing an indirect call. The return value will then
31291 // be in the normal return register.
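// For example, on x86-64 Darwin this becomes roughly
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)
// where the first slot of the TLV descriptor holds the accessor to call.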
31292 MachineFunction *F = BB->getParent();
31293 const X86InstrInfo *TII = Subtarget.getInstrInfo();
31294 DebugLoc DL = MI.getDebugLoc();
31296 assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
31297 assert(MI.getOperand(3).isGlobal() && "This should be a global");
31299 // Get a register mask for the lowered call.
31300 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
31301 // proper register mask.
31302 const uint32_t *RegMask =
31303 Subtarget.is64Bit() ?
31304 Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
31305 Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
31306 if (Subtarget.is64Bit()) {
31307 MachineInstrBuilder MIB =
31308 BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
31312 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
31313 MI.getOperand(3).getTargetFlags())
31315 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
31316 addDirectMem(MIB, X86::RDI);
31317 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
31318 } else if (!isPositionIndependent()) {
31319 MachineInstrBuilder MIB =
31320 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
31324 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
31325 MI.getOperand(3).getTargetFlags())
31327 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
31328 addDirectMem(MIB, X86::EAX);
31329 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
31330 } else {
31331 MachineInstrBuilder MIB =
31332 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
31333 .addReg(TII->getGlobalBaseReg(F))
31336 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
31337 MI.getOperand(3).getTargetFlags())
31339 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
31340 addDirectMem(MIB, X86::EAX);
31341 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
31342 }
31344 MI.eraseFromParent(); // The pseudo instruction is gone now.
31345 return BB;
31346 }
31348 static unsigned getOpcodeForRetpoline(unsigned RPOpc) {
31349 switch (RPOpc) {
31350 case X86::RETPOLINE_CALL32:
31351 return X86::CALLpcrel32;
31352 case X86::RETPOLINE_CALL64:
31353 return X86::CALL64pcrel32;
31354 case X86::RETPOLINE_TCRETURN32:
31355 return X86::TCRETURNdi;
31356 case X86::RETPOLINE_TCRETURN64:
31357 return X86::TCRETURNdi64;
31358 }
31359 llvm_unreachable("not retpoline opcode");
31360 }
31362 static const char *getRetpolineSymbol(const X86Subtarget &Subtarget,
31363 unsigned Reg) {
31364 if (Subtarget.useRetpolineExternalThunk()) {
31365 // When using an external thunk for retpolines, we pick names that match the
31366 // names GCC happens to use as well. This helps simplify the implementation
31367 // of the thunks for kernels where they have no easy ability to create
31368 // aliases and are doing non-trivial configuration of the thunk's body. For
31369 // example, the Linux kernel will do boot-time hot patching of the thunk
31370 // bodies and cannot easily export aliases of these to loaded modules.
31372 // Note that at any point in the future, we may need to change the semantics
31373 // of how we implement retpolines and at that time will likely change the
31374 // name of the called thunk. Essentially, there is no hard guarantee that
31375 // LLVM will generate calls to specific thunks, we merely make a best-effort
31376 // attempt to help out kernels and other systems where duplicating the
31377 // thunks is costly.
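// For example, an indirect call through R11 on x86-64 becomes
//   callq __x86_indirect_thunk_r11
// when external thunks are requested, and __llvm_retpoline_r11 when LLVM emits
// its own COMDAT thunk (see below).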
31378 switch (Reg) {
31379 case X86::EAX:
31380 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31381 return "__x86_indirect_thunk_eax";
31382 case X86::ECX:
31383 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31384 return "__x86_indirect_thunk_ecx";
31385 case X86::EDX:
31386 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31387 return "__x86_indirect_thunk_edx";
31388 case X86::EDI:
31389 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31390 return "__x86_indirect_thunk_edi";
31391 case X86::R11:
31392 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
31393 return "__x86_indirect_thunk_r11";
31394 }
31395 llvm_unreachable("unexpected reg for retpoline");
31396 }
31398 // When targeting an internal COMDAT thunk use an LLVM-specific name.
31399 switch (Reg) {
31400 case X86::EAX:
31401 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31402 return "__llvm_retpoline_eax";
31403 case X86::ECX:
31404 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31405 return "__llvm_retpoline_ecx";
31406 case X86::EDX:
31407 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31408 return "__llvm_retpoline_edx";
31409 case X86::EDI:
31410 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31411 return "__llvm_retpoline_edi";
31412 case X86::R11:
31413 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
31414 return "__llvm_retpoline_r11";
31415 }
31416 llvm_unreachable("unexpected reg for retpoline");
31417 }
31419 MachineBasicBlock *
31420 X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
31421 MachineBasicBlock *BB) const {
31422 // Copy the virtual register into the R11 physical register and
31423 // call the retpoline thunk.
31424 DebugLoc DL = MI.getDebugLoc();
31425 const X86InstrInfo *TII = Subtarget.getInstrInfo();
31426 Register CalleeVReg = MI.getOperand(0).getReg();
31427 unsigned Opc = getOpcodeForRetpoline(MI.getOpcode());
31429 // Find an available scratch register to hold the callee. On 64-bit, we can
31430 // just use R11, but we scan for uses anyway to ensure we don't generate
31431 // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
31432 // already a register use operand to the call to hold the callee. If none
31433 // are available, use EDI instead. EDI is chosen because EBX is the PIC base
31434 // register and ESI is the base pointer to realigned stack frames with VLAs.
31435 SmallVector<unsigned, 3> AvailableRegs;
31436 if (Subtarget.is64Bit())
31437 AvailableRegs.push_back(X86::R11);
31439 AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
31441 // Zero out any registers that are already used.
31442 for (const auto &MO : MI.operands()) {
31443 if (MO.isReg() && MO.isUse())
31444 for (unsigned &Reg : AvailableRegs)
31445 if (Reg == MO.getReg())
31449 // Choose the first remaining non-zero available register.
31450 unsigned AvailableReg = 0;
31451 for (unsigned MaybeReg : AvailableRegs) {
31452 if (MaybeReg) {
31453 AvailableReg = MaybeReg;
31454 break;
31455 }
31456 }
31457 if (!AvailableReg)
31458 report_fatal_error("calling convention incompatible with retpoline, no "
31459 "available registers");
31461 const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);
31463 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
31464 .addReg(CalleeVReg);
31465 MI.getOperand(0).ChangeToES(Symbol);
31466 MI.setDesc(TII->get(Opc));
31467 MachineInstrBuilder(*BB->getParent(), &MI)
31468 .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
31469 return BB;
31470 }
31472 /// SetJmp implies future control flow change upon calling the corresponding
31473 /// function. So, the function is itself a call site.
31474 /// Instead of using the 'return' instruction, the long jump fixes the stack and
31475 /// performs an indirect branch. To do so it uses the registers that were stored
31476 /// in the jump buffer (when calling SetJmp).
31477 /// In case the shadow stack is enabled we need to fix it as well, because some
31478 /// return addresses will be skipped.
31479 /// The function will save the SSP for future fixing in the function
31480 /// emitLongJmpShadowStackFix.
31481 /// \sa emitLongJmpShadowStackFix
31482 /// \param [in] MI The temporary Machine Instruction for the builtin.
31483 /// \param [in] MBB The Machine Basic Block that will be modified.
31484 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
31485 MachineBasicBlock *MBB) const {
31486 DebugLoc DL = MI.getDebugLoc();
31487 MachineFunction *MF = MBB->getParent();
31488 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31489 MachineRegisterInfo &MRI = MF->getRegInfo();
31490 MachineInstrBuilder MIB;
31492 // Memory Reference.
31493 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31494 MI.memoperands_end());
31496 // Initialize a register with zero.
31497 MVT PVT = getPointerTy(MF->getDataLayout());
31498 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
31499 Register ZReg = MRI.createVirtualRegister(PtrRC);
31500 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
31501 BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
31502 .addDef(ZReg)
31503 .addReg(ZReg, RegState::Undef)
31504 .addReg(ZReg, RegState::Undef);
31506 // Read the current SSP Register value to the zeroed register.
31507 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
31508 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
31509 BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
31511 // Write the SSP register value to offset 3 in input memory buffer.
31512 unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
31513 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
31514 const int64_t SSPOffset = 3 * PVT.getStoreSize();
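// The setjmp buffer is treated as an array of pointer-sized slots:
// [0] frame pointer, [1] resume address, [2] stack pointer, and, when shadow
// stacks are in use, [3] the saved SSP written here.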
31515 const unsigned MemOpndSlot = 1;
31516 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31517 if (i == X86::AddrDisp)
31518 MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
31519 else
31520 MIB.add(MI.getOperand(MemOpndSlot + i));
31521 }
31522 MIB.addReg(SSPCopyReg);
31523 MIB.setMemRefs(MMOs);
31524 }
31526 MachineBasicBlock *
31527 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
31528 MachineBasicBlock *MBB) const {
31529 DebugLoc DL = MI.getDebugLoc();
31530 MachineFunction *MF = MBB->getParent();
31531 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31532 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
31533 MachineRegisterInfo &MRI = MF->getRegInfo();
31535 const BasicBlock *BB = MBB->getBasicBlock();
31536 MachineFunction::iterator I = ++MBB->getIterator();
31538 // Memory Reference
31539 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31540 MI.memoperands_end());
31542 unsigned DstReg;
31543 unsigned MemOpndSlot = 0;
31545 unsigned CurOp = 0;
31547 DstReg = MI.getOperand(CurOp++).getReg();
31548 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
31549 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
31551 Register mainDstReg = MRI.createVirtualRegister(RC);
31552 Register restoreDstReg = MRI.createVirtualRegister(RC);
31554 MemOpndSlot = CurOp;
31556 MVT PVT = getPointerTy(MF->getDataLayout());
31557 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
31558 "Invalid Pointer Size!");
31560 // For v = setjmp(buf), we generate
31561 //
31562 // thisMBB:
31563 // buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
31564 // SjLjSetup restoreMBB
31565 //
31566 // mainMBB:
31567 //  v_main = 0
31568 //
31569 // sinkMBB:
31570 // v = phi(main, restore)
31571 //
31572 // restoreMBB:
31573 // if base pointer being used, load it from frame
31574 //  v_restore = 1
31576 MachineBasicBlock *thisMBB = MBB;
31577 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
31578 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
31579 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
31580 MF->insert(I, mainMBB);
31581 MF->insert(I, sinkMBB);
31582 MF->push_back(restoreMBB);
31583 restoreMBB->setHasAddressTaken();
31585 MachineInstrBuilder MIB;
31587 // Transfer the remainder of BB and its successor edges to sinkMBB.
31588 sinkMBB->splice(sinkMBB->begin(), MBB,
31589 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
31590 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
31593 unsigned PtrStoreOpc = 0;
31594 unsigned LabelReg = 0;
31595 const int64_t LabelOffset = 1 * PVT.getStoreSize();
31596 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
31597 !isPositionIndependent();
31599 // Prepare IP either in reg or imm.
31600 if (!UseImmLabel) {
31601 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
31602 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
31603 LabelReg = MRI.createVirtualRegister(PtrRC);
31604 if (Subtarget.is64Bit()) {
31605 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
31609 .addMBB(restoreMBB)
31612 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
31613 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
31614 .addReg(XII->getGlobalBaseReg(MF))
31617 .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
31621 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
31623 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
31624 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31625 if (i == X86::AddrDisp)
31626 MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
31627 else
31628 MIB.add(MI.getOperand(MemOpndSlot + i));
31629 }
31630 if (!UseImmLabel)
31631 MIB.addReg(LabelReg);
31632 else
31633 MIB.addMBB(restoreMBB);
31634 MIB.setMemRefs(MMOs);
31636 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
31637 emitSetJmpShadowStackFix(MI, thisMBB);
31638 }
31641 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
31642 .addMBB(restoreMBB);
31644 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
31645 MIB.addRegMask(RegInfo->getNoPreservedMask());
31646 thisMBB->addSuccessor(mainMBB);
31647 thisMBB->addSuccessor(restoreMBB);
31651 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
31652 mainMBB->addSuccessor(sinkMBB);
31655 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
31656 TII->get(X86::PHI), DstReg)
31657 .addReg(mainDstReg).addMBB(mainMBB)
31658 .addReg(restoreDstReg).addMBB(restoreMBB);
31661 if (RegInfo->hasBasePointer(*MF)) {
31662 const bool Uses64BitFramePtr =
31663 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
31664 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
31665 X86FI->setRestoreBasePointer(MF);
31666 Register FramePtr = RegInfo->getFrameRegister(*MF);
31667 Register BasePtr = RegInfo->getBaseRegister();
31668 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
31669 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
31670 FramePtr, true, X86FI->getRestoreBasePointerOffset())
31671 .setMIFlag(MachineInstr::FrameSetup);
31672 }
31673 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
31674 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
31675 restoreMBB->addSuccessor(sinkMBB);
31677 MI.eraseFromParent();
31678 return sinkMBB;
31679 }
31681 /// Fix the shadow stack using the previously saved SSP pointer.
31682 /// \sa emitSetJmpShadowStackFix
31683 /// \param [in] MI The temporary Machine Instruction for the builtin.
31684 /// \param [in] MBB The Machine Basic Block that will be modified.
31685 /// \return The sink MBB that will perform the future indirect branch.
31686 MachineBasicBlock *
31687 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
31688 MachineBasicBlock *MBB) const {
31689 DebugLoc DL = MI.getDebugLoc();
31690 MachineFunction *MF = MBB->getParent();
31691 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31692 MachineRegisterInfo &MRI = MF->getRegInfo();
31694 // Memory Reference
31695 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31696 MI.memoperands_end());
31698 MVT PVT = getPointerTy(MF->getDataLayout());
31699 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
31701 // checkSspMBB:
31702 // xor vreg1, vreg1
31703 // rdssp vreg1
31704 // test vreg1, vreg1
31705 // je sinkMBB # Jump if Shadow Stack is not supported
31706 // fallMBB:
31707 // mov buf+24/12(%rip), vreg2
31708 // sub vreg1, vreg2
31709 // jbe sinkMBB # No need to fix the Shadow Stack
31710 // fixShadowMBB:
31711 // shr 3/2, vreg2
31712 // incssp vreg2 # fix the SSP according to the lower 8 bits
31713 // shr 8, vreg2
31714 // je sinkMBB
31715 // fixShadowLoopPrepareMBB:
31716 // shl vreg2
31717 // mov 128, vreg3
31718 // fixShadowLoopMBB:
31719 // incssp vreg3
31720 // dec vreg2
31721 // jne fixShadowLoopMBB # Iterate until you finish fixing
31722 // # the Shadow Stack
31723 // sinkMBB:
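// Note that INCSSPD/INCSSPQ consume only the low 8 bits of their operand and
// advance the SSP by that many 4- or 8-byte entries, which is why deltas
// larger than 255 entries are handled by the 128-entries-per-iteration loop
// in fixShadowLoopMBB.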
31725 MachineFunction::iterator I = ++MBB->getIterator();
31726 const BasicBlock *BB = MBB->getBasicBlock();
31728 MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
31729 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
31730 MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
31731 MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
31732 MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
31733 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
31734 MF->insert(I, checkSspMBB);
31735 MF->insert(I, fallMBB);
31736 MF->insert(I, fixShadowMBB);
31737 MF->insert(I, fixShadowLoopPrepareMBB);
31738 MF->insert(I, fixShadowLoopMBB);
31739 MF->insert(I, sinkMBB);
31741 // Transfer the remainder of BB and its successor edges to sinkMBB.
31742 sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
31744 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
31746 MBB->addSuccessor(checkSspMBB);
31748 // Initialize a register with zero.
31749 Register ZReg = MRI.createVirtualRegister(PtrRC);
31750 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
31751 BuildMI(checkSspMBB, DL, TII->get(XorRROpc))
31752 .addDef(ZReg)
31753 .addReg(ZReg, RegState::Undef)
31754 .addReg(ZReg, RegState::Undef);
31756 // Read the current SSP Register value to the zeroed register.
31757 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
31758 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
31759 BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
31761 // Check whether the result of the SSP register is zero and jump directly
31762 // to the sink; in that case the shadow stack feature is not enabled.
31763 unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
31764 BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
31765 .addReg(SSPCopyReg)
31766 .addReg(SSPCopyReg);
31767 BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
31768 checkSspMBB->addSuccessor(sinkMBB);
31769 checkSspMBB->addSuccessor(fallMBB);
31771 // Reload the previously saved SSP register value.
31772 Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
31773 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
31774 const int64_t SPPOffset = 3 * PVT.getStoreSize();
31775 MachineInstrBuilder MIB =
31776 BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
31777 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31778 const MachineOperand &MO = MI.getOperand(i);
31779 if (i == X86::AddrDisp)
31780 MIB.addDisp(MO, SPPOffset);
31781 else if (MO.isReg()) // Don't add the whole operand, we don't want to
31782 // preserve kill flags.
31783 MIB.addReg(MO.getReg());
31784 else
31785 MIB.add(MO);
31786 }
31787 MIB.setMemRefs(MMOs);
31789 // Subtract the current SSP from the previous SSP.
31790 Register SspSubReg = MRI.createVirtualRegister(PtrRC);
31791 unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
31792 BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
31793 .addReg(PrevSSPReg)
31794 .addReg(SSPCopyReg);
31796 // Jump to sink in case PrevSSPReg <= SSPCopyReg.
31797 BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
31798 fallMBB->addSuccessor(sinkMBB);
31799 fallMBB->addSuccessor(fixShadowMBB);
31801 // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
31802 unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
31803 unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
31804 Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
31805 BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
31806 .addReg(SspSubReg)
31807 .addImm(Offset);
31809 // Increase SSP when looking only on the lower 8 bits of the delta.
31810 unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
31811 BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
31813 // Reset the lower 8 bits.
31814 Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
31815 BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
31816 .addReg(SspFirstShrReg)
31817 .addImm(8);
31819 // Jump if the result of the shift is zero.
31820 BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
31821 fixShadowMBB->addSuccessor(sinkMBB);
31822 fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
31824 // Do a single shift left.
31825 unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
31826 Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
31827 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
31828 .addReg(SspSecondShrReg);
31830 // Save the value 128 to a register (will be used next with incssp).
31831 Register Value128InReg = MRI.createVirtualRegister(PtrRC);
31832 unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
31833 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
31834 .addImm(128);
31835 fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
31837 // Since incssp only looks at the lower 8 bits, we might need to do several
31838 // iterations of incssp until we finish fixing the shadow stack.
31839 Register DecReg = MRI.createVirtualRegister(PtrRC);
31840 Register CounterReg = MRI.createVirtualRegister(PtrRC);
31841 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
31842 .addReg(SspAfterShlReg)
31843 .addMBB(fixShadowLoopPrepareMBB)
31844 .addReg(DecReg)
31845 .addMBB(fixShadowLoopMBB);
31847 // Every iteration we increase the SSP by 128.
31848 BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
31850 // Every iteration we decrement the counter by 1.
31851 unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
31852 BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
31854 // Jump if the counter is not zero yet.
31855 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
31856 fixShadowLoopMBB->addSuccessor(sinkMBB);
31857 fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
31859 return sinkMBB;
31860 }
31862 MachineBasicBlock *
31863 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
31864 MachineBasicBlock *MBB) const {
31865 DebugLoc DL = MI.getDebugLoc();
31866 MachineFunction *MF = MBB->getParent();
31867 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31868 MachineRegisterInfo &MRI = MF->getRegInfo();
31870 // Memory Reference
31871 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31872 MI.memoperands_end());
31874 MVT PVT = getPointerTy(MF->getDataLayout());
31875 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
31876 "Invalid Pointer Size!");
31878 const TargetRegisterClass *RC =
31879 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
31880 Register Tmp = MRI.createVirtualRegister(RC);
31881 // Since FP is only updated here but NOT referenced, it's treated as GPR.
31882 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
31883 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
31884 Register SP = RegInfo->getStackRegister();
31886 MachineInstrBuilder MIB;
31888 const int64_t LabelOffset = 1 * PVT.getStoreSize();
31889 const int64_t SPOffset = 2 * PVT.getStoreSize();
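// Illustrative layout of the buffer referenced by the pseudo's memory operand:
// [buf + 0] saved frame pointer, [buf + LabelOffset] resume address,
// [buf + SPOffset] saved stack pointer (offsets 0/8/16 on 64-bit targets).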
31891 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
31892 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
31894 MachineBasicBlock *thisMBB = MBB;
31896 // When CET and shadow stack are enabled, we need to fix the Shadow Stack.
31897 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
31898 thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
31902 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
31903 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31904 const MachineOperand &MO = MI.getOperand(i);
31905 if (MO.isReg()) // Don't add the whole operand, we don't want to
31906 // preserve kill flags.
31907 MIB.addReg(MO.getReg());
31911 MIB.setMemRefs(MMOs);
31914 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
31915 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31916 const MachineOperand &MO = MI.getOperand(i);
31917 if (i == X86::AddrDisp)
31918 MIB.addDisp(MO, LabelOffset);
31919 else if (MO.isReg()) // Don't add the whole operand, we don't want to
31920 // preserve kill flags.
31921 MIB.addReg(MO.getReg());
31925 MIB.setMemRefs(MMOs);
31928 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
31929 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31930 if (i == X86::AddrDisp)
31931 MIB.addDisp(MI.getOperand(i), SPOffset);
31932 else
31933 MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
31934 // the last instruction of the expansion.
31936 MIB.setMemRefs(MMOs);
31939 BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
31941 MI.eraseFromParent();
31945 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
31946 MachineBasicBlock *MBB,
31947 MachineBasicBlock *DispatchBB,
31949 DebugLoc DL = MI.getDebugLoc();
31950 MachineFunction *MF = MBB->getParent();
31951 MachineRegisterInfo *MRI = &MF->getRegInfo();
31952 const X86InstrInfo *TII = Subtarget.getInstrInfo();
31954 MVT PVT = getPointerTy(MF->getDataLayout());
31955 assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
31960 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
31961 !isPositionIndependent();
31964 Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
31966 const TargetRegisterClass *TRC =
31967 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
31968 VR = MRI->createVirtualRegister(TRC);
31969 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
31971 if (Subtarget.is64Bit())
31972 BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
31976 .addMBB(DispatchBB)
31979 BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
31980 .addReg(0) /* TII->getGlobalBaseReg(MF) */
31983 .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
31987 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
31988 addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
31990 MIB.addMBB(DispatchBB);
31995 MachineBasicBlock *
31996 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
31997 MachineBasicBlock *BB) const {
31998 DebugLoc DL = MI.getDebugLoc();
31999 MachineFunction *MF = BB->getParent();
32000 MachineRegisterInfo *MRI = &MF->getRegInfo();
32001 const X86InstrInfo *TII = Subtarget.getInstrInfo();
32002 int FI = MF->getFrameInfo().getFunctionContextIndex();
32004 // Get a mapping of the call site numbers to all of the landing pads they're
32005 // associated with.
32006 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
32007 unsigned MaxCSNum = 0;
32008 for (auto &MBB : *MF) {
32009 if (!MBB.isEHPad())
32010 continue;
32012 MCSymbol *Sym = nullptr;
32013 for (const auto &MI : MBB) {
32014 if (MI.isDebugInstr())
32015 continue;
32017 assert(MI.isEHLabel() && "expected EH_LABEL");
32018 Sym = MI.getOperand(0).getMCSymbol();
32022 if (!MF->hasCallSiteLandingPad(Sym))
32023 continue;
32025 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
32026 CallSiteNumToLPad[CSI].push_back(&MBB);
32027 MaxCSNum = std::max(MaxCSNum, CSI);
32031 // Get an ordered list of the machine basic blocks for the jump table.
32032 std::vector<MachineBasicBlock *> LPadList;
32033 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
32034 LPadList.reserve(CallSiteNumToLPad.size());
32036 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
32037 for (auto &LP : CallSiteNumToLPad[CSI]) {
32038 LPadList.push_back(LP);
32039 InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
32043 assert(!LPadList.empty() &&
32044 "No landing pad destinations for the dispatch jump table!");
32046 // Create the MBBs for the dispatch code.
32048 // Shove the dispatch's address into the return slot in the function context.
32049 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
32050 DispatchBB->setIsEHPad(true);
32052 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
32053 BuildMI(TrapBB, DL, TII->get(X86::TRAP));
32054 DispatchBB->addSuccessor(TrapBB);
32056 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
32057 DispatchBB->addSuccessor(DispContBB);
32060 MF->push_back(DispatchBB);
32061 MF->push_back(DispContBB);
32062 MF->push_back(TrapBB);
32064 // Insert code into the entry block that creates and registers the function
32066 SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
32068 // Create the jump table and associated information
32069 unsigned JTE = getJumpTableEncoding();
32070 MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
32071 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
32073 const X86RegisterInfo &RI = TII->getRegisterInfo();
32074 // Add a register mask with no preserved registers. This results in all
32075 // registers being marked as clobbered.
32076 if (RI.hasBasePointer(*MF)) {
32077 const bool FPIs64Bit =
32078 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
32079 X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
32080 MFI->setRestoreBasePointer(MF);
32082 Register FP = RI.getFrameRegister(*MF);
32083 Register BP = RI.getBaseRegister();
32084 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
32085 addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
32086 MFI->getRestoreBasePointerOffset())
32087 .addRegMask(RI.getNoPreservedMask());
32089 BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
32090 .addRegMask(RI.getNoPreservedMask());
32093 // IReg is used as an index in a memory operand and therefore can't be SP
32094 Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
32095 addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
32096 Subtarget.is64Bit() ? 8 : 4);
32097 BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
32099 .addImm(LPadList.size());
32100 BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
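// COND_AE makes this an unsigned 'index >= LPadList.size()' check, so only
// indices 0..LPadList.size()-1 fall through to the jump-table dispatch below.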
32102 if (Subtarget.is64Bit()) {
32103 Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
32104 Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
32106 // leaq .LJTI0_0(%rip), BReg
32107 BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
32111 .addJumpTableIndex(MJTI)
32113 // movzx IReg64, IReg
32114 BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
32117 .addImm(X86::sub_32bit);
32120 case MachineJumpTableInfo::EK_BlockAddress:
32121 // jmpq *(BReg,IReg64,8)
32122 BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
32129 case MachineJumpTableInfo::EK_LabelDifference32: {
32130 Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
32131 Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
32132 Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
32134 // movl (BReg,IReg64,4), OReg
32135 BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
32141 // movsx OReg64, OReg
32142 BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
32143 // addq BReg, OReg64, TReg
32144 BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
32148 BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
32152 llvm_unreachable("Unexpected jump table encoding");
32155 // jmpl *.LJTI0_0(,IReg,4)
32156 BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
32160 .addJumpTableIndex(MJTI)
32164 // Add the jump table entries as successors to the MBB.
32165 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
32166 for (auto &LP : LPadList)
32167 if (SeenMBBs.insert(LP).second)
32168 DispContBB->addSuccessor(LP);
32170 // N.B. the order the invoke BBs are processed in doesn't matter here.
32171 SmallVector<MachineBasicBlock *, 64> MBBLPads;
32172 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
32173 for (MachineBasicBlock *MBB : InvokeBBs) {
32174 // Remove the landing pad successor from the invoke block and replace it
32175 // with the new dispatch block.
32176 // Keep a copy of Successors since it's modified inside the loop.
32177 SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
32179 // FIXME: Avoid quadratic complexity.
32180 for (auto MBBS : Successors) {
32181 if (MBBS->isEHPad()) {
32182 MBB->removeSuccessor(MBBS);
32183 MBBLPads.push_back(MBBS);
32187 MBB->addSuccessor(DispatchBB);
32189 // Find the invoke call and mark all of the callee-saved registers as
32190 // 'implicit defined' so that they're spilled. This prevents code from
32191 // moving instructions to before the EH block, where they will never be
32192 // executed.
32193 for (auto &II : reverse(*MBB)) {
32197 DenseMap<unsigned, bool> DefRegs;
32198 for (auto &MOp : II.operands())
32199 if (MOp.isReg())
32200 DefRegs[MOp.getReg()] = true;
32202 MachineInstrBuilder MIB(*MF, &II);
32203 for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
32204 unsigned Reg = SavedRegs[RegIdx];
32205 if (!DefRegs[Reg])
32206 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
32213 // Mark all former landing pads as non-landing pads. The dispatch is the only
32214 // landing pad now.
32215 for (auto &LP : MBBLPads)
32216 LP->setIsEHPad(false);
32218 // The instruction is gone now.
32219 MI.eraseFromParent();
32223 MachineBasicBlock *
32224 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
32225 MachineBasicBlock *BB) const {
32226 MachineFunction *MF = BB->getParent();
32227 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
32228 DebugLoc DL = MI.getDebugLoc();
32230 switch (MI.getOpcode()) {
32231 default: llvm_unreachable("Unexpected instr type to insert");
32232 case X86::TLS_addr32:
32233 case X86::TLS_addr64:
32234 case X86::TLS_base_addr32:
32235 case X86::TLS_base_addr64:
32236 return EmitLoweredTLSAddr(MI, BB);
32237 case X86::RETPOLINE_CALL32:
32238 case X86::RETPOLINE_CALL64:
32239 case X86::RETPOLINE_TCRETURN32:
32240 case X86::RETPOLINE_TCRETURN64:
32241 return EmitLoweredRetpoline(MI, BB);
32242 case X86::CATCHRET:
32243 return EmitLoweredCatchRet(MI, BB);
32244 case X86::CATCHPAD:
32245 return EmitLoweredCatchPad(MI, BB);
32246 case X86::SEG_ALLOCA_32:
32247 case X86::SEG_ALLOCA_64:
32248 return EmitLoweredSegAlloca(MI, BB);
32249 case X86::TLSCall_32:
32250 case X86::TLSCall_64:
32251 return EmitLoweredTLSCall(MI, BB);
32252 case X86::CMOV_FR32:
32253 case X86::CMOV_FR32X:
32254 case X86::CMOV_FR64:
32255 case X86::CMOV_FR64X:
32256 case X86::CMOV_GR8:
32257 case X86::CMOV_GR16:
32258 case X86::CMOV_GR32:
32259 case X86::CMOV_RFP32:
32260 case X86::CMOV_RFP64:
32261 case X86::CMOV_RFP80:
32262 case X86::CMOV_VR128:
32263 case X86::CMOV_VR128X:
32264 case X86::CMOV_VR256:
32265 case X86::CMOV_VR256X:
32266 case X86::CMOV_VR512:
32267 case X86::CMOV_VK2:
32268 case X86::CMOV_VK4:
32269 case X86::CMOV_VK8:
32270 case X86::CMOV_VK16:
32271 case X86::CMOV_VK32:
32272 case X86::CMOV_VK64:
32273 return EmitLoweredSelect(MI, BB);
32275 case X86::RDFLAGS32:
32276 case X86::RDFLAGS64: {
32277 unsigned PushF =
32278 MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
32279 unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
32280 MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
32281 // Permit reads of the EFLAGS and DF registers without them being defined.
32282 // This intrinsic exists to read external processor state in flags, such as
32283 // the trap flag, interrupt flag, and direction flag, none of which are
32284 // modeled by the backend.
32285 assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
32286 "Unexpected register in operand!");
32287 Push->getOperand(2).setIsUndef();
32288 assert(Push->getOperand(3).getReg() == X86::DF &&
32289 "Unexpected register in operand!");
32290 Push->getOperand(3).setIsUndef();
32291 BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
32293 MI.eraseFromParent(); // The pseudo is gone now.
32297 case X86::WRFLAGS32:
32298 case X86::WRFLAGS64: {
32299 unsigned Push =
32300 MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
32301 unsigned PopF =
32302 MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
32303 BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
32304 BuildMI(*BB, MI, DL, TII->get(PopF));
32306 MI.eraseFromParent(); // The pseudo is gone now.
32310 case X86::FP32_TO_INT16_IN_MEM:
32311 case X86::FP32_TO_INT32_IN_MEM:
32312 case X86::FP32_TO_INT64_IN_MEM:
32313 case X86::FP64_TO_INT16_IN_MEM:
32314 case X86::FP64_TO_INT32_IN_MEM:
32315 case X86::FP64_TO_INT64_IN_MEM:
32316 case X86::FP80_TO_INT16_IN_MEM:
32317 case X86::FP80_TO_INT32_IN_MEM:
32318 case X86::FP80_TO_INT64_IN_MEM: {
32319 // Change the floating point control register to use "round towards zero"
32320 // mode when truncating to an integer value.
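// Roughly, the sequence emitted below is: fnstcw [orig]; movzwl [orig], %reg;
// orl $0xC00, %reg; movw %reg, [new]; fldcw [new]; <fistp-style store to the
// destination>; fldcw [orig].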
32321 int OrigCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
32322 addFrameReference(BuildMI(*BB, MI, DL,
32323 TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
32325 // Load the old value of the control word...
32326 Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
32327 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
32330 // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
32331 Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
32332 BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
32333 .addReg(OldCW, RegState::Kill).addImm(0xC00);
32335 // Extract to 16 bits.
32336 Register NewCW16 =
32337 MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
32338 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
32339 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
32341 // Prepare memory for FLDCW.
32342 int NewCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
32343 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
32345 .addReg(NewCW16, RegState::Kill);
32347 // Reload the modified control word now...
32348 addFrameReference(BuildMI(*BB, MI, DL,
32349 TII->get(X86::FLDCW16m)), NewCWFrameIdx);
32351 // Get the X86 opcode to use.
32352 unsigned Opc;
32353 switch (MI.getOpcode()) {
32354 default: llvm_unreachable("illegal opcode!");
32355 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
32356 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
32357 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
32358 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
32359 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
32360 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
32361 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
32362 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
32363 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
32366 X86AddressMode AM = getAddressFromInstr(&MI, 0);
32367 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
32368 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
32370 // Reload the original control word now.
32371 addFrameReference(BuildMI(*BB, MI, DL,
32372 TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
32374 MI.eraseFromParent(); // The pseudo instruction is gone now.
32380 return emitXBegin(MI, BB, Subtarget.getInstrInfo());
32382 case X86::VASTART_SAVE_XMM_REGS:
32383 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
32385 case X86::VAARG_64:
32386 return EmitVAARG64WithCustomInserter(MI, BB);
32388 case X86::EH_SjLj_SetJmp32:
32389 case X86::EH_SjLj_SetJmp64:
32390 return emitEHSjLjSetJmp(MI, BB);
32392 case X86::EH_SjLj_LongJmp32:
32393 case X86::EH_SjLj_LongJmp64:
32394 return emitEHSjLjLongJmp(MI, BB);
32396 case X86::Int_eh_sjlj_setup_dispatch:
32397 return EmitSjLjDispatchBlock(MI, BB);
32399 case TargetOpcode::STATEPOINT:
32400 // As an implementation detail, STATEPOINT shares the STACKMAP format at
32401 // this point in the process. We diverge later.
32402 return emitPatchPoint(MI, BB);
32404 case TargetOpcode::STACKMAP:
32405 case TargetOpcode::PATCHPOINT:
32406 return emitPatchPoint(MI, BB);
32408 case TargetOpcode::PATCHABLE_EVENT_CALL:
32409 return emitXRayCustomEvent(MI, BB);
32411 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
32412 return emitXRayTypedEvent(MI, BB);
32414 case X86::LCMPXCHG8B: {
32415 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
32416 // In addition to the four E[ABCD] registers implied by its encoding, CMPXCHG8B
32417 // requires a memory operand. If the current architecture is i686 and the
32418 // current function needs a base pointer - which is ESI on i686 - the register
32419 // allocator would not be able to allocate registers for an address of the
32420 // form X(%reg, %reg, Y): there would never be enough unreserved registers
32421 // during regalloc (without the need for a base pointer the only option would
32422 // be X(%edi, %esi, Y)).
32423 // We give the register allocator a hand by precomputing the address in a new
32424 // vreg using LEA.
32426 // If it is not i686 or there is no base pointer - nothing to do here.
32427 if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
32430 // Even though this code does not necessarily need the base pointer to
32431 // be ESI, we check for that. The reason: if this assert fails, something
32432 // has changed in the compiler's base pointer handling, which most
32433 // probably has to be addressed here.
32434 assert(TRI->getBaseRegister() == X86::ESI &&
32435 "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
32436 "base pointer in mind");
32438 MachineRegisterInfo &MRI = MF->getRegInfo();
32439 MVT SPTy = getPointerTy(MF->getDataLayout());
32440 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
32441 Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
32443 X86AddressMode AM = getAddressFromInstr(&MI, 0);
32444 // Regalloc does not need any help when the memory operand of CMPXCHG8B
32445 // does not use an index register.
32446 if (AM.IndexReg == X86::NoRegister)
32449 // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
32450 // four operand definitions that are E[ABCD] registers. We skip them and
32451 // then insert the LEA.
32452 MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
32453 while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
32454 RMBBI->definesRegister(X86::EBX) ||
32455 RMBBI->definesRegister(X86::ECX) ||
32456 RMBBI->definesRegister(X86::EDX))) {
32459 MachineBasicBlock::iterator MBBI(RMBBI);
32461 BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
32463 setDirectAddressInInstr(&MI, 0, computedAddrVReg);
32467 case X86::LCMPXCHG16B:
32469 case X86::LCMPXCHG8B_SAVE_EBX:
32470 case X86::LCMPXCHG16B_SAVE_RBX: {
32472 MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
32473 if (!BB->isLiveIn(BasePtr))
32474 BB->addLiveIn(BasePtr);
32480 //===----------------------------------------------------------------------===//
32481 // X86 Optimization Hooks
32482 //===----------------------------------------------------------------------===//
32485 X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
32486 const APInt &Demanded,
32487 TargetLoweringOpt &TLO) const {
32488 // Only optimize Ands to prevent shrinking a constant that could be
32489 // matched by movzx.
32490 if (Op.getOpcode() != ISD::AND)
32493 EVT VT = Op.getValueType();
32499 unsigned Size = VT.getSizeInBits();
32501 // Make sure the RHS really is a constant.
32502 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
32506 const APInt &Mask = C->getAPIntValue();
32508 // Clear all non-demanded bits initially.
32509 APInt ShrunkMask = Mask & Demanded;
32511 // Find the width of the shrunk mask.
32512 unsigned Width = ShrunkMask.getActiveBits();
32514 // If the mask is all 0s there's nothing to do here.
32518 // Find the next power of 2 width, rounding up to a byte.
32519 Width = PowerOf2Ceil(std::max(Width, 8U));
32520 // Truncate the width to size to handle illegal types.
32521 Width = std::min(Width, Size);
32523 // Calculate a possible zero extend mask for this constant.
32524 APInt ZeroExtendMask = APInt::getLowBitsSet(Size, Width);
32526 // If we aren't changing the mask, just return true to keep it and prevent
32527 // the caller from optimizing.
32528 if (ZeroExtendMask == Mask)
32531 // Make sure the new mask can be represented by a combination of mask bits
32532 // and non-demanded bits.
32533 if (!ZeroExtendMask.isSubsetOf(Mask | ~Demanded))
32536 // Replace the constant with the zero extend mask.
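// For example, if only the low 8 bits of (X & 0x3FF) are demanded, ShrunkMask
// is 0xFF, Width rounds to 8, and the AND constant becomes 0xFF, which can be
// matched as a movzx.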
32538 SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
32539 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
32540 return TLO.CombineTo(Op, NewOp);
32543 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
32545 const APInt &DemandedElts,
32546 const SelectionDAG &DAG,
32547 unsigned Depth) const {
32548 unsigned BitWidth = Known.getBitWidth();
32549 unsigned Opc = Op.getOpcode();
32550 EVT VT = Op.getValueType();
32551 assert((Opc >= ISD::BUILTIN_OP_END ||
32552 Opc == ISD::INTRINSIC_WO_CHAIN ||
32553 Opc == ISD::INTRINSIC_W_CHAIN ||
32554 Opc == ISD::INTRINSIC_VOID) &&
32555 "Should use MaskedValueIsZero if you don't know whether Op"
32556 " is a target node!");
32561 case X86ISD::SETCC:
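// SETCC produces a 0/1 result, so every bit above bit 0 is known zero.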
32562 Known.Zero.setBitsFrom(1);
32564 case X86ISD::MOVMSK: {
32565 unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
32566 Known.Zero.setBitsFrom(NumLoBits);
32569 case X86ISD::PEXTRB:
32570 case X86ISD::PEXTRW: {
32571 SDValue Src = Op.getOperand(0);
32572 EVT SrcVT = Src.getValueType();
32573 APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
32574 Op.getConstantOperandVal(1));
32575 Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
32576 Known = Known.zextOrTrunc(BitWidth, false);
32577 Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
32580 case X86ISD::VSRAI:
32581 case X86ISD::VSHLI:
32582 case X86ISD::VSRLI: {
32583 unsigned ShAmt = Op.getConstantOperandVal(1);
32584 if (ShAmt >= VT.getScalarSizeInBits()) {
32585 Known.setAllZero();
32589 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
32590 if (Opc == X86ISD::VSHLI) {
32591 Known.Zero <<= ShAmt;
32592 Known.One <<= ShAmt;
32593 // Low bits are known zero.
32594 Known.Zero.setLowBits(ShAmt);
32595 } else if (Opc == X86ISD::VSRLI) {
32596 Known.Zero.lshrInPlace(ShAmt);
32597 Known.One.lshrInPlace(ShAmt);
32598 // High bits are known zero.
32599 Known.Zero.setHighBits(ShAmt);
32601 Known.Zero.ashrInPlace(ShAmt);
32602 Known.One.ashrInPlace(ShAmt);
32606 case X86ISD::PACKUS: {
32607 // PACKUS is just a truncation if the upper half is zero.
32608 APInt DemandedLHS, DemandedRHS;
32609 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
32611 Known.One = APInt::getAllOnesValue(BitWidth * 2);
32612 Known.Zero = APInt::getAllOnesValue(BitWidth * 2);
32615 if (!!DemandedLHS) {
32616 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
32617 Known.One &= Known2.One;
32618 Known.Zero &= Known2.Zero;
32620 if (!!DemandedRHS) {
32621 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
32622 Known.One &= Known2.One;
32623 Known.Zero &= Known2.Zero;
32626 if (Known.countMinLeadingZeros() < BitWidth)
32627 Known.resetAll();
32628 Known = Known.trunc(BitWidth);
32631 case X86ISD::ANDNP: {
32633 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
32634 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
32636 // ANDNP = (~X & Y);
32637 Known.One &= Known2.Zero;
32638 Known.Zero |= Known2.One;
32641 case X86ISD::FOR: {
32643 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
32644 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
32646 // Output known-0 bits are only known if clear in both the LHS & RHS.
32647 Known.Zero &= Known2.Zero;
32649 // Output known-1 bits are known to be set if set in either the LHS | RHS.
32649 Known.One |= Known2.One;
32652 case X86ISD::PSADBW: {
32653 assert(VT.getScalarType() == MVT::i64 &&
32654 Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
32655 "Unexpected PSADBW types");
32657 // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
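// (Each i64 element is a sum of eight absolute byte differences, at most
// 8 * 255 = 2040, so it always fits in the low 16 bits.)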
32658 Known.Zero.setBitsFrom(16);
32661 case X86ISD::CMOV: {
32662 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
32663 // If we don't know any bits, early out.
32664 if (Known.isUnknown())
32666 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
32668 // Only known if known in both the LHS and RHS.
32669 Known.One &= Known2.One;
32670 Known.Zero &= Known2.Zero;
32675 // Handle target shuffles.
32676 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
32677 if (isTargetShuffle(Opc)) {
32679 SmallVector<int, 64> Mask;
32680 SmallVector<SDValue, 2> Ops;
32681 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
32683 unsigned NumOps = Ops.size();
32684 unsigned NumElts = VT.getVectorNumElements();
32685 if (Mask.size() == NumElts) {
32686 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
32687 Known.Zero.setAllBits(); Known.One.setAllBits();
32688 for (unsigned i = 0; i != NumElts; ++i) {
32689 if (!DemandedElts[i])
32692 if (M == SM_SentinelUndef) {
32693 // For UNDEF elements, we don't know anything about the common state
32694 // of the shuffle result.
32697 } else if (M == SM_SentinelZero) {
32698 Known.One.clearAllBits();
32701 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
32702 "Shuffle index out of range");
32704 unsigned OpIdx = (unsigned)M / NumElts;
32705 unsigned EltIdx = (unsigned)M % NumElts;
32706 if (Ops[OpIdx].getValueType() != VT) {
32707 // TODO - handle target shuffle ops with different value types.
32711 DemandedOps[OpIdx].setBit(EltIdx);
32713 // Known bits are the values that are shared by every demanded element.
32714 for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
32715 if (!DemandedOps[i])
32718 DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
32719 Known.One &= Known2.One;
32720 Known.Zero &= Known2.Zero;
32727 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
32728 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
32729 unsigned Depth) const {
32730 EVT VT = Op.getValueType();
32731 unsigned VTBits = VT.getScalarSizeInBits();
32732 unsigned Opcode = Op.getOpcode();
32734 case X86ISD::SETCC_CARRY:
32735 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
32736 return VTBits;
32738 case X86ISD::VTRUNC: {
32739 // TODO: Add DemandedElts support.
32740 SDValue Src = Op.getOperand(0);
32741 unsigned NumSrcBits = Src.getScalarValueSizeInBits();
32742 assert(VTBits < NumSrcBits && "Illegal truncation input type");
32743 unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
32744 if (Tmp > (NumSrcBits - VTBits))
32745 return Tmp - (NumSrcBits - VTBits);
32749 case X86ISD::PACKSS: {
32750 // PACKSS is just a truncation if the sign bits extend to the packed size.
32751 APInt DemandedLHS, DemandedRHS;
32752 getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
32755 unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
32756 unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
32758 Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
32760 Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
32761 unsigned Tmp = std::min(Tmp0, Tmp1);
32762 if (Tmp > (SrcBits - VTBits))
32763 return Tmp - (SrcBits - VTBits);
32767 case X86ISD::VSHLI: {
32768 SDValue Src = Op.getOperand(0);
32769 const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
32770 if (ShiftVal.uge(VTBits))
32771 return VTBits; // Shifted all bits out --> zero.
32772 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
32773 if (ShiftVal.uge(Tmp))
32774 return 1; // Shifted all sign bits out --> unknown.
32775 return Tmp - ShiftVal.getZExtValue();
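// e.g. if a source element has 20 known sign bits and we shift left by 5,
// the result is still guaranteed 15 sign bits.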
32778 case X86ISD::VSRAI: {
32779 SDValue Src = Op.getOperand(0);
32780 APInt ShiftVal = Op.getConstantOperandAPInt(1);
32781 if (ShiftVal.uge(VTBits - 1))
32782 return VTBits; // Sign splat.
32783 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
32784 ShiftVal += Tmp;
32785 return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
32788 case X86ISD::PCMPGT:
32789 case X86ISD::PCMPEQ:
32791 case X86ISD::VPCOM:
32792 case X86ISD::VPCOMU:
32793 // Vector compares return zero/all-bits result values.
32796 case X86ISD::ANDNP: {
32798 DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
32799 if (Tmp0 == 1) return 1; // Early out.
32801 DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
32802 return std::min(Tmp0, Tmp1);
32805 case X86ISD::CMOV: {
32806 unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
32807 if (Tmp0 == 1) return 1; // Early out.
32808 unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
32809 return std::min(Tmp0, Tmp1);
32813 // Handle target shuffles.
32814 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
32815 if (isTargetShuffle(Opcode)) {
32817 SmallVector<int, 64> Mask;
32818 SmallVector<SDValue, 2> Ops;
32819 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
32821 unsigned NumOps = Ops.size();
32822 unsigned NumElts = VT.getVectorNumElements();
32823 if (Mask.size() == NumElts) {
32824 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
32825 for (unsigned i = 0; i != NumElts; ++i) {
32826 if (!DemandedElts[i])
32829 if (M == SM_SentinelUndef) {
32830 // For UNDEF elements, we don't know anything about the common state
32831 // of the shuffle result.
32833 } else if (M == SM_SentinelZero) {
32834 // Zero = all sign bits.
32837 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
32838 "Shuffle index out of range");
32840 unsigned OpIdx = (unsigned)M / NumElts;
32841 unsigned EltIdx = (unsigned)M % NumElts;
32842 if (Ops[OpIdx].getValueType() != VT) {
32843 // TODO - handle target shuffle ops with different value types.
32846 DemandedOps[OpIdx].setBit(EltIdx);
32848 unsigned Tmp0 = VTBits;
32849 for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
32850 if (!DemandedOps[i])
32853 DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
32854 Tmp0 = std::min(Tmp0, Tmp1);
32865 SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
32866 if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
32867 return N->getOperand(0);
32871 // Attempt to match a combined shuffle mask against supported unary shuffle
32872 // instructions.
32873 // TODO: Investigate sharing more of this with shuffle lowering.
32874 static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
32875 bool AllowFloatDomain, bool AllowIntDomain,
32876 SDValue &V1, const SDLoc &DL, SelectionDAG &DAG,
32877 const X86Subtarget &Subtarget, unsigned &Shuffle,
32878 MVT &SrcVT, MVT &DstVT) {
32879 unsigned NumMaskElts = Mask.size();
32880 unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
32882 // Match against a VZEXT_MOVL vXi32 zero-extending instruction.
32883 if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
32884 isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
32885 Shuffle = X86ISD::VZEXT_MOVL;
32886 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
32890 // Match against an ANY/ZERO_EXTEND_VECTOR_INREG instruction.
32891 // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
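// For example, the v16i8 mask {0,Z,Z,Z, 1,Z,Z,Z, 2,Z,Z,Z, 3,Z,Z,Z}
// (Z = zero/undef) matches Scale == 4 below and is lowered as a
// ZERO_EXTEND_VECTOR_INREG of the low four bytes into v4i32.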
32892 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
32893 (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
32894 unsigned MaxScale = 64 / MaskEltSize;
32895 for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
32896 bool MatchAny = true;
32897 bool MatchZero = true;
32898 unsigned NumDstElts = NumMaskElts / Scale;
32899 for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
32900 if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
32901 MatchAny = MatchZero = false;
32904 MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
32905 MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
32907 if (MatchAny || MatchZero) {
32908 assert(MatchZero && "Failed to match zext but matched aext?");
32909 unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
32910 MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
32911 MVT::getIntegerVT(MaskEltSize);
32912 SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
32914 if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
32915 V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
32917 Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
32918 if (SrcVT.getVectorNumElements() != NumDstElts)
32919 Shuffle = getOpcode_EXTEND_VECTOR_INREG(Shuffle);
32921 DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
32922 DstVT = MVT::getVectorVT(DstVT, NumDstElts);
32928 // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
32929 if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
32930 isUndefOrEqual(Mask[0], 0) &&
32931 isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
32932 Shuffle = X86ISD::VZEXT_MOVL;
32933 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
32937 // Check if we have SSE3 which will let us use MOVDDUP etc. The
32938 // instructions are no slower than UNPCKLPD but have the option to
32939 // fold the input operand into even an unaligned memory load.
32940 if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
32941 if (isTargetShuffleEquivalent(Mask, {0, 0})) {
32942 Shuffle = X86ISD::MOVDDUP;
32943 SrcVT = DstVT = MVT::v2f64;
32946 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
32947 Shuffle = X86ISD::MOVSLDUP;
32948 SrcVT = DstVT = MVT::v4f32;
32951 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
32952 Shuffle = X86ISD::MOVSHDUP;
32953 SrcVT = DstVT = MVT::v4f32;
32958 if (MaskVT.is256BitVector() && AllowFloatDomain) {
32959 assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
32960 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
32961 Shuffle = X86ISD::MOVDDUP;
32962 SrcVT = DstVT = MVT::v4f64;
32965 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
32966 Shuffle = X86ISD::MOVSLDUP;
32967 SrcVT = DstVT = MVT::v8f32;
32970 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
32971 Shuffle = X86ISD::MOVSHDUP;
32972 SrcVT = DstVT = MVT::v8f32;
32977 if (MaskVT.is512BitVector() && AllowFloatDomain) {
32978 assert(Subtarget.hasAVX512() &&
32979 "AVX512 required for 512-bit vector shuffles");
32980 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
32981 Shuffle = X86ISD::MOVDDUP;
32982 SrcVT = DstVT = MVT::v8f64;
32985 if (isTargetShuffleEquivalent(
32986 Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
32987 Shuffle = X86ISD::MOVSLDUP;
32988 SrcVT = DstVT = MVT::v16f32;
32991 if (isTargetShuffleEquivalent(
32992 Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
32993 Shuffle = X86ISD::MOVSHDUP;
32994 SrcVT = DstVT = MVT::v16f32;
33002 // Attempt to match a combined shuffle mask against supported unary immediate
33003 // permute instructions.
33004 // TODO: Investigate sharing more of this with shuffle lowering.
33005 static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
33006 const APInt &Zeroable,
33007 bool AllowFloatDomain, bool AllowIntDomain,
33008 const X86Subtarget &Subtarget,
33009 unsigned &Shuffle, MVT &ShuffleVT,
33010 unsigned &PermuteImm) {
33011 unsigned NumMaskElts = Mask.size();
33012 unsigned InputSizeInBits = MaskVT.getSizeInBits();
33013 unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
33014 MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
33016 bool ContainsZeros =
33017 llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
33019 // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
33020 if (!ContainsZeros && MaskScalarSizeInBits == 64) {
33021 // Check for lane crossing permutes.
33022 if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
33023 // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
33024 if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
33025 Shuffle = X86ISD::VPERMI;
33026 ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
33027 PermuteImm = getV4X86ShuffleImm(Mask);
33030 if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
33031 SmallVector<int, 4> RepeatedMask;
33032 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
33033 Shuffle = X86ISD::VPERMI;
33034 ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
33035 PermuteImm = getV4X86ShuffleImm(RepeatedMask);
33039 } else if (AllowFloatDomain && Subtarget.hasAVX()) {
33040 // VPERMILPD can permute with a non-repeating shuffle.
33041 Shuffle = X86ISD::VPERMILPI;
33042 ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
33044 for (int i = 0, e = Mask.size(); i != e; ++i) {
33046 if (M == SM_SentinelUndef)
33048 assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
33049 PermuteImm |= (M & 1) << i;
33055 // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
33056 // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
33057 // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
33058 if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
33059 !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
33060 SmallVector<int, 4> RepeatedMask;
33061 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
33062 // Narrow the repeated mask to create 32-bit element permutes.
33063 SmallVector<int, 4> WordMask = RepeatedMask;
33064 if (MaskScalarSizeInBits == 64)
33065 scaleShuffleMask<int>(2, RepeatedMask, WordMask);
33067 Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
33068 ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
33069 ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
33070 PermuteImm = getV4X86ShuffleImm(WordMask);
33075 // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
33076 if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
33077 SmallVector<int, 4> RepeatedMask;
33078 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
33079 ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
33080 ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
33082 // PSHUFLW: permute lower 4 elements only.
33083 if (isUndefOrInRange(LoMask, 0, 4) &&
33084 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
33085 Shuffle = X86ISD::PSHUFLW;
33086 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
33087 PermuteImm = getV4X86ShuffleImm(LoMask);
33091 // PSHUFHW: permute upper 4 elements only.
33092 if (isUndefOrInRange(HiMask, 4, 8) &&
33093 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
33094 // Offset the HiMask so that we can create the shuffle immediate.
33095 int OffsetHiMask[4];
33096 for (int i = 0; i != 4; ++i)
33097 OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
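// e.g. a repeated mask {0,1,2,3, 5,4,7,6} leaves the low half alone; its
// HiMask {5,4,7,6} is offset to {1,0,3,2}, giving a PSHUFHW immediate of 0xB1.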
33099 Shuffle = X86ISD::PSHUFHW;
33100 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
33101 PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
33107 // Attempt to match against byte/bit shifts.
33108 // FIXME: Add 512-bit support.
33109 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
33110 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
33111 int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
33112 Mask, 0, Zeroable, Subtarget);
33113 if (0 < ShiftAmt) {
33114 PermuteImm = (unsigned)ShiftAmt;
33122 // Attempt to match a combined unary shuffle mask against supported binary
33123 // shuffle instructions.
33124 // TODO: Investigate sharing more of this with shuffle lowering.
33125 static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
33126 bool AllowFloatDomain, bool AllowIntDomain,
33127 SDValue &V1, SDValue &V2, const SDLoc &DL,
33128 SelectionDAG &DAG, const X86Subtarget &Subtarget,
33129 unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
33131 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
33133 if (MaskVT.is128BitVector()) {
33134 if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
33136 V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
33137 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
33138 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
33141 if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
33143 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
33144 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
33147 if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
33148 (AllowFloatDomain || !Subtarget.hasSSE41())) {
33150 Shuffle = X86ISD::MOVSD;
33151 SrcVT = DstVT = MVT::v2f64;
33154 if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
33155 (AllowFloatDomain || !Subtarget.hasSSE41())) {
33156 Shuffle = X86ISD::MOVSS;
33157 SrcVT = DstVT = MVT::v4f32;
33162 // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
33163 if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
33164 ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
33165 ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
33166 if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
33173 // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
33174 if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
33175 (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
33176 (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
33177 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
33178 (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
33179 if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
33181 SrcVT = DstVT = MaskVT;
33182 if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
33183 SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
33191 static bool matchBinaryPermuteShuffle(
33192 MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
33193 bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
33194 const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
33195 unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
33196 unsigned NumMaskElts = Mask.size();
33197 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
33199 // Attempt to match against PALIGNR byte rotate.
33200 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
33201 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
33202 int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
33203 if (0 < ByteRotation) {
33204 Shuffle = X86ISD::PALIGNR;
33205 ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
33206 PermuteImm = ByteRotation;
33211 // Attempt to combine to X86ISD::BLENDI.
33212 if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
33213 (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
33214 (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
33215 uint64_t BlendMask = 0;
33216 bool ForceV1Zero = false, ForceV2Zero = false;
33217 SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
33218 if (matchShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
33219 ForceV2Zero, BlendMask)) {
33220 if (MaskVT == MVT::v16i16) {
33221 // We can only use v16i16 PBLENDW if the lanes are repeated.
33222 SmallVector<int, 8> RepeatedMask;
33223 if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
33225 assert(RepeatedMask.size() == 8 &&
33226 "Repeated mask size doesn't match!");
33228 for (int i = 0; i < 8; ++i)
33229 if (RepeatedMask[i] >= 8)
33230 PermuteImm |= 1 << i;
33231 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
33232 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
33233 Shuffle = X86ISD::BLENDI;
33234 ShuffleVT = MaskVT;
33238 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
33239 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
33240 PermuteImm = (unsigned)BlendMask;
33241 Shuffle = X86ISD::BLENDI;
33242 ShuffleVT = MaskVT;
33248 // Attempt to combine to INSERTPS, but only if it has elements that need to
33249 // be set to zero.
33250 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
33251 MaskVT.is128BitVector() &&
33252 llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }) &&
33253 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
33254 Shuffle = X86ISD::INSERTPS;
33255 ShuffleVT = MVT::v4f32;
33259 // Attempt to combine to SHUFPD.
33260 if (AllowFloatDomain && EltSizeInBits == 64 &&
33261 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
33262 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
33263 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
33264 bool ForceV1Zero = false, ForceV2Zero = false;
33265 if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
33266 PermuteImm, Mask, Zeroable)) {
33267 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
33268 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
33269 Shuffle = X86ISD::SHUFP;
33270 ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
33275 // Attempt to combine to SHUFPS.
33276 if (AllowFloatDomain && EltSizeInBits == 32 &&
33277 ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
33278 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
33279 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
33280 SmallVector<int, 4> RepeatedMask;
33281 if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
33282 // Match each half of the repeated mask, to determine if it's just
33283 // referencing one of the vectors, is zeroable or entirely undef.
33284 auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
33285 int M0 = RepeatedMask[Offset];
33286 int M1 = RepeatedMask[Offset + 1];
33288 if (isUndefInRange(RepeatedMask, Offset, 2)) {
33289 return DAG.getUNDEF(MaskVT);
33290 } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
33291 S0 = (SM_SentinelUndef == M0 ? -1 : 0);
33292 S1 = (SM_SentinelUndef == M1 ? -1 : 1);
33293 return getZeroVector(MaskVT, Subtarget, DAG, DL);
33294 } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
33295 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
33296 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
33298 } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
33299 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
33300 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
33307 int ShufMask[4] = {-1, -1, -1, -1};
33308 SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
33309 SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
33314 Shuffle = X86ISD::SHUFP;
33315 ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
33316 PermuteImm = getV4X86ShuffleImm(ShufMask);
33322 // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
33323 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
33324 MaskVT.is128BitVector() &&
33325 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
33326 Shuffle = X86ISD::INSERTPS;
33327 ShuffleVT = MVT::v4f32;
33334 static SDValue combineX86ShuffleChainWithExtract(
33335 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
33336 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
33337 const X86Subtarget &Subtarget);
33339 /// Combine an arbitrary chain of shuffles into a single instruction if
33340 /// possible.
33342 /// This is the leaf of the recursive combine below. When we have found some
33343 /// chain of single-use x86 shuffle instructions and accumulated the combined
33344 /// shuffle mask represented by them, this will try to pattern match that mask
33345 /// into either a single instruction if there is a special purpose instruction
33346 /// for this operation, or into a PSHUFB instruction which is a fully general
33347 /// instruction but should only be used to replace chains over a certain depth.
33348 static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
33349 ArrayRef<int> BaseMask, int Depth,
33350 bool HasVariableMask,
33351 bool AllowVariableMask, SelectionDAG &DAG,
33352 const X86Subtarget &Subtarget) {
33353 assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
33354 assert((Inputs.size() == 1 || Inputs.size() == 2) &&
33355 "Unexpected number of shuffle inputs!");
33357 // Find the inputs that enter the chain. Note that multiple uses are OK
33358 // here; we're not going to remove the operands we find.
33359 bool UnaryShuffle = (Inputs.size() == 1);
33360 SDValue V1 = peekThroughBitcasts(Inputs[0]);
33361 SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
33362 : peekThroughBitcasts(Inputs[1]));
33364 MVT VT1 = V1.getSimpleValueType();
33365 MVT VT2 = V2.getSimpleValueType();
33366 MVT RootVT = Root.getSimpleValueType();
33367 assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
33368 VT2.getSizeInBits() == RootVT.getSizeInBits() &&
33369 "Vector size mismatch");
33374 unsigned NumBaseMaskElts = BaseMask.size();
33375 if (NumBaseMaskElts == 1) {
33376 assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
33377 return DAG.getBitcast(RootVT, V1);
33380 unsigned RootSizeInBits = RootVT.getSizeInBits();
33381 unsigned NumRootElts = RootVT.getVectorNumElements();
33382 unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
33383 bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
33384 (RootVT.isFloatingPoint() && Depth >= 1) ||
33385 (RootVT.is256BitVector() && !Subtarget.hasAVX2());
33387 // Don't combine if we are an AVX512/EVEX target and the mask element size
33388 // is different from the root element size - this would prevent writemasks
33389 // from being reused.
33390 // TODO - this currently prevents all lane shuffles from occurring.
33391 // TODO - check for writemasks usage instead of always preventing combining.
33392 // TODO - attempt to narrow Mask back to writemask size.
33393 bool IsEVEXShuffle =
33394 RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128);
33396 // Attempt to match a subvector broadcast.
33397 // shuffle(insert_subvector(undef, sub, 0), undef, 0, 0, 0, 0)
33398 if (UnaryShuffle &&
33399 (BaseMaskEltSizeInBits == 128 || BaseMaskEltSizeInBits == 256)) {
33400 SmallVector<int, 64> BroadcastMask(NumBaseMaskElts, 0);
33401 if (isTargetShuffleEquivalent(BaseMask, BroadcastMask)) {
33402 SDValue Src = Inputs[0];
33403 if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
33404 Src.getOperand(0).isUndef() &&
33405 Src.getOperand(1).getValueSizeInBits() == BaseMaskEltSizeInBits &&
33406 MayFoldLoad(Src.getOperand(1)) && isNullConstant(Src.getOperand(2))) {
33407 return DAG.getBitcast(RootVT, DAG.getNode(X86ISD::SUBV_BROADCAST, DL,
33408 Src.getValueType(),
33409 Src.getOperand(1)));
33414 // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
33416 // Handle 128-bit lane shuffles of 256-bit vectors.
33417 // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
33418 // we need to use the zeroing feature.
33419 // TODO - this should support binary shuffles.
33420 if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 &&
33421 !(Subtarget.hasAVX2() && BaseMask[0] >= -1 && BaseMask[1] >= -1) &&
33422 !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
33423 if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
33424 return SDValue(); // Nothing to do!
33425 MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
33426 unsigned PermMask = 0;
33427 PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
33428 PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
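// e.g. BaseMask {1, SM_SentinelZero} encodes as PermMask 0x81: take the high
// 128-bit lane of V1 into lane 0 and zero lane 1.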
33430 Res = DAG.getBitcast(ShuffleVT, V1);
33431 Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
33432 DAG.getUNDEF(ShuffleVT),
33433 DAG.getTargetConstant(PermMask, DL, MVT::i8));
33434 return DAG.getBitcast(RootVT, Res);
33437 // For masks that have been widened to 128-bit elements or more,
33438 // narrow back down to 64-bit elements.
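// e.g. a widened 2 x 128-bit element mask {1, 0} becomes the 4 x 64-bit
// element mask {2, 3, 0, 1}.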
33439 SmallVector<int, 64> Mask;
33440 if (BaseMaskEltSizeInBits > 64) {
33441 assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
33442 int MaskScale = BaseMaskEltSizeInBits / 64;
33443 scaleShuffleMask<int>(MaskScale, BaseMask, Mask);
33445 Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
33448 unsigned NumMaskElts = Mask.size();
33449 unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
33451 // Determine the effective mask value type.
33452 FloatDomain &= (32 <= MaskEltSizeInBits);
33453 MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
33454 : MVT::getIntegerVT(MaskEltSizeInBits);
33455 MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
33457 // Only allow legal mask types.
33458 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
33461 // Attempt to match the mask against known shuffle patterns.
33462 MVT ShuffleSrcVT, ShuffleVT;
33463 unsigned Shuffle, PermuteImm;
33465 // Which shuffle domains are permitted?
33466 // Permit domain crossing at higher combine depths.
33467 // TODO: Should we indicate which domain is preferred if both are allowed?
33468 bool AllowFloatDomain = FloatDomain || (Depth >= 3);
33469 bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
33470 (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
33472 // Determine zeroable mask elements.
33473 APInt KnownUndef, KnownZero;
33474 resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
33475 APInt Zeroable = KnownUndef | KnownZero;
33477 if (UnaryShuffle) {
33478 // If we are shuffling a X86ISD::VZEXT_LOAD then we can use the load
33479 // directly if we don't shuffle the lower element and we shuffle the upper
33480 // (zero) elements within themselves.
33481 if (V1.getOpcode() == X86ISD::VZEXT_LOAD &&
33482 (cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() %
33483 MaskEltSizeInBits) == 0) {
33485 cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() /
33487 ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale);
33488 if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) &&
33489 isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) {
33490 return DAG.getBitcast(RootVT, V1);
33494 // Attempt to match against broadcast-from-vector.
33495 // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
33496 if ((Subtarget.hasAVX2() || (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits))
33497 && (!IsEVEXShuffle || NumRootElts == NumMaskElts)) {
33498 SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
33499 if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
33500 if (V1.getValueType() == MaskVT &&
33501 V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
33502 MayFoldLoad(V1.getOperand(0))) {
33503 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
33504 return SDValue(); // Nothing to do!
33505 Res = V1.getOperand(0);
33506 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
33507 return DAG.getBitcast(RootVT, Res);
33509 if (Subtarget.hasAVX2()) {
33510 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
33511 return SDValue(); // Nothing to do!
33512 Res = DAG.getBitcast(MaskVT, V1);
33513 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
33514 return DAG.getBitcast(RootVT, Res);
33519 SDValue NewV1 = V1; // Save operand in case early exit happens.
33520 if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
33521 DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
33523 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33524 if (Depth == 0 && Root.getOpcode() == Shuffle)
33525 return SDValue(); // Nothing to do!
33526 Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
33527 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
33528 return DAG.getBitcast(RootVT, Res);
33531 if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
33532 AllowIntDomain, Subtarget, Shuffle, ShuffleVT,
33534 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33535 if (Depth == 0 && Root.getOpcode() == Shuffle)
33536 return SDValue(); // Nothing to do!
33537 Res = DAG.getBitcast(ShuffleVT, V1);
33538 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
33539 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
33540 return DAG.getBitcast(RootVT, Res);
33544 SDValue NewV1 = V1; // Save operands in case early exit happens.
33545 SDValue NewV2 = V2;
33546 if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
33547 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
33548 ShuffleVT, UnaryShuffle) &&
33549 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33550 if (Depth == 0 && Root.getOpcode() == Shuffle)
33551 return SDValue(); // Nothing to do!
33552 NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
33553 NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
33554 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
33555 return DAG.getBitcast(RootVT, Res);
33558 NewV1 = V1; // Save operands in case early exit happens.
33560 if (matchBinaryPermuteShuffle(
33561 MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1,
33562 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
33563 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33564 if (Depth == 0 && Root.getOpcode() == Shuffle)
33565 return SDValue(); // Nothing to do!
33566 NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
33567 NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
33568 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
33569 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
33570 return DAG.getBitcast(RootVT, Res);
33573 // Typically from here on, we need an integer version of MaskVT.
33574 MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
33575 IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
33577 // Annoyingly, SSE4A instructions don't map into the above match helpers.
33578 if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
33579 uint64_t BitLen, BitIdx;
33580 if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
33582 if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
33583 return SDValue(); // Nothing to do!
33584 V1 = DAG.getBitcast(IntMaskVT, V1);
33585 Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
33586 DAG.getTargetConstant(BitLen, DL, MVT::i8),
33587 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
33588 return DAG.getBitcast(RootVT, Res);
33591 if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
33592 if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
33593 return SDValue(); // Nothing to do!
33594 V1 = DAG.getBitcast(IntMaskVT, V1);
33595 V2 = DAG.getBitcast(IntMaskVT, V2);
33596 Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
33597 DAG.getTargetConstant(BitLen, DL, MVT::i8),
33598 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
33599 return DAG.getBitcast(RootVT, Res);
33603 // Don't try to re-form single instruction chains under any circumstances now
33604 // that we've done encoding canonicalization for them.
33608 // Depth threshold above which we can efficiently use variable mask shuffles.
33609 int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 1 : 2;
33610 AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask;
33612 bool MaskContainsZeros =
33613 any_of(Mask, [](int M) { return M == SM_SentinelZero; });
33615 if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
33616 // If we have a single input lane-crossing shuffle then lower to VPERMV.
33617 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
33618 ((Subtarget.hasAVX2() &&
33619 (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33620 (Subtarget.hasAVX512() &&
33621 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33622 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33623 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33624 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
33625 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33626 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
33627 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33628 Res = DAG.getBitcast(MaskVT, V1);
33629 Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
33630 return DAG.getBitcast(RootVT, Res);
33633 // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
33634 // vector as the second source.
33635 if (UnaryShuffle && AllowVariableMask &&
33636 ((Subtarget.hasAVX512() &&
33637 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33638 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33639 (Subtarget.hasVLX() &&
33640 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
33641 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33642 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33643 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
33644 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33645 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
33646 // Adjust shuffle mask - replace SM_SentinelZero with second source index.
33647 for (unsigned i = 0; i != NumMaskElts; ++i)
33648 if (Mask[i] == SM_SentinelZero)
33649 Mask[i] = NumMaskElts + i;
33651 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33652 Res = DAG.getBitcast(MaskVT, V1);
33653 SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
33654 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
33655 return DAG.getBitcast(RootVT, Res);
33658 // If that failed and either input is extracted then try to combine as a
33659 // shuffle with the larger type.
33660 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
33661 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
33663 return WideShuffle;
33665 // If we have a dual input lane-crossing shuffle then lower to VPERMV3.
33666 if (AllowVariableMask && !MaskContainsZeros &&
33667 ((Subtarget.hasAVX512() &&
33668 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33669 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33670 (Subtarget.hasVLX() &&
33671 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
33672 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33673 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33674 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
33675 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33676 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
33677 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33678 V1 = DAG.getBitcast(MaskVT, V1);
33679 V2 = DAG.getBitcast(MaskVT, V2);
33680 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
33681 return DAG.getBitcast(RootVT, Res);
33686 // See if we can combine a single input shuffle with zeros to a bit-mask,
33687 // which is much simpler than any shuffle.
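// For example, a v4i32 mask <0, zero, 2, zero> keeps elements 0 and 2 in
// place and clears the rest, so it is equivalent to an AND with the constant
// vector <-1, 0, -1, 0> built below.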
33688 if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
33689 isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
33690 DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
33691 APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
33692 APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
33693 APInt UndefElts(NumMaskElts, 0);
33694 SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
33695 for (unsigned i = 0; i != NumMaskElts; ++i) {
33697 if (M == SM_SentinelUndef) {
33698 UndefElts.setBit(i);
33701 if (M == SM_SentinelZero)
33703 EltBits[i] = AllOnes;
33705 SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
33706 Res = DAG.getBitcast(MaskVT, V1);
33707 unsigned AndOpcode =
33708 FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
33709 Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
33710 return DAG.getBitcast(RootVT, Res);
33713 // If we have a single input shuffle with different shuffle patterns in the
33714 // 128-bit lanes, lower it to VPERMILPS with a variable mask.
33715 // TODO: Combine other mask types at higher depths.
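// For example, a v8f32 mask <1,0,3,2, 7,6,5,4> uses a different pattern in
// each 128-bit lane, so no single immediate form applies; the variable mask
// built below selects per element within its own lane via (M % 4).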
33716 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
33717 ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
33718 (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
33719 SmallVector<SDValue, 16> VPermIdx;
33720 for (int M : Mask) {
33722 M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
33723 VPermIdx.push_back(Idx);
33725 SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
33726 Res = DAG.getBitcast(MaskVT, V1);
33727 Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
33728 return DAG.getBitcast(RootVT, Res);
33731 // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
33732 // to VPERMIL2PD/VPERMIL2PS.
33733 if (AllowVariableMask && Subtarget.hasXOP() &&
33734 (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
33735 MaskVT == MVT::v8f32)) {
33736 // VPERMIL2 Operation.
33737 // Bits[3] - Match Bit.
33738 // Bits[2:1] - (Per Lane) PD Shuffle Mask.
33739 // Bits[2:0] - (Per Lane) PS Shuffle Mask.
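// For example, on v4f32 a mask element taking V2's element 1 becomes the
// selector value 5 (1 + NumEltsPerLane), while SM_SentinelZero elements use
// selector 8 together with the match/zero immediate so they are zeroed.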
33740 unsigned NumLanes = MaskVT.getSizeInBits() / 128;
33741 unsigned NumEltsPerLane = NumMaskElts / NumLanes;
33742 SmallVector<int, 8> VPerm2Idx;
33743 unsigned M2ZImm = 0;
33744 for (int M : Mask) {
33745 if (M == SM_SentinelUndef) {
33746 VPerm2Idx.push_back(-1);
33749 if (M == SM_SentinelZero) {
33751 VPerm2Idx.push_back(8);
33754 int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
33755 Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
33756 VPerm2Idx.push_back(Index);
33758 V1 = DAG.getBitcast(MaskVT, V1);
33759 V2 = DAG.getBitcast(MaskVT, V2);
33760 SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
33761 Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
33762 DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
33763 return DAG.getBitcast(RootVT, Res);
33766 // If we have 3 or more shuffle instructions or a chain involving a variable
33767 // mask, we can replace them with a single PSHUFB instruction profitably.
33768 // Intel's manuals suggest only using PSHUFB if doing so replaces 5
33769 // instructions, but in practice PSHUFB tends to be *very* fast so we're
33770 // more aggressive.
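// Note that a PSHUFB mask byte selects a source byte within the same 16-byte
// lane, and setting its top bit (the 255 used below for zeroable elements)
// forces the destination byte to zero.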
33771 if (UnaryShuffle && AllowVariableMask &&
33772 ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
33773 (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
33774 (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
33775 SmallVector<SDValue, 16> PSHUFBMask;
33776 int NumBytes = RootVT.getSizeInBits() / 8;
33777 int Ratio = NumBytes / NumMaskElts;
33778 for (int i = 0; i < NumBytes; ++i) {
33779 int M = Mask[i / Ratio];
33780 if (M == SM_SentinelUndef) {
33781 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
33784 if (M == SM_SentinelZero) {
33785 PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
33788 M = Ratio * M + i % Ratio;
33789 assert((M / 16) == (i / 16) && "Lane crossing detected");
33790 PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
33792 MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
33793 Res = DAG.getBitcast(ByteVT, V1);
33794 SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
33795 Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
33796 return DAG.getBitcast(RootVT, Res);
33799 // With XOP, if we have a 128-bit binary input shuffle we can always combine
33800 // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
33801 // slower than PSHUFB on targets that support both.
33802 if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
33803 // VPPERM Mask Operation
33804 // Bits[4:0] - Byte Index (0 - 31)
33805 // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
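// For example, mask byte 0x11 selects byte 1 of V2 (byte 17 of the
// concatenated V1:V2 pair), while 0x80 (operation 4 in bits 7:5) produces a
// zero byte, hence the 128 constant used below for zeroable elements.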
33806 SmallVector<SDValue, 16> VPPERMMask;
33808 int Ratio = NumBytes / NumMaskElts;
33809 for (int i = 0; i < NumBytes; ++i) {
33810 int M = Mask[i / Ratio];
33811 if (M == SM_SentinelUndef) {
33812 VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
33815 if (M == SM_SentinelZero) {
33816 VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8));
33819 M = Ratio * M + i % Ratio;
33820 VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
33822 MVT ByteVT = MVT::v16i8;
33823 V1 = DAG.getBitcast(ByteVT, V1);
33824 V2 = DAG.getBitcast(ByteVT, V2);
33825 SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
33826 Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
33827 return DAG.getBitcast(RootVT, Res);
33830 // If that failed and either input is extracted then try to combine as a
33831 // shuffle with the larger type.
33832 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
33833 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
33835 return WideShuffle;
33837 // If we have a dual input shuffle then lower to VPERMV3.
33838 if (!UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
33839 ((Subtarget.hasAVX512() &&
33840 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33841 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33842 (Subtarget.hasVLX() &&
33843 (MaskVT == MVT::v2f64 || MaskVT == MVT::v2i64 || MaskVT == MVT::v4f64 ||
33844 MaskVT == MVT::v4i64 || MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 ||
33845 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33846 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33847 (Subtarget.hasBWI() && Subtarget.hasVLX() &&
33848 (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16)) ||
33849 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33850 (Subtarget.hasVBMI() && Subtarget.hasVLX() &&
33851 (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8)))) {
33852 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33853 V1 = DAG.getBitcast(MaskVT, V1);
33854 V2 = DAG.getBitcast(MaskVT, V2);
33855 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
33856 return DAG.getBitcast(RootVT, Res);
33859 // Failed to find any combines.
33863 // Combine an arbitrary chain of shuffles + extract_subvectors into a single
33864 // instruction if possible.
33866 // Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
33867 // type size to attempt to combine:
33868 // shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
33870 // extract_subvector(shuffle(x,y,m2),0)
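// For example, a shuffle of the low and high v4i32 halves extracted from a
// single v8i32 value can instead be expressed as a v8i32 shuffle of that value
// followed by an extract_subvector of the low 128 bits.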
33871 static SDValue combineX86ShuffleChainWithExtract(
33872 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
33873 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
33874 const X86Subtarget &Subtarget) {
33875 unsigned NumMaskElts = BaseMask.size();
33876 unsigned NumInputs = Inputs.size();
33877 if (NumInputs == 0)
33880 SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
33881 SmallVector<unsigned, 4> Offsets(NumInputs, 0);
33883 // Peek through subvectors.
33884 // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
33885 unsigned WideSizeInBits = WideInputs[0].getValueSizeInBits();
33886 for (unsigned i = 0; i != NumInputs; ++i) {
33887 SDValue &Src = WideInputs[i];
33888 unsigned &Offset = Offsets[i];
33889 Src = peekThroughBitcasts(Src);
33890 EVT BaseVT = Src.getValueType();
33891 while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
33892 isa<ConstantSDNode>(Src.getOperand(1))) {
33893 Offset += Src.getConstantOperandVal(1);
33894 Src = Src.getOperand(0);
33896 WideSizeInBits = std::max(WideSizeInBits,
33897 (unsigned)Src.getValueSizeInBits());
33898 assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
33899 "Unexpected subvector extraction");
33900 Offset /= BaseVT.getVectorNumElements();
33901 Offset *= NumMaskElts;
33904 // Bail if we're always extracting from the lowest subvectors;
33905 // combineX86ShuffleChain should match this for the current width.
33906 if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
33909 EVT RootVT = Root.getValueType();
33910 unsigned RootSizeInBits = RootVT.getSizeInBits();
33911 unsigned Scale = WideSizeInBits / RootSizeInBits;
33912 assert((WideSizeInBits % RootSizeInBits) == 0 &&
33913 "Unexpected subvector extraction");
33915 // If the src vector types aren't the same, see if we can extend
33916 // them to match each other.
33917 // TODO: Support different scalar types?
33918 EVT WideSVT = WideInputs[0].getValueType().getScalarType();
33919 if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
33920 return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
33921 Op.getValueType().getScalarType() != WideSVT;
33925 for (SDValue &NewInput : WideInputs) {
33926 assert((WideSizeInBits % NewInput.getValueSizeInBits()) == 0 &&
33927 "Shuffle vector size mismatch");
33928 if (WideSizeInBits > NewInput.getValueSizeInBits())
33929 NewInput = widenSubVector(NewInput, false, Subtarget, DAG,
33930 SDLoc(NewInput), WideSizeInBits);
33931 assert(WideSizeInBits == NewInput.getValueSizeInBits() &&
33932 "Unexpected subvector extraction");
33935 // Create new mask for larger type.
33936 for (unsigned i = 1; i != NumInputs; ++i)
33937 Offsets[i] += i * Scale * NumMaskElts;
33939 SmallVector<int, 64> WideMask(BaseMask.begin(), BaseMask.end());
33940 for (int &M : WideMask) {
33943 M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
33945 WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
33947 // Remove unused/repeated shuffle source ops.
33948 resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
33949 assert(!WideInputs.empty() && "Shuffle with no inputs detected");
33951 if (WideInputs.size() > 2)
33954 // Increase depth for every upper subvector we've peeked through.
33955 Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });
33957 // Attempt to combine wider chain.
33958 // TODO: Can we use a better Root?
33959 SDValue WideRoot = WideInputs[0];
33960 if (SDValue WideShuffle = combineX86ShuffleChain(
33961 WideInputs, WideRoot, WideMask, Depth, HasVariableMask,
33962 AllowVariableMask, DAG, Subtarget)) {
33964 extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
33965 return DAG.getBitcast(RootVT, WideShuffle);
33970 // Attempt to constant fold all of the constant source ops.
33971 // Returns true if the entire shuffle is folded to a constant.
33972 // TODO: Extend this to merge multiple constant Ops and update the mask.
33973 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
33974 ArrayRef<int> Mask, SDValue Root,
33975 bool HasVariableMask,
33977 const X86Subtarget &Subtarget) {
33978 MVT VT = Root.getSimpleValueType();
33980 unsigned SizeInBits = VT.getSizeInBits();
33981 unsigned NumMaskElts = Mask.size();
33982 unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
33983 unsigned NumOps = Ops.size();
33985 // Extract constant bits from each source op.
33986 bool OneUseConstantOp = false;
33987 SmallVector<APInt, 16> UndefEltsOps(NumOps);
33988 SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
33989 for (unsigned i = 0; i != NumOps; ++i) {
33990 SDValue SrcOp = Ops[i];
33991 OneUseConstantOp |= SrcOp.hasOneUse();
33992 if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
33997 // Only fold if at least one of the constants is only used once or
33998 // the combined shuffle has included a variable mask shuffle; this
33999 // avoids constant pool bloat.
34000 if (!OneUseConstantOp && !HasVariableMask)
34003 // Shuffle the constant bits according to the mask.
34005 APInt UndefElts(NumMaskElts, 0);
34006 APInt ZeroElts(NumMaskElts, 0);
34007 APInt ConstantElts(NumMaskElts, 0);
34008 SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
34009 APInt::getNullValue(MaskSizeInBits));
34010 for (unsigned i = 0; i != NumMaskElts; ++i) {
34012 if (M == SM_SentinelUndef) {
34013 UndefElts.setBit(i);
34015 } else if (M == SM_SentinelZero) {
34016 ZeroElts.setBit(i);
34019 assert(0 <= M && M < (int)(NumMaskElts * NumOps));
34021 unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
34022 unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
34024 auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
34025 if (SrcUndefElts[SrcMaskIdx]) {
34026 UndefElts.setBit(i);
34030 auto &SrcEltBits = RawBitsOps[SrcOpIdx];
34031 APInt &Bits = SrcEltBits[SrcMaskIdx];
34033 ZeroElts.setBit(i);
34037 ConstantElts.setBit(i);
34038 ConstantBitData[i] = Bits;
34040 assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
34042 // Attempt to create a zero vector.
34043 if ((UndefElts | ZeroElts).isAllOnesValue())
34044 return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
34046 // Create the constant data.
34048 if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
34049 MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
34051 MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
34053 MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
34054 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
34057 SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
34058 return DAG.getBitcast(VT, CstOp);
34061 /// Fully generic combining of x86 shuffle instructions.
34063 /// This should be the last combine run over the x86 shuffle instructions. Once
34064 /// they have been fully optimized, this will recursively consider all chains
34065 /// of single-use shuffle instructions, build a generic model of the cumulative
34066 /// shuffle operation, and check for simpler instructions which implement this
34067 /// operation. We use this primarily for two purposes:
34069 /// 1) Collapse generic shuffles to specialized single instructions when
34070 /// equivalent. In most cases, this is just an encoding size win, but
34071 /// sometimes we will collapse multiple generic shuffles into a single
34072 /// special-purpose shuffle.
34073 /// 2) Look for sequences of shuffle instructions with 3 or more total
34074 /// instructions, and replace them with the slightly more expensive SSSE3
34075 /// PSHUFB instruction if available. We do this as the last combining step
34076 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
34077 /// a suitable short sequence of other instructions. The PSHUFB will either
34078 /// use a register or have to read from memory and so is slightly (but only
34079 /// slightly) more expensive than the other shuffle instructions.
34081 /// Because this is inherently a quadratic operation (for each shuffle in
34082 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
34083 /// This should never be an issue in practice as the shuffle lowering doesn't
34084 /// produce sequences of more than 8 instructions.
34086 /// FIXME: We will currently miss some cases where the redundant shuffling
34087 /// would simplify under the threshold for PSHUFB formation because of
34088 /// combine-ordering. To fix this, we should do the redundant instruction
34089 /// combining in this recursive walk.
34090 static SDValue combineX86ShufflesRecursively(
34091 ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
34092 ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
34093 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
34094 const X86Subtarget &Subtarget) {
34095 assert(RootMask.size() > 0 &&
34096 (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
34097 "Illegal shuffle root mask");
34099 // Bound the depth of our recursive combine because this is ultimately
34100 // quadratic in nature.
34101 const unsigned MaxRecursionDepth = 8;
34102 if (Depth >= MaxRecursionDepth)
34105 // Directly rip through bitcasts to find the underlying operand.
34106 SDValue Op = SrcOps[SrcOpIndex];
34107 Op = peekThroughOneUseBitcasts(Op);
34109 MVT VT = Op.getSimpleValueType();
34110 if (!VT.isVector())
34111 return SDValue(); // Bail if we hit a non-vector.
34113 assert(Root.getSimpleValueType().isVector() &&
34114 "Shuffles operate on vector types!");
34115 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
34116 "Can only combine shuffles of the same vector register size.");
34118 // Extract target shuffle mask and resolve sentinels and inputs.
34119 // TODO - determine Op's demanded elts from RootMask.
34120 SmallVector<int, 64> OpMask;
34121 SmallVector<SDValue, 2> OpInputs;
34122 APInt OpUndef, OpZero;
34123 APInt OpDemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
34124 bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
34125 if (!getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
34126 OpZero, DAG, Depth, false))
34129 SmallVector<int, 64> Mask;
34130 SmallVector<SDValue, 16> Ops;
34132 // We don't need to merge masks if the root is empty.
34133 bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
34135 // Only resolve zeros if it will remove an input; otherwise we might end
34136 // up in an infinite loop.
34137 bool ResolveKnownZeros = true;
34138 if (!OpZero.isNullValue()) {
34139 APInt UsedInputs = APInt::getNullValue(OpInputs.size());
34140 for (int i = 0, e = OpMask.size(); i != e; ++i) {
34142 if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
34144 UsedInputs.setBit(M / OpMask.size());
34145 if (UsedInputs.isAllOnesValue()) {
34146 ResolveKnownZeros = false;
34151 resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
34152 ResolveKnownZeros);
34155 Ops.append(OpInputs.begin(), OpInputs.end());
34157 resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
34159 // Add the inputs to the Ops list, avoiding duplicates.
34160 Ops.append(SrcOps.begin(), SrcOps.end());
34162 auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
34163 // Attempt to find an existing match.
34164 SDValue InputBC = peekThroughBitcasts(Input);
34165 for (int i = 0, e = Ops.size(); i < e; ++i)
34166 if (InputBC == peekThroughBitcasts(Ops[i]))
34168 // Match failed - should we replace an existing Op?
34169 if (InsertionPoint >= 0) {
34170 Ops[InsertionPoint] = Input;
34171 return InsertionPoint;
34173 // Add to the end of the Ops list.
34174 Ops.push_back(Input);
34175 return Ops.size() - 1;
34178 SmallVector<int, 2> OpInputIdx;
34179 for (SDValue OpInput : OpInputs)
34180 OpInputIdx.push_back(
34181 AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
34183 assert(((RootMask.size() > OpMask.size() &&
34184 RootMask.size() % OpMask.size() == 0) ||
34185 (OpMask.size() > RootMask.size() &&
34186 OpMask.size() % RootMask.size() == 0) ||
34187 OpMask.size() == RootMask.size()) &&
34188 "The smaller number of elements must divide the larger.");
34190 // This function can be performance-critical, so we rely on the power-of-2
34191 // knowledge that we have about the mask sizes to replace div/rem ops with
34192 // bit-masks and shifts.
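// For power-of-2 sizes, i / Ratio == i >> Log2(Ratio) and
// i % Ratio == i & (Ratio - 1), which is what the *RatioLog2 shifts and
// (Ratio - 1) masks below implement.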
34193 assert(isPowerOf2_32(RootMask.size()) &&
34194 "Non-power-of-2 shuffle mask sizes");
34195 assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
34196 unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
34197 unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
34199 unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
34200 unsigned RootRatio =
34201 std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
34202 unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
34203 assert((RootRatio == 1 || OpRatio == 1) &&
34204 "Must not have a ratio for both incoming and op masks!");
34206 assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
34207 assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
34208 assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
34209 unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
34210 unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
34212 Mask.resize(MaskWidth, SM_SentinelUndef);
34214 // Merge this shuffle operation's mask into our accumulated mask. Note that
34215 // this shuffle's mask will be the first applied to the input, followed by
34216 // the root mask to get us all the way to the root value arrangement. The
34217 // reason for this order is that we are recursing up the operation chain.
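// For example (illustrative), with a root mask <0,2,1,3> and an op mask
// <4,5,6,7> (every element taken from the op's second input), the merged mask
// becomes <4,6,5,7>, rebased onto that input's slot in Ops (assuming that
// input occupies slot 1).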
34218 for (unsigned i = 0; i < MaskWidth; ++i) {
34219 unsigned RootIdx = i >> RootRatioLog2;
34220 if (RootMask[RootIdx] < 0) {
34221 // This is a zero or undef lane, we're done.
34222 Mask[i] = RootMask[RootIdx];
34226 unsigned RootMaskedIdx =
34228 ? RootMask[RootIdx]
34229 : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
34231 // Just insert the scaled root mask value if it references an input other
34232 // than the SrcOp we're currently inserting.
34233 if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
34234 (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
34235 Mask[i] = RootMaskedIdx;
34239 RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
34240 unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
34241 if (OpMask[OpIdx] < 0) {
34242 // The incoming lanes are zero or undef; it doesn't matter which ones we use.
34244 Mask[i] = OpMask[OpIdx];
34248 // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
34249 unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
34250 : (OpMask[OpIdx] << OpRatioLog2) +
34251 (RootMaskedIdx & (OpRatio - 1));
34253 OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
34254 int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
34255 assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
34256 OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
34258 Mask[i] = OpMaskedIdx;
34262 // Remove unused/repeated shuffle source ops.
34263 resolveTargetShuffleInputsAndMask(Ops, Mask);
34265 // Handle the all undef/zero cases early.
34266 if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
34267 return DAG.getUNDEF(Root.getValueType());
34269 // TODO - should we handle the mixed zero/undef case as well? Just returning
34270 // a zero mask will lose information on undef elements possibly reducing
34271 // future combine possibilities.
34272 if (all_of(Mask, [](int Idx) { return Idx < 0; }))
34273 return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
34276 assert(!Ops.empty() && "Shuffle with no inputs detected");
34277 HasVariableMask |= IsOpVariableMask;
34279 // Update the list of shuffle nodes that have been combined so far.
34280 SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
34282 CombinedNodes.push_back(Op.getNode());
34284 // See if we can recurse into each shuffle source op (if it's a target
34285 // shuffle). The source op should only be generally combined if it either has
34286 // a single use (i.e. current Op) or all its users have already been combined;
34287 // if not, we can still combine but should prevent generation of variable
34288 // shuffles to avoid constant pool bloat.
34289 // Don't recurse if we already have more source ops than we can combine in
34290 // the remaining recursion depth.
34291 if (Ops.size() < (MaxRecursionDepth - Depth)) {
34292 for (int i = 0, e = Ops.size(); i < e; ++i) {
34293 // For empty roots, we need to resolve zeroable elements before combining
34294 // them with other shuffles.
34295 SmallVector<int, 64> ResolvedMask = Mask;
34297 resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
34298 bool AllowVar = false;
34299 if (Ops[i].getNode()->hasOneUse() ||
34300 SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
34301 AllowVar = AllowVariableMask;
34302 if (SDValue Res = combineX86ShufflesRecursively(
34303 Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1,
34304 HasVariableMask, AllowVar, DAG, Subtarget))
34309 // Attempt to constant fold all of the constant source ops.
34310 if (SDValue Cst = combineX86ShufflesConstants(
34311 Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
34314 // We can only combine unary and binary shuffle mask cases.
34315 if (Ops.size() <= 2) {
34316 // Minor canonicalization of the accumulated shuffle mask to make it easier
34317 // to match below. All this does is detect masks with sequential pairs of
34318 // elements, and shrink them to the half-width mask. It does this in a loop
34319 // so it will reduce the size of the mask to the minimal width mask which
34320 // performs an equivalent shuffle.
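// For example, the 8-element mask <0,1, 4,5, 2,3, 6,7> pairs up into the
// 4-element mask <0,2,1,3>, describing the same shuffle on elements that are
// twice as wide.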
34321 SmallVector<int, 64> WidenedMask;
34322 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
34323 Mask = std::move(WidenedMask);
34326 // Canonicalization of binary shuffle masks to improve pattern matching by
34327 // commuting the inputs.
34328 if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
34329 ShuffleVectorSDNode::commuteMask(Mask);
34330 std::swap(Ops[0], Ops[1]);
34333 // Finally, try to combine into a single shuffle instruction.
34334 return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
34335 AllowVariableMask, DAG, Subtarget);
34338 // If that failed and any input is extracted then try to combine as a
34339 // shuffle with the larger type.
34340 return combineX86ShuffleChainWithExtract(Ops, Root, Mask, Depth,
34341 HasVariableMask, AllowVariableMask,
34345 /// Helper entry wrapper to combineX86ShufflesRecursively.
34346 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
34347 const X86Subtarget &Subtarget) {
34348 return combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 0,
34349 /*HasVarMask*/ false,
34350 /*AllowVarMask*/ true, DAG, Subtarget);
34353 /// Get the PSHUF-style mask from PSHUF node.
34355 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
34356 /// PSHUF-style masks that can be reused with such instructions.
34357 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
34358 MVT VT = N.getSimpleValueType();
34359 SmallVector<int, 4> Mask;
34360 SmallVector<SDValue, 2> Ops;
34363 getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
34367 // If we have more than 128 bits, only the low 128 bits of the shuffle mask
34368 // matter. Check that the upper masks are repeats and remove them.
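// For example, a v16i16 PSHUFLW with low-lane mask <1,0,3,2, 4,5,6,7> must
// have the high-lane mask <9,8,11,10, 12,13,14,15>; only the low-lane portion
// is kept.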
34369 if (VT.getSizeInBits() > 128) {
34370 int LaneElts = 128 / VT.getScalarSizeInBits();
34372 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
34373 for (int j = 0; j < LaneElts; ++j)
34374 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
34375 "Mask doesn't repeat in high 128-bit lanes!");
34377 Mask.resize(LaneElts);
34380 switch (N.getOpcode()) {
34381 case X86ISD::PSHUFD:
34383 case X86ISD::PSHUFLW:
34386 case X86ISD::PSHUFHW:
34387 Mask.erase(Mask.begin(), Mask.begin() + 4);
34388 for (int &M : Mask)
34392 llvm_unreachable("No valid shuffle instruction found!");
34396 /// Search for a combinable shuffle across a chain ending in pshufd.
34398 /// We walk up the chain and look for a combinable shuffle, skipping over
34399 /// shuffles that we could hoist this shuffle's transformation past without
34400 /// altering anything.
34402 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
34403 SelectionDAG &DAG) {
34404 assert(N.getOpcode() == X86ISD::PSHUFD &&
34405 "Called with something other than an x86 128-bit half shuffle!");
34408 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
34409 // of the shuffles in the chain so that we can form a fresh chain to replace
34411 SmallVector<SDValue, 8> Chain;
34412 SDValue V = N.getOperand(0);
34413 for (; V.hasOneUse(); V = V.getOperand(0)) {
34414 switch (V.getOpcode()) {
34416 return SDValue(); // Nothing combined!
34419 // Skip bitcasts as we always know the type for the target specific
// instructions.
34423 case X86ISD::PSHUFD:
34424 // Found another dword shuffle.
34427 case X86ISD::PSHUFLW:
34428 // Check that the low words (being shuffled) are the identity in the
34429 // dword shuffle, and the high words are self-contained.
34430 if (Mask[0] != 0 || Mask[1] != 1 ||
34431 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
34434 Chain.push_back(V);
34437 case X86ISD::PSHUFHW:
34438 // Check that the high words (being shuffled) are the identity in the
34439 // dword shuffle, and the low words are self-contained.
34440 if (Mask[2] != 2 || Mask[3] != 3 ||
34441 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
34444 Chain.push_back(V);
34447 case X86ISD::UNPCKL:
34448 case X86ISD::UNPCKH:
34449 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
34450 // shuffle into a preceding word shuffle.
34451 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
34452 V.getSimpleValueType().getVectorElementType() != MVT::i16)
34455 // Search for a half-shuffle which we can combine with.
34456 unsigned CombineOp =
34457 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
34458 if (V.getOperand(0) != V.getOperand(1) ||
34459 !V->isOnlyUserOf(V.getOperand(0).getNode()))
34461 Chain.push_back(V);
34462 V = V.getOperand(0);
34464 switch (V.getOpcode()) {
34466 return SDValue(); // Nothing to combine.
34468 case X86ISD::PSHUFLW:
34469 case X86ISD::PSHUFHW:
34470 if (V.getOpcode() == CombineOp)
34473 Chain.push_back(V);
34477 V = V.getOperand(0);
34481 } while (V.hasOneUse());
34484 // Break out of the loop if we break out of the switch.
34488 if (!V.hasOneUse())
34489 // We fell out of the loop without finding a viable combining instruction.
34492 // Merge this node's mask and our incoming mask.
34493 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
34494 for (int &M : Mask)
34496 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
34497 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
34499 // Rebuild the chain around this new shuffle.
34500 while (!Chain.empty()) {
34501 SDValue W = Chain.pop_back_val();
34503 if (V.getValueType() != W.getOperand(0).getValueType())
34504 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
34506 switch (W.getOpcode()) {
34508 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
34510 case X86ISD::UNPCKL:
34511 case X86ISD::UNPCKH:
34512 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
34515 case X86ISD::PSHUFD:
34516 case X86ISD::PSHUFLW:
34517 case X86ISD::PSHUFHW:
34518 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
34522 if (V.getValueType() != N.getValueType())
34523 V = DAG.getBitcast(N.getValueType(), V);
34525 // Return the new chain to replace N.
34529 /// Try to combine x86 target specific shuffles.
34530 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
34531 TargetLowering::DAGCombinerInfo &DCI,
34532 const X86Subtarget &Subtarget) {
34534 MVT VT = N.getSimpleValueType();
34535 SmallVector<int, 4> Mask;
34536 unsigned Opcode = N.getOpcode();
34538 // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
34539 // single instruction.
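// For example, unpcklpd(hadd(a,b), hadd(c,d)) takes element 0 of each
// result, i.e. <a0+a1, c0+c1>, which is exactly hadd(a,c).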
34540 if (VT.getScalarSizeInBits() == 64 &&
34541 (Opcode == X86ISD::MOVSD || Opcode == X86ISD::UNPCKH ||
34542 Opcode == X86ISD::UNPCKL)) {
34543 auto BC0 = peekThroughBitcasts(N.getOperand(0));
34544 auto BC1 = peekThroughBitcasts(N.getOperand(1));
34545 EVT VT0 = BC0.getValueType();
34546 EVT VT1 = BC1.getValueType();
34547 unsigned Opcode0 = BC0.getOpcode();
34548 unsigned Opcode1 = BC1.getOpcode();
34549 if (Opcode0 == Opcode1 && VT0 == VT1 &&
34550 (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
34551 Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB ||
34552 Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
34554 if (Opcode == X86ISD::MOVSD) {
34555 Lo = BC1.getOperand(0);
34556 Hi = BC0.getOperand(1);
34558 Lo = BC0.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
34559 Hi = BC1.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
34561 SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
34562 return DAG.getBitcast(VT, Horiz);
34567 case X86ISD::VBROADCAST: {
34568 SDValue Src = N.getOperand(0);
34569 SDValue BC = peekThroughBitcasts(Src);
34570 EVT SrcVT = Src.getValueType();
34571 EVT BCVT = BC.getValueType();
34573 // If broadcasting from another shuffle, attempt to simplify it.
34574 // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
34575 if (isTargetShuffle(BC.getOpcode()) &&
34576 VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
34577 unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
34578 SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
34580 for (unsigned i = 0; i != Scale; ++i)
34581 DemandedMask[i] = i;
34582 if (SDValue Res = combineX86ShufflesRecursively(
34583 {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
34584 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
34585 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
34586 DAG.getBitcast(SrcVT, Res));
34589 // broadcast(bitcast(src)) -> bitcast(broadcast(src))
34590 // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
34591 if (Src.getOpcode() == ISD::BITCAST &&
34592 SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits()) {
34593 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
34594 VT.getVectorNumElements());
34595 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
34598 // Reduce broadcast source vector to lowest 128-bits.
34599 if (SrcVT.getSizeInBits() > 128)
34600 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
34601 extract128BitVector(Src, 0, DAG, DL));
34603 // broadcast(scalar_to_vector(x)) -> broadcast(x).
34604 if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
34605 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
34607 // Share broadcast with the longest vector and extract low subvector (free).
34608 for (SDNode *User : Src->uses())
34609 if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
34610 User->getValueSizeInBits(0) > VT.getSizeInBits()) {
34611 return extractSubVector(SDValue(User, 0), 0, DAG, DL,
34612 VT.getSizeInBits());
34615 // vbroadcast(scalarload X) -> vbroadcast_load X
34616 // For float loads, extract other uses of the scalar from the broadcast.
34617 if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
34618 ISD::isNormalLoad(Src.getNode())) {
34619 LoadSDNode *LN = cast<LoadSDNode>(Src);
34620 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
34621 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
34623 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
34624 LN->getMemoryVT(), LN->getMemOperand());
34625 // If the load value is used only by N, replace it via CombineTo N.
34626 bool NoReplaceExtract = Src.hasOneUse();
34627 DCI.CombineTo(N.getNode(), BcastLd);
34628 if (NoReplaceExtract) {
34629 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
34630 DCI.recursivelyDeleteUnusedNodes(LN);
34632 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
34633 DAG.getIntPtrConstant(0, DL));
34634 DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
34636 return N; // Return N so it doesn't get rechecked!
34641 case X86ISD::BLENDI: {
34642 SDValue N0 = N.getOperand(0);
34643 SDValue N1 = N.getOperand(1);
34645 // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
34646 // TODO: Handle MVT::v16i16 repeated blend mask.
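// For example (illustrative), a v4i64 blend mask 0b0101 over bitcast v8i32
// operands scales to the v8i32 blend mask 0b00110011, each original bit being
// repeated Scale = 2 times.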
34647 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
34648 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
34649 MVT SrcVT = N0.getOperand(0).getSimpleValueType();
34650 if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
34651 SrcVT.getScalarSizeInBits() >= 32) {
34652 unsigned BlendMask = N.getConstantOperandVal(2);
34653 unsigned Size = VT.getVectorNumElements();
34654 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
34655 BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
34656 return DAG.getBitcast(
34657 VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
34659 DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
34664 case X86ISD::VPERMI: {
34665 // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
34666 // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
34667 SDValue N0 = N.getOperand(0);
34668 SDValue N1 = N.getOperand(1);
34669 unsigned EltSizeInBits = VT.getScalarSizeInBits();
34670 if (N0.getOpcode() == ISD::BITCAST &&
34671 N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
34672 SDValue Src = N0.getOperand(0);
34673 EVT SrcVT = Src.getValueType();
34674 SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
34675 return DAG.getBitcast(VT, Res);
34679 case X86ISD::PSHUFD:
34680 case X86ISD::PSHUFLW:
34681 case X86ISD::PSHUFHW:
34682 Mask = getPSHUFShuffleMask(N);
34683 assert(Mask.size() == 4);
34685 case X86ISD::MOVSD:
34686 case X86ISD::MOVSS: {
34687 SDValue N0 = N.getOperand(0);
34688 SDValue N1 = N.getOperand(1);
34690 // Canonicalize scalar FPOps:
34691 // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
34692 // If commutable, allow OP(N1[0], N0[0]).
34693 unsigned Opcode1 = N1.getOpcode();
34694 if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
34695 Opcode1 == ISD::FDIV) {
34696 SDValue N10 = N1.getOperand(0);
34697 SDValue N11 = N1.getOperand(1);
34699 (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
34701 std::swap(N10, N11);
34702 MVT SVT = VT.getVectorElementType();
34703 SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
34704 N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
34705 N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
34706 SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
34707 SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
34708 return DAG.getNode(Opcode, DL, VT, N0, SclVec);
34714 case X86ISD::INSERTPS: {
34715 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
34716 SDValue Op0 = N.getOperand(0);
34717 SDValue Op1 = N.getOperand(1);
34718 SDValue Op2 = N.getOperand(2);
34719 unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
34720 unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
34721 unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
34722 unsigned ZeroMask = InsertPSMask & 0xF;
34724 // If we zero out all elements from Op0 then we don't need to reference it.
34725 if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
34726 return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
34727 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34729 // If we zero out the element from Op1 then we don't need to reference it.
34730 if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
34731 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
34732 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34734 // Attempt to merge insertps Op1 with an inner target shuffle node.
34735 SmallVector<int, 8> TargetMask1;
34736 SmallVector<SDValue, 2> Ops1;
34737 APInt KnownUndef1, KnownZero1;
34738 if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
34740 if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
34741 // Zero/UNDEF insertion - zero out element and remove dependency.
34742 InsertPSMask |= (1u << DstIdx);
34743 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
34744 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34746 // Update insertps mask srcidx and reference the source input directly.
34747 int M = TargetMask1[SrcIdx];
34748 assert(0 <= M && M < 8 && "Shuffle index out of range");
34749 InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
34750 Op1 = Ops1[M < 4 ? 0 : 1];
34751 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
34752 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34755 // Attempt to merge insertps Op0 with an inner target shuffle node.
34756 SmallVector<int, 8> TargetMask0;
34757 SmallVector<SDValue, 2> Ops0;
34758 APInt KnownUndef0, KnownZero0;
34759 if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
34761 bool Updated = false;
34762 bool UseInput00 = false;
34763 bool UseInput01 = false;
34764 for (int i = 0; i != 4; ++i) {
34765 if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
34766 // No change if element is already zero or the inserted element.
34768 } else if (KnownUndef0[i] || KnownZero0[i]) {
34769 // If the target mask is undef/zero then we must zero the element.
34770 InsertPSMask |= (1u << i);
34775 // The input vector element must be inline.
34776 int M = TargetMask0[i];
34777 if (M != i && M != (i + 4))
34780 // Determine which inputs of the target shuffle we're using.
34781 UseInput00 |= (0 <= M && M < 4);
34782 UseInput01 |= (4 <= M);
34785 // If we're not using both inputs of the target shuffle then use the
34786 // referenced input directly.
34787 if (UseInput00 && !UseInput01) {
34790 } else if (!UseInput00 && UseInput01) {
34796 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
34797 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34800 // If we're inserting an element from a vbroadcast load, fold the
34801 // load into the X86insertps instruction. We need to convert the scalar
34802 // load to a vector and clear the source lane of the INSERTPS control.
34803 if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
34804 auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
34805 if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
34806 SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
34807 MemIntr->getBasePtr(),
34808 MemIntr->getMemOperand());
34809 SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
34810 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
34812 DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
34813 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
34824 // Nuke no-op shuffles that show up after combining.
34825 if (isNoopShuffleMask(Mask))
34826 return N.getOperand(0);
34828 // Look for simplifications involving one or two shuffle instructions.
34829 SDValue V = N.getOperand(0);
34830 switch (N.getOpcode()) {
34833 case X86ISD::PSHUFLW:
34834 case X86ISD::PSHUFHW:
34835 assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
34837 // See if this reduces to a PSHUFD which is no more expensive and can
34838 // combine with more operations. Note that it has to at least flip the
34839 // dwords as otherwise it would have been removed as a no-op.
34840 if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
34841 int DMask[] = {0, 1, 2, 3};
34842 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
34843 DMask[DOffset + 0] = DOffset + 1;
34844 DMask[DOffset + 1] = DOffset + 0;
34845 MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
34846 V = DAG.getBitcast(DVT, V);
34847 V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
34848 getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
34849 return DAG.getBitcast(VT, V);
34852 // Look for shuffle patterns which can be implemented as a single unpack.
34853 // FIXME: This doesn't handle the location of the PSHUFD generically, and
34854 // only works when we have a PSHUFD followed by two half-shuffles.
34855 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
34856 (V.getOpcode() == X86ISD::PSHUFLW ||
34857 V.getOpcode() == X86ISD::PSHUFHW) &&
34858 V.getOpcode() != N.getOpcode() &&
34860 SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
34861 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
34862 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
34863 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
34864 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
34865 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
34867 for (int i = 0; i < 4; ++i) {
34868 WordMask[i + NOffset] = Mask[i] + NOffset;
34869 WordMask[i + VOffset] = VMask[i] + VOffset;
34871 // Map the word mask through the DWord mask.
34873 for (int i = 0; i < 8; ++i)
34874 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
34875 if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
34876 makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
34877 // We can replace all three shuffles with an unpack.
34878 V = DAG.getBitcast(VT, D.getOperand(0));
34879 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
34888 case X86ISD::PSHUFD:
34889 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
34898 /// Checks if the shuffle mask takes subsequent elements
34899 /// alternately from two vectors.
34900 /// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
34901 static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
34903 int ParitySrc[2] = {-1, -1};
34904 unsigned Size = Mask.size();
34905 for (unsigned i = 0; i != Size; ++i) {
34910 // Make sure we are using the matching element from the input.
34911 if ((M % Size) != i)
34914 // Make sure we use the same input for all elements of the same parity.
34915 int Src = M / Size;
34916 if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
34918 ParitySrc[i % 2] = Src;
34921 // Make sure each input is used.
34922 if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
34925 Op0Even = ParitySrc[0] == 0;
34929 /// Returns true iff the shuffle node \p N can be replaced with an ADDSUB(SUBADD)
34930 /// operation. If true is returned then the operands of the ADDSUB(SUBADD)
34931 /// operation are written to the parameters \p Opnd0 and \p Opnd1.
34933 /// We combine shuffles to ADDSUB(SUBADD) directly on the abstract vector shuffle
34934 /// nodes, as that makes them easier to match generically. We also insert dummy
34935 /// vector shuffle nodes for the operands which explicitly discard the lanes which
34936 /// are unused by this operation, so that the fact that they're unused can flow
34937 /// through the rest of the combiner.
34938 static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
34939 SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
34942 EVT VT = N->getValueType(0);
34943 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
34944 if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
34945 !VT.getSimpleVT().isFloatingPoint())
34948 // We only handle target-independent shuffles.
34949 // FIXME: It would be easy and harmless to use the target shuffle mask
34950 // extraction tool to support more.
34951 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
34954 SDValue V1 = N->getOperand(0);
34955 SDValue V2 = N->getOperand(1);
34957 // Make sure we have an FADD and an FSUB.
34958 if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
34959 (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
34960 V1.getOpcode() == V2.getOpcode())
34963 // If there are other uses of these operations we can't fold them.
34964 if (!V1->hasOneUse() || !V2->hasOneUse())
34967 // Ensure that both operations have the same operands. Note that we can
34968 // commute the FADD operands.
34970 if (V1.getOpcode() == ISD::FSUB) {
34971 LHS = V1->getOperand(0); RHS = V1->getOperand(1);
34972 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
34973 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
34976 assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
34977 LHS = V2->getOperand(0); RHS = V2->getOperand(1);
34978 if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
34979 (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
34983 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
34985 if (!isAddSubOrSubAddMask(Mask, Op0Even))
34988 // It's a subadd if the vector in the even parity is an FADD.
34989 IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
34990 : V2->getOpcode() == ISD::FADD;
34997 /// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
34998 static SDValue combineShuffleToFMAddSub(SDNode *N,
34999 const X86Subtarget &Subtarget,
35000 SelectionDAG &DAG) {
35001 // We only handle target-independent shuffles.
35002 // FIXME: It would be easy and harmless to use the target shuffle mask
35003 // extraction tool to support more.
35004 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
35007 MVT VT = N->getSimpleValueType(0);
35008 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35009 if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
35012 // We're trying to match shuffle(fma(a, b, c), X86Fmsub(a, b, c)).
35013 SDValue Op0 = N->getOperand(0);
35014 SDValue Op1 = N->getOperand(1);
35015 SDValue FMAdd = Op0, FMSub = Op1;
35016 if (FMSub.getOpcode() != X86ISD::FMSUB)
35017 std::swap(FMAdd, FMSub);
35019 if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
35020 FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
35021 FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
35022 FMAdd.getOperand(2) != FMSub.getOperand(2))
35025 // Check for correct shuffle mask.
35026 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
35028 if (!isAddSubOrSubAddMask(Mask, Op0Even))
35031 // FMAddSub takes zeroth operand from FMSub node.
35033 bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
35034 unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
35035 return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
35036 FMAdd.getOperand(2));
35039 /// Try to combine a shuffle into a target-specific add-sub or
35040 /// mul-add-sub node.
35041 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
35042 const X86Subtarget &Subtarget,
35043 SelectionDAG &DAG) {
35044 if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
35047 SDValue Opnd0, Opnd1;
35049 if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
35052 MVT VT = N->getSimpleValueType(0);
35055 // Try to generate X86ISD::FMADDSUB node here.
35057 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
35058 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
35059 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
35065 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
35066 // the ADDSUB idiom has been successfully recognized. There are no known
35067 // X86 targets with 512-bit ADDSUB instructions!
35068 if (VT.is512BitVector())
35071 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
35074 // We are looking for a shuffle where both sources are concatenated with undef
35075 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
35076 // if we can express this as a single-source shuffle, that's preferable.
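// For example (illustrative): a v8i32 shuffle of (concat_vectors t1, undef)
// and (concat_vectors t2, undef) with mask <0,8,1,9,2,10,3,11> becomes a
// shuffle of (concat_vectors t1, t2) with undef and mask <0,4,1,5,2,6,3,7>,
// which AVX2 can lower with a single-source VPERMD.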
35077 static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
35078 const X86Subtarget &Subtarget) {
35079 if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
35082 EVT VT = N->getValueType(0);
35084 // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
35085 if (!VT.is128BitVector() && !VT.is256BitVector())
35088 if (VT.getVectorElementType() != MVT::i32 &&
35089 VT.getVectorElementType() != MVT::i64 &&
35090 VT.getVectorElementType() != MVT::f32 &&
35091 VT.getVectorElementType() != MVT::f64)
35094 SDValue N0 = N->getOperand(0);
35095 SDValue N1 = N->getOperand(1);
35097 // Check that both sources are concats with undef.
35098 if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
35099 N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
35100 N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
35101 !N1.getOperand(1).isUndef())
35104 // Construct the new shuffle mask. Elements from the first source retain their
35105 // index, but elements from the second source no longer need to skip an undef.
35106 SmallVector<int, 8> Mask;
35107 int NumElts = VT.getVectorNumElements();
35109 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
35110 for (int Elt : SVOp->getMask())
35111 Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
35114 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
35116 return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
35119 /// Eliminate a redundant shuffle of a horizontal math op.
35120 static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
35121 unsigned Opcode = N->getOpcode();
35122 if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
35123 if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
35126 // For a broadcast, peek through an extract element of index 0 to find the
35127 // horizontal op: broadcast (ext_vec_elt HOp, 0)
35128 EVT VT = N->getValueType(0);
35129 if (Opcode == X86ISD::VBROADCAST) {
35130 SDValue SrcOp = N->getOperand(0);
35131 if (SrcOp.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
35132 SrcOp.getValueType() == MVT::f64 &&
35133 SrcOp.getOperand(0).getValueType() == VT &&
35134 isNullConstant(SrcOp.getOperand(1)))
35135 N = SrcOp.getNode();
35138 SDValue HOp = N->getOperand(0);
35139 if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
35140 HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
35143 // 128-bit horizontal math instructions are defined to operate on adjacent
35144 // lanes of each operand as:
35145 // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
35146 // ...similarly for v2f64 and v8i16.
35147 if (!HOp.getOperand(0).isUndef() && !HOp.getOperand(1).isUndef() &&
35148 HOp.getOperand(0) != HOp.getOperand(1))
35151 // The shuffle that we are eliminating may have allowed the horizontal op to
35152 // have an undemanded (undefined) operand. Duplicate the other (defined)
35153 // operand to ensure that the results are defined across all lanes without the shuffle.
35155 auto updateHOp = [](SDValue HorizOp, SelectionDAG &DAG) {
35157 if (HorizOp.getOperand(0).isUndef()) {
35158 assert(!HorizOp.getOperand(1).isUndef() && "Not expecting foldable h-op");
35159 X = HorizOp.getOperand(1);
35160 } else if (HorizOp.getOperand(1).isUndef()) {
35161 assert(!HorizOp.getOperand(0).isUndef() && "Not expecting foldable h-op");
35162 X = HorizOp.getOperand(0);
35166 return DAG.getNode(HorizOp.getOpcode(), SDLoc(HorizOp),
35167 HorizOp.getValueType(), X, X);
35170 // When the operands of a horizontal math op are identical, the low half of
35171 // the result is the same as the high half. If a target shuffle is also
35172 // replicating low and high halves (and without changing the type/length of
35173 // the vector), we don't need the shuffle.
35174 if (Opcode == X86ISD::MOVDDUP || Opcode == X86ISD::VBROADCAST) {
35175 if (HOp.getScalarValueSizeInBits() == 64 && HOp.getValueType() == VT) {
35176 // movddup (hadd X, X) --> hadd X, X
35177 // broadcast (extract_vec_elt (hadd X, X), 0) --> hadd X, X
35178 assert((HOp.getValueType() == MVT::v2f64 ||
35179 HOp.getValueType() == MVT::v4f64) && "Unexpected type for h-op");
35180 return updateHOp(HOp, DAG);
35185 // shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
35186 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
35187 // TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
35188 // but this should be tied to whatever horizontal op matching and shuffle
35189 // canonicalization are producing.
35190 if (HOp.getValueSizeInBits() == 128 &&
35191 (isTargetShuffleEquivalent(Mask, {0, 0}) ||
35192 isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
35193 isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
35194 return updateHOp(HOp, DAG);
35196 if (HOp.getValueSizeInBits() == 256 &&
35197 (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
35198 isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
35199 isTargetShuffleEquivalent(
35200 Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
35201 return updateHOp(HOp, DAG);
35206 /// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
35207 /// low half of each source vector and does not set any high half elements in
35208 /// the destination vector, narrow the shuffle to half its original size.
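/// For example (illustrative): a v8f32 shuffle of A and B with mask
/// <0,1,8,9,undef,undef,undef,undef> only reads the low v4f32 half of each
/// source and leaves the high half of the result undefined, so it can be
/// rewritten as a v4f32 shuffle of the low halves plus a free subvector
/// insert.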
35209 static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
35210 if (!Shuf->getValueType(0).isSimple())
35212 MVT VT = Shuf->getSimpleValueType(0);
35213 if (!VT.is256BitVector() && !VT.is512BitVector())
35216 // See if we can ignore all of the high elements of the shuffle.
35217 ArrayRef<int> Mask = Shuf->getMask();
35218 if (!isUndefUpperHalf(Mask))
35221 // Check if the shuffle mask accesses only the low half of each input vector
35222 // (half-index output is 0 or 2).
35223 int HalfIdx1, HalfIdx2;
35224 SmallVector<int, 8> HalfMask(Mask.size() / 2);
35225 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
35226 (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
35229 // Create a half-width shuffle to replace the unnecessarily wide shuffle.
35230 // The trick is knowing that all of the insert/extract are actually free
35231 // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
35232 // of narrow inputs into a narrow output, and that is always cheaper than
35233 // the wide shuffle that we started with.
35234 return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
35235 Shuf->getOperand(1), HalfMask, HalfIdx1,
35236 HalfIdx2, false, DAG, /*UseConcat*/true);
35239 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
35240 TargetLowering::DAGCombinerInfo &DCI,
35241 const X86Subtarget &Subtarget) {
35242 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
35243 if (SDValue V = narrowShuffle(Shuf, DAG))
35246 // If we have legalized the vector types, look for blends of FADD and FSUB
35247 // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
35249 EVT VT = N->getValueType(0);
35250 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35251 if (TLI.isTypeLegal(VT)) {
35252 if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
35255 if (SDValue HAddSub = foldShuffleOfHorizOp(N, DAG))
35259 // Attempt to combine into a vector load/broadcast.
35260 if (SDValue LD = combineToConsecutiveLoads(VT, N, dl, DAG, Subtarget, true))
35263 // For AVX2, we sometimes want to combine
35264 // (vector_shuffle <mask> (concat_vectors t1, undef)
35265 // (concat_vectors t2, undef))
35267 // into (vector_shuffle <mask> (concat_vectors t1, t2), undef)
35268 // Since the latter can be efficiently lowered with VPERMD/VPERMQ
35269 if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
35272 if (isTargetShuffle(N->getOpcode())) {
35274 if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
35277 // Try recursively combining arbitrary sequences of x86 shuffle
35278 // instructions into higher-order shuffles. We do this after combining
35279 // specific PSHUF instruction sequences into their minimal form so that we
35280 // can evaluate how many specialized shuffle instructions are involved in
35281 // a particular chain.
35282 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
35285 // Simplify source operands based on shuffle mask.
35286 // TODO - merge this into combineX86ShufflesRecursively.
35287 APInt KnownUndef, KnownZero;
35288 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
35289 if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, DCI))
35290 return SDValue(N, 0);
35293 // Look for a v2i64/v2f64 VZEXT_MOVL of a node that already produces zeros
35294 // in the upper 64 bits.
35295 // TODO: Can we generalize this using computeKnownBits.
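// For example (illustrative): (vzext_movl (v2i64 bitcast (v4i32 cvttp2si X)))
// with a v2f64 source is redundant because CVTTPD2DQ already zeroes the upper
// 64 bits of its 128-bit result, so returning the bitcast is enough.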
35296 if (N->getOpcode() == X86ISD::VZEXT_MOVL &&
35297 (VT == MVT::v2f64 || VT == MVT::v2i64) &&
35298 N->getOperand(0).getOpcode() == ISD::BITCAST &&
35299 (N->getOperand(0).getOperand(0).getValueType() == MVT::v4f32 ||
35300 N->getOperand(0).getOperand(0).getValueType() == MVT::v4i32)) {
35301 SDValue In = N->getOperand(0).getOperand(0);
35302 switch (In.getOpcode()) {
35305 case X86ISD::CVTP2SI: case X86ISD::CVTP2UI:
35306 case X86ISD::MCVTP2SI: case X86ISD::MCVTP2UI:
35307 case X86ISD::CVTTP2SI: case X86ISD::CVTTP2UI:
35308 case X86ISD::MCVTTP2SI: case X86ISD::MCVTTP2UI:
35309 case X86ISD::CVTSI2P: case X86ISD::CVTUI2P:
35310 case X86ISD::MCVTSI2P: case X86ISD::MCVTUI2P:
35311 case X86ISD::VFPROUND: case X86ISD::VMFPROUND:
35312 if (In.getOperand(0).getValueType() == MVT::v2f64 ||
35313 In.getOperand(0).getValueType() == MVT::v2i64)
35314 return N->getOperand(0); // return the bitcast
35316 case X86ISD::STRICT_CVTTP2SI:
35317 case X86ISD::STRICT_CVTTP2UI:
35318 case X86ISD::STRICT_CVTSI2P:
35319 case X86ISD::STRICT_CVTUI2P:
35320 case X86ISD::STRICT_VFPROUND:
35321 if (In.getOperand(1).getValueType() == MVT::v2f64 ||
35322 In.getOperand(1).getValueType() == MVT::v2i64)
35323 return N->getOperand(0);
35328 // Pull subvector inserts into undef through VZEXT_MOVL by making it an
35329 // insert into a zero vector. This helps get VZEXT_MOVL closer to
35330 // scalar_to_vectors where 256/512 are canonicalized to an insert and a
35331 // 128-bit scalar_to_vector. This reduces the number of isel patterns.
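// For example (illustrative):
//   (vzext_movl (insert_subvector undef, X, 0))
// --> (insert_subvector (zero vector), (vzext_movl X), 0)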
35332 if (N->getOpcode() == X86ISD::VZEXT_MOVL && !DCI.isBeforeLegalizeOps() &&
35333 N->getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR &&
35334 N->getOperand(0).hasOneUse() &&
35335 N->getOperand(0).getOperand(0).isUndef() &&
35336 isNullConstant(N->getOperand(0).getOperand(2))) {
35337 SDValue In = N->getOperand(0).getOperand(1);
35338 SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, dl, In.getValueType(), In);
35339 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT,
35340 getZeroVector(VT.getSimpleVT(), Subtarget, DAG, dl),
35341 Movl, N->getOperand(0).getOperand(2));
35344 // If this a vzmovl of a full vector load, replace it with a vzload, unless
35345 // the load is volatile.
35346 if (N->getOpcode() == X86ISD::VZEXT_MOVL && N->getOperand(0).hasOneUse() &&
35347 ISD::isNormalLoad(N->getOperand(0).getNode())) {
35348 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
35349 if (LN->isSimple()) {
35350 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
35351 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
35353 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
35354 VT.getVectorElementType(),
35355 LN->getPointerInfo(),
35356 LN->getAlignment(),
35357 MachineMemOperand::MOLoad);
35358 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
35366 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
35367 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
35368 TargetLoweringOpt &TLO, unsigned Depth) const {
35369 int NumElts = DemandedElts.getBitWidth();
35370 unsigned Opc = Op.getOpcode();
35371 EVT VT = Op.getValueType();
35373 // Handle special case opcodes.
35375 case X86ISD::PMULDQ:
35376 case X86ISD::PMULUDQ: {
35377 APInt LHSUndef, LHSZero;
35378 APInt RHSUndef, RHSZero;
35379 SDValue LHS = Op.getOperand(0);
35380 SDValue RHS = Op.getOperand(1);
35381 if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
35384 if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
35387 // Multiply by zero.
35388 KnownZero = LHSZero | RHSZero;
35393 case X86ISD::VSRA: {
35394 // We only need the bottom 64-bits of the (128-bit) shift amount.
35395 SDValue Amt = Op.getOperand(1);
35396 MVT AmtVT = Amt.getSimpleValueType();
35397 assert(AmtVT.is128BitVector() && "Unexpected value type");
35399 // If the shift amount is only ever used as an SSE vector shift amount then we
35400 // know that only its bottom 64 bits are ever used.
35401 bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
35402 unsigned UseOpc = Use->getOpcode();
35403 return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
35404 UseOpc == X86ISD::VSRA) &&
35405 Use->getOperand(0) != Amt;
35408 APInt AmtUndef, AmtZero;
35409 unsigned NumAmtElts = AmtVT.getVectorNumElements();
35410 APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
35411 if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
35412 Depth + 1, AssumeSingleUse))
35416 case X86ISD::VSHLI:
35417 case X86ISD::VSRLI:
35418 case X86ISD::VSRAI: {
35419 SDValue Src = Op.getOperand(0);
35421 if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
35424 // TODO convert SrcUndef to KnownUndef.
35427 case X86ISD::KSHIFTL: {
35428 SDValue Src = Op.getOperand(0);
35429 auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
35430 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
35431 unsigned ShiftAmt = Amt->getZExtValue();
35434 return TLO.CombineTo(Op, Src);
35436 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
35437 // single shift. We can do this if the bottom bits (which are shifted
35438 // out) are never demanded.
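// For example (illustrative): (kshiftl (kshiftr X, 2), 3) --> (kshiftl X, 1)
// when none of the low 3 elements of the result are demanded.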
35439 if (Src.getOpcode() == X86ISD::KSHIFTR) {
35440 if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
35441 unsigned C1 = Src.getConstantOperandVal(1);
35442 unsigned NewOpc = X86ISD::KSHIFTL;
35443 int Diff = ShiftAmt - C1;
35446 NewOpc = X86ISD::KSHIFTR;
35450 SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
35451 return TLO.CombineTo(
35452 Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
35456 APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
35457 if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
35461 KnownUndef <<= ShiftAmt;
35462 KnownZero <<= ShiftAmt;
35463 KnownZero.setLowBits(ShiftAmt);
35466 case X86ISD::KSHIFTR: {
35467 SDValue Src = Op.getOperand(0);
35468 auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
35469 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
35470 unsigned ShiftAmt = Amt->getZExtValue();
35473 return TLO.CombineTo(Op, Src);
35475 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
35476 // single shift. We can do this if the top bits (which are shifted
35477 // out) are never demanded.
35478 if (Src.getOpcode() == X86ISD::KSHIFTL) {
35479 if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
35480 unsigned C1 = Src.getConstantOperandVal(1);
35481 unsigned NewOpc = X86ISD::KSHIFTR;
35482 int Diff = ShiftAmt - C1;
35485 NewOpc = X86ISD::KSHIFTL;
35489 SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
35490 return TLO.CombineTo(
35491 Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
35495 APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
35496 if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
35500 KnownUndef.lshrInPlace(ShiftAmt);
35501 KnownZero.lshrInPlace(ShiftAmt);
35502 KnownZero.setHighBits(ShiftAmt);
35505 case X86ISD::CVTSI2P:
35506 case X86ISD::CVTUI2P: {
35507 SDValue Src = Op.getOperand(0);
35508 MVT SrcVT = Src.getSimpleValueType();
35509 APInt SrcUndef, SrcZero;
35510 APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
35511 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
35516 case X86ISD::PACKSS:
35517 case X86ISD::PACKUS: {
35518 SDValue N0 = Op.getOperand(0);
35519 SDValue N1 = Op.getOperand(1);
35521 APInt DemandedLHS, DemandedRHS;
35522 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
35524 APInt SrcUndef, SrcZero;
35525 if (SimplifyDemandedVectorElts(N0, DemandedLHS, SrcUndef, SrcZero, TLO,
35528 if (SimplifyDemandedVectorElts(N1, DemandedRHS, SrcUndef, SrcZero, TLO,
35532 // Aggressively peek through ops to get at the demanded elts.
35533 // TODO - we should do this for all target/faux shuffle ops.
35534 if (!DemandedElts.isAllOnesValue()) {
35535 APInt DemandedSrcBits =
35536 APInt::getAllOnesValue(N0.getScalarValueSizeInBits());
35537 SDValue NewN0 = SimplifyMultipleUseDemandedBits(
35538 N0, DemandedSrcBits, DemandedLHS, TLO.DAG, Depth + 1);
35539 SDValue NewN1 = SimplifyMultipleUseDemandedBits(
35540 N1, DemandedSrcBits, DemandedRHS, TLO.DAG, Depth + 1);
35541 if (NewN0 || NewN1) {
35542 NewN0 = NewN0 ? NewN0 : N0;
35543 NewN1 = NewN1 ? NewN1 : N1;
35544 return TLO.CombineTo(Op,
35545 TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
35552 case X86ISD::FHADD:
35553 case X86ISD::FHSUB: {
35554 APInt DemandedLHS, DemandedRHS;
35555 getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
35557 APInt LHSUndef, LHSZero;
35558 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, LHSUndef,
35559 LHSZero, TLO, Depth + 1))
35561 APInt RHSUndef, RHSZero;
35562 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, RHSUndef,
35563 RHSZero, TLO, Depth + 1))
35567 case X86ISD::VTRUNC:
35568 case X86ISD::VTRUNCS:
35569 case X86ISD::VTRUNCUS: {
35570 SDValue Src = Op.getOperand(0);
35571 MVT SrcVT = Src.getSimpleValueType();
35572 APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
35573 APInt SrcUndef, SrcZero;
35574 if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
35577 KnownZero = SrcZero.zextOrTrunc(NumElts);
35578 KnownUndef = SrcUndef.zextOrTrunc(NumElts);
35581 case X86ISD::BLENDV: {
35582 APInt SelUndef, SelZero;
35583 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
35584 SelZero, TLO, Depth + 1))
35587 // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
35588 APInt LHSUndef, LHSZero;
35589 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
35590 LHSZero, TLO, Depth + 1))
35593 APInt RHSUndef, RHSZero;
35594 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
35595 RHSZero, TLO, Depth + 1))
35598 KnownZero = LHSZero & RHSZero;
35599 KnownUndef = LHSUndef & RHSUndef;
35602 case X86ISD::VBROADCAST: {
35603 SDValue Src = Op.getOperand(0);
35604 MVT SrcVT = Src.getSimpleValueType();
35605 if (!SrcVT.isVector())
35607 // Don't bother broadcasting if we just need the 0'th element.
35608 if (DemandedElts == 1) {
35609 if (Src.getValueType() != VT)
35610 Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
35612 return TLO.CombineTo(Op, Src);
35614 APInt SrcUndef, SrcZero;
35615 APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
35616 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
35621 case X86ISD::VPERMV: {
35622 SDValue Mask = Op.getOperand(0);
35623 APInt MaskUndef, MaskZero;
35624 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
35629 case X86ISD::PSHUFB:
35630 case X86ISD::VPERMV3:
35631 case X86ISD::VPERMILPV: {
35632 SDValue Mask = Op.getOperand(1);
35633 APInt MaskUndef, MaskZero;
35634 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
35639 case X86ISD::VPPERM:
35640 case X86ISD::VPERMIL2: {
35641 SDValue Mask = Op.getOperand(2);
35642 APInt MaskUndef, MaskZero;
35643 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
35650 // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
35651 // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
35652 // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
35653 if ((VT.is256BitVector() || VT.is512BitVector()) &&
35654 DemandedElts.lshr(NumElts / 2) == 0) {
35655 unsigned SizeInBits = VT.getSizeInBits();
35656 unsigned ExtSizeInBits = SizeInBits / 2;
35658 // See if 512-bit ops only use the bottom 128-bits.
35659 if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
35660 ExtSizeInBits = SizeInBits / 4;
35663 // Zero upper elements.
35664 case X86ISD::VZEXT_MOVL: {
35667 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
35669 TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0);
35670 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35672 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
35673 return TLO.CombineTo(Op, Insert);
35675 // Subvector broadcast.
35676 case X86ISD::SUBV_BROADCAST: {
35678 SDValue Src = Op.getOperand(0);
35679 if (Src.getValueSizeInBits() > ExtSizeInBits)
35680 Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
35681 else if (Src.getValueSizeInBits() < ExtSizeInBits) {
35682 MVT SrcSVT = Src.getSimpleValueType().getScalarType();
35684 MVT::getVectorVT(SrcSVT, ExtSizeInBits / SrcSVT.getSizeInBits());
35685 Src = TLO.DAG.getNode(X86ISD::SUBV_BROADCAST, DL, SrcVT, Src);
35687 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Src, 0,
35688 TLO.DAG, DL, ExtSizeInBits));
35690 // Byte shifts by immediate.
35691 case X86ISD::VSHLDQ:
35692 case X86ISD::VSRLDQ:
35693 // Shift by uniform.
35697 // Shift by immediate.
35698 case X86ISD::VSHLI:
35699 case X86ISD::VSRLI:
35700 case X86ISD::VSRAI: {
35703 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
35705 TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
35706 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35708 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
35709 return TLO.CombineTo(Op, Insert);
35711 case X86ISD::VPERMI: {
35712 // Simplify PERMPD/PERMQ to extract_subvector.
35713 // TODO: This should be done in shuffle combining.
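// For example (illustrative): a v4i64 VPERMQ with mask <2,3,u,u>, of which
// only the low two elements are demanded, just moves the source's upper
// 128 bits down, so it becomes an extract_subvector of the high half
// inserted at index 0.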
35714 if (VT == MVT::v4f64 || VT == MVT::v4i64) {
35715 SmallVector<int, 4> Mask;
35716 DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
35717 if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
35719 SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
35720 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35721 SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
35722 return TLO.CombineTo(Op, Insert);
35727 // Target Shuffles.
35728 case X86ISD::PSHUFB:
35729 case X86ISD::UNPCKL:
35730 case X86ISD::UNPCKH:
35731 // Saturated Packs.
35732 case X86ISD::PACKSS:
35733 case X86ISD::PACKUS:
35737 case X86ISD::FHADD:
35738 case X86ISD::FHSUB: {
35740 MVT ExtVT = VT.getSimpleVT();
35741 ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
35742 ExtSizeInBits / ExtVT.getScalarSizeInBits());
35744 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
35746 extractSubVector(Op.getOperand(1), 0, TLO.DAG, DL, ExtSizeInBits);
35747 SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ext0, Ext1);
35748 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35750 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
35751 return TLO.CombineTo(Op, Insert);
35756 // Get target/faux shuffle mask.
35757 APInt OpUndef, OpZero;
35758 SmallVector<int, 64> OpMask;
35759 SmallVector<SDValue, 2> OpInputs;
35760 if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
35761 OpZero, TLO.DAG, Depth, false))
35764 // Shuffle inputs must be the same size as the result.
35765 if (OpMask.size() != (unsigned)NumElts ||
35766 llvm::any_of(OpInputs, [VT](SDValue V) {
35767 return VT.getSizeInBits() != V.getValueSizeInBits() ||
35768 !V.getValueType().isVector();
35772 KnownZero = OpZero;
35773 KnownUndef = OpUndef;
35775 // Check if shuffle mask can be simplified to undef/zero/identity.
35776 int NumSrcs = OpInputs.size();
35777 for (int i = 0; i != NumElts; ++i)
35778 if (!DemandedElts[i])
35779 OpMask[i] = SM_SentinelUndef;
35781 if (isUndefInRange(OpMask, 0, NumElts)) {
35782 KnownUndef.setAllBits();
35783 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
35785 if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
35786 KnownZero.setAllBits();
35787 return TLO.CombineTo(
35788 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
35790 for (int Src = 0; Src != NumSrcs; ++Src)
35791 if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
35792 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
35794 // Attempt to simplify inputs.
35795 for (int Src = 0; Src != NumSrcs; ++Src) {
35796 // TODO: Support inputs of different types.
35797 if (OpInputs[Src].getValueType() != VT)
35800 int Lo = Src * NumElts;
35801 APInt SrcElts = APInt::getNullValue(NumElts);
35802 for (int i = 0; i != NumElts; ++i)
35803 if (DemandedElts[i]) {
35804 int M = OpMask[i] - Lo;
35805 if (0 <= M && M < NumElts)
35809 // TODO - Propagate input undef/zero elts.
35810 APInt SrcUndef, SrcZero;
35811 if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
35816 // If we don't demand all elements, then attempt to combine to a simpler
35818 // TODO: Handle other depths, but first we need to handle the fact that
35819 // it might combine to the same shuffle.
35820 if (!DemandedElts.isAllOnesValue() && Depth == 0) {
35821 SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
35822 for (int i = 0; i != NumElts; ++i)
35823 if (DemandedElts[i])
35824 DemandedMask[i] = i;
35826 SDValue NewShuffle = combineX86ShufflesRecursively(
35827 {Op}, 0, Op, DemandedMask, {}, Depth, /*HasVarMask*/ false,
35828 /*AllowVarMask*/ true, TLO.DAG, Subtarget);
35830 return TLO.CombineTo(Op, NewShuffle);
35836 bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
35837 SDValue Op, const APInt &OriginalDemandedBits,
35838 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
35839 unsigned Depth) const {
35840 EVT VT = Op.getValueType();
35841 unsigned BitWidth = OriginalDemandedBits.getBitWidth();
35842 unsigned Opc = Op.getOpcode();
35844 case X86ISD::PMULDQ:
35845 case X86ISD::PMULUDQ: {
35846 // PMULDQ/PMULUDQ only use the lower 32 bits from each vector element.
35848 SDValue LHS = Op.getOperand(0);
35849 SDValue RHS = Op.getOperand(1);
35850 // FIXME: Can we bound this better?
35851 APInt DemandedMask = APInt::getLowBitsSet(64, 32);
35852 if (SimplifyDemandedBits(LHS, DemandedMask, OriginalDemandedElts, KnownOp,
35855 if (SimplifyDemandedBits(RHS, DemandedMask, OriginalDemandedElts, KnownOp,
35859 // Aggressively peek through ops to get at the demanded low bits.
35860 SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
35861 LHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
35862 SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
35863 RHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
35864 if (DemandedLHS || DemandedRHS) {
35865 DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
35866 DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
35867 return TLO.CombineTo(
35868 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
35872 case X86ISD::VSHLI: {
35873 SDValue Op0 = Op.getOperand(0);
35875 unsigned ShAmt = Op.getConstantOperandVal(1);
35876 if (ShAmt >= BitWidth)
35879 APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
35881 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
35882 // single shift. We can do this if the bottom bits (which are shifted
35883 // out) are never demanded.
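// For example (illustrative): (vshli (vsrli X, 3), 5) --> (vshli X, 2)
// when the low 5 bits of each result element are not demanded.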
35884 if (Op0.getOpcode() == X86ISD::VSRLI &&
35885 OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
35886 unsigned Shift2Amt = Op0.getConstantOperandVal(1);
35887 if (Shift2Amt < BitWidth) {
35888 int Diff = ShAmt - Shift2Amt;
35890 return TLO.CombineTo(Op, Op0.getOperand(0));
35892 unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
35893 SDValue NewShift = TLO.DAG.getNode(
35894 NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
35895 TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
35896 return TLO.CombineTo(Op, NewShift);
35900 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
35904 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
35905 Known.Zero <<= ShAmt;
35906 Known.One <<= ShAmt;
35908 // Low bits known zero.
35909 Known.Zero.setLowBits(ShAmt);
35912 case X86ISD::VSRLI: {
35913 unsigned ShAmt = Op.getConstantOperandVal(1);
35914 if (ShAmt >= BitWidth)
35917 APInt DemandedMask = OriginalDemandedBits << ShAmt;
35919 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
35920 OriginalDemandedElts, Known, TLO, Depth + 1))
35923 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
35924 Known.Zero.lshrInPlace(ShAmt);
35925 Known.One.lshrInPlace(ShAmt);
35927 // High bits known zero.
35928 Known.Zero.setHighBits(ShAmt);
35931 case X86ISD::VSRAI: {
35932 SDValue Op0 = Op.getOperand(0);
35933 SDValue Op1 = Op.getOperand(1);
35935 unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
35936 if (ShAmt >= BitWidth)
35939 APInt DemandedMask = OriginalDemandedBits << ShAmt;
35941 // If we just want the sign bit then we don't need to shift it.
35942 if (OriginalDemandedBits.isSignMask())
35943 return TLO.CombineTo(Op, Op0);
35945 // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
35946 if (Op0.getOpcode() == X86ISD::VSHLI &&
35947 Op.getOperand(1) == Op0.getOperand(1)) {
35948 SDValue Op00 = Op0.getOperand(0);
35949 unsigned NumSignBits =
35950 TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
35951 if (ShAmt < NumSignBits)
35952 return TLO.CombineTo(Op, Op00);
35955 // If any of the demanded bits are produced by the sign extension, we also
35956 // demand the input sign bit.
35957 if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
35958 DemandedMask.setSignBit();
35960 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
35964 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
35965 Known.Zero.lshrInPlace(ShAmt);
35966 Known.One.lshrInPlace(ShAmt);
35968 // If the input sign bit is known to be zero, or if none of the top bits
35969 // are demanded, turn this into an unsigned shift right.
35970 if (Known.Zero[BitWidth - ShAmt - 1] ||
35971 OriginalDemandedBits.countLeadingZeros() >= ShAmt)
35972 return TLO.CombineTo(
35973 Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
35975 // High bits are known one.
35976 if (Known.One[BitWidth - ShAmt - 1])
35977 Known.One.setHighBits(ShAmt);
35980 case X86ISD::PEXTRB:
35981 case X86ISD::PEXTRW: {
35982 SDValue Vec = Op.getOperand(0);
35983 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
35984 MVT VecVT = Vec.getSimpleValueType();
35985 unsigned NumVecElts = VecVT.getVectorNumElements();
35987 if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
35988 unsigned Idx = CIdx->getZExtValue();
35989 unsigned VecBitWidth = VecVT.getScalarSizeInBits();
35991 // If we demand no bits from the vector then we must have demanded
35992 // bits from the implicit zext - simplify to zero.
35993 APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
35994 if (DemandedVecBits == 0)
35995 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
35997 APInt KnownUndef, KnownZero;
35998 APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
35999 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
36000 KnownZero, TLO, Depth + 1))
36003 KnownBits KnownVec;
36004 if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
36005 KnownVec, TLO, Depth + 1))
36008 if (SDValue V = SimplifyMultipleUseDemandedBits(
36009 Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
36010 return TLO.CombineTo(
36011 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
36013 Known = KnownVec.zext(BitWidth, true);
36018 case X86ISD::PINSRB:
36019 case X86ISD::PINSRW: {
36020 SDValue Vec = Op.getOperand(0);
36021 SDValue Scl = Op.getOperand(1);
36022 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
36023 MVT VecVT = Vec.getSimpleValueType();
36025 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
36026 unsigned Idx = CIdx->getZExtValue();
36027 if (!OriginalDemandedElts[Idx])
36028 return TLO.CombineTo(Op, Vec);
36030 KnownBits KnownVec;
36031 APInt DemandedVecElts(OriginalDemandedElts);
36032 DemandedVecElts.clearBit(Idx);
36033 if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
36034 KnownVec, TLO, Depth + 1))
36037 KnownBits KnownScl;
36038 unsigned NumSclBits = Scl.getScalarValueSizeInBits();
36039 APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
36040 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
36043 KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
36044 Known.One = KnownVec.One & KnownScl.One;
36045 Known.Zero = KnownVec.Zero & KnownScl.Zero;
36050 case X86ISD::PACKSS:
36051 // PACKSS saturates to MIN/MAX integer values. So if we just want the
36052 // sign bit then we can just ask for the source operand's sign bit.
36053 // TODO - add known bits handling.
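// For example (illustrative): PACKSSDW saturates a negative i32 to a
// negative i16 and a non-negative i32 to a non-negative i16, so the packed
// element's sign bit always equals the source element's sign bit.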
36054 if (OriginalDemandedBits.isSignMask()) {
36055 APInt DemandedLHS, DemandedRHS;
36056 getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
36058 KnownBits KnownLHS, KnownRHS;
36059 APInt SignMask = APInt::getSignMask(BitWidth * 2);
36060 if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
36061 KnownLHS, TLO, Depth + 1))
36063 if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
36064 KnownRHS, TLO, Depth + 1))
36067 // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
36069 case X86ISD::PCMPGT:
36070 // icmp sgt(0, R) == ashr(R, BitWidth-1).
36071 // If we only need the sign bit then we can use R directly.
36072 if (OriginalDemandedBits.isSignMask() &&
36073 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
36074 return TLO.CombineTo(Op, Op.getOperand(1));
36076 case X86ISD::MOVMSK: {
36077 SDValue Src = Op.getOperand(0);
36078 MVT SrcVT = Src.getSimpleValueType();
36079 unsigned SrcBits = SrcVT.getScalarSizeInBits();
36080 unsigned NumElts = SrcVT.getVectorNumElements();
36082 // If we don't need the sign bits at all just return zero.
36083 if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
36084 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
36086 // Only demand the vector elements of the sign bits we need.
36087 APInt KnownUndef, KnownZero;
36088 APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
36089 if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
36093 Known.Zero = KnownZero.zextOrSelf(BitWidth);
36094 Known.Zero.setHighBits(BitWidth - NumElts);
36096 // MOVMSK only uses the MSB from each vector element.
36097 KnownBits KnownSrc;
36098 if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), DemandedElts,
36099 KnownSrc, TLO, Depth + 1))
36102 if (KnownSrc.One[SrcBits - 1])
36103 Known.One.setLowBits(NumElts);
36104 else if (KnownSrc.Zero[SrcBits - 1])
36105 Known.Zero.setLowBits(NumElts);
36110 return TargetLowering::SimplifyDemandedBitsForTargetNode(
36111 Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
36114 SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
36115 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
36116 SelectionDAG &DAG, unsigned Depth) const {
36117 int NumElts = DemandedElts.getBitWidth();
36118 unsigned Opc = Op.getOpcode();
36119 EVT VT = Op.getValueType();
36122 case X86ISD::PINSRB:
36123 case X86ISD::PINSRW: {
36124 // If we don't demand the inserted element, return the base vector.
36125 SDValue Vec = Op.getOperand(0);
36126 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
36127 MVT VecVT = Vec.getSimpleValueType();
36128 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
36129 !DemandedElts[CIdx->getZExtValue()])
36133 case X86ISD::PCMPGT:
36134 // icmp sgt(0, R) == ashr(R, BitWidth-1).
36135 // If we only need the sign bit then we can use R directly.
36136 if (DemandedBits.isSignMask() &&
36137 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
36138 return Op.getOperand(1);
36142 APInt ShuffleUndef, ShuffleZero;
36143 SmallVector<int, 16> ShuffleMask;
36144 SmallVector<SDValue, 2> ShuffleOps;
36145 if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
36146 ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
36147 // If all the demanded elts are from one operand and are inline,
36148 // then we can use the operand directly.
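// For example (illustrative): for a blend of A and B with mask <0,5,2,7>,
// if only lanes 0 and 2 are demanded, both come from A at their original
// index, so A can be returned directly (as a bitcast to the result type).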
36149 int NumOps = ShuffleOps.size();
36150 if (ShuffleMask.size() == (unsigned)NumElts &&
36151 llvm::all_of(ShuffleOps, [VT](SDValue V) {
36152 return VT.getSizeInBits() == V.getValueSizeInBits();
36155 if (DemandedElts.isSubsetOf(ShuffleUndef))
36156 return DAG.getUNDEF(VT);
36157 if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
36158 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
36160 // Bitmask that indicates which ops have only been accessed 'inline'.
36161 APInt IdentityOp = APInt::getAllOnesValue(NumOps);
36162 for (int i = 0; i != NumElts; ++i) {
36163 int M = ShuffleMask[i];
36164 if (!DemandedElts[i] || ShuffleUndef[i])
36166 int Op = M / NumElts;
36167 int Index = M % NumElts;
36168 if (M < 0 || Index != i) {
36169 IdentityOp.clearAllBits();
36172 IdentityOp &= APInt::getOneBitSet(NumOps, Op);
36173 if (IdentityOp == 0)
36176 assert((IdentityOp == 0 || IdentityOp.countPopulation() == 1) &&
36177 "Multiple identity shuffles detected");
36179 if (IdentityOp != 0)
36180 return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countTrailingZeros()]);
36184 return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
36185 Op, DemandedBits, DemandedElts, DAG, Depth);
36188 // Helper to peek through bitops/setcc to determine size of source vector.
36189 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
36190 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size) {
36191 switch (Src.getOpcode()) {
36193 return Src.getOperand(0).getValueSizeInBits() == Size;
36197 return checkBitcastSrcVectorSize(Src.getOperand(0), Size) &&
36198 checkBitcastSrcVectorSize(Src.getOperand(1), Size);
36203 // Helper to push sign extension of vXi1 SETCC result through bitops.
36204 static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
36205 SDValue Src, const SDLoc &DL) {
36206 switch (Src.getOpcode()) {
36208 return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
36212 return DAG.getNode(
36213 Src.getOpcode(), DL, SExtVT,
36214 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
36215 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
36217 llvm_unreachable("Unexpected node type for vXi1 sign extension");
36220 // Try to match patterns such as
36221 // (i16 bitcast (v16i1 x))
36223 // (i16 movmsk (v16i8 sext (v16i1 x)))
36224 // before the illegal vector is scalarized on subtargets that don't have legal
36226 static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
36228 const X86Subtarget &Subtarget) {
36229 EVT SrcVT = Src.getValueType();
36230 if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
36233 // If the input is a truncate from v16i8 or v32i8 go ahead and use a
36234 // movmskb even with avx512. This will be better than truncating to vXi1 and
36235 // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
36236 // vpcmpeqb/vpcmpgtb.
36237 bool IsTruncated = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
36238 (Src.getOperand(0).getValueType() == MVT::v16i8 ||
36239 Src.getOperand(0).getValueType() == MVT::v32i8 ||
36240 Src.getOperand(0).getValueType() == MVT::v64i8);
36242 // With AVX512 vxi1 types are legal and we prefer using k-regs.
36243 // MOVMSK is supported in SSE2 or later.
36244 if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !IsTruncated))
36247 // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
36248 // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
36249 // v8i16 and v16i16.
36250 // For these two cases, we can shuffle the upper element bytes to a
36251 // consecutive sequence at the start of the vector and treat the results as
36252 // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
36253 // for v16i16 this is not the case, because the shuffle is expensive, so we
36254 // avoid sign-extending to this type entirely.
36255 // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
36256 // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
36258 bool PropagateSExt = false;
36259 switch (SrcVT.getSimpleVT().SimpleTy) {
36263 SExtVT = MVT::v2i64;
36266 SExtVT = MVT::v4i32;
36267 // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
36268 // sign-extend to a 256-bit operation to avoid truncation.
36269 if (Subtarget.hasAVX() && checkBitcastSrcVectorSize(Src, 256)) {
36270 SExtVT = MVT::v4i64;
36271 PropagateSExt = true;
36275 SExtVT = MVT::v8i16;
36276 // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
36277 // sign-extend to a 256-bit operation to match the compare.
36278 // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
36279 // 256-bit because the shuffle is cheaper than sign extending the result of the setcc.
36281 if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256) ||
36282 checkBitcastSrcVectorSize(Src, 512))) {
36283 SExtVT = MVT::v8i32;
36284 PropagateSExt = true;
36288 SExtVT = MVT::v16i8;
36289 // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
36290 // it is not profitable to sign-extend to 256-bit because this will
36291 // require an extra cross-lane shuffle which is more expensive than
36292 // truncating the result of the compare to 128-bits.
36295 SExtVT = MVT::v32i8;
36298 // If we have AVX512F but not AVX512BW, and the input was truncated from v64i8
36299 // (checked earlier), then split the input and make two pmovmskbs.
36300 if (Subtarget.hasAVX512() && !Subtarget.hasBWI()) {
36301 SExtVT = MVT::v64i8;
36307 SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
36308 : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
36310 if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
36311 V = getPMOVMSKB(DL, V, DAG, Subtarget);
36313 if (SExtVT == MVT::v8i16)
36314 V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
36315 DAG.getUNDEF(MVT::v8i16));
36316 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
36320 EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
36321 V = DAG.getZExtOrTrunc(V, DL, IntVT);
36322 return DAG.getBitcast(VT, V);
36325 // Convert a vXi1 constant build vector to the same width scalar integer.
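// For example (illustrative): the v4i1 constant <1,0,1,1> becomes the i4
// constant 0b1101, with bit i of the integer taken from element i of the
// vector.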
36326 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
36327 EVT SrcVT = Op.getValueType();
36328 assert(SrcVT.getVectorElementType() == MVT::i1 &&
36329 "Expected a vXi1 vector");
36330 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
36331 "Expected a constant build vector");
36333 APInt Imm(SrcVT.getVectorNumElements(), 0);
36334 for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
36335 SDValue In = Op.getOperand(Idx);
36336 if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
36339 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
36340 return DAG.getConstant(Imm, SDLoc(Op), IntVT);
36343 static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
36344 TargetLowering::DAGCombinerInfo &DCI,
36345 const X86Subtarget &Subtarget) {
36346 assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
36348 if (!DCI.isBeforeLegalizeOps())
36351 // Only do this if we have k-registers.
36352 if (!Subtarget.hasAVX512())
36355 EVT DstVT = N->getValueType(0);
36356 SDValue Op = N->getOperand(0);
36357 EVT SrcVT = Op.getValueType();
36359 if (!Op.hasOneUse())
36362 // Look for logic ops.
36363 if (Op.getOpcode() != ISD::AND &&
36364 Op.getOpcode() != ISD::OR &&
36365 Op.getOpcode() != ISD::XOR)
36368 // Make sure we have a bitcast between mask registers and a scalar type.
36369 if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
36370 DstVT.isScalarInteger()) &&
36371 !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
36372 SrcVT.isScalarInteger()))
36375 SDValue LHS = Op.getOperand(0);
36376 SDValue RHS = Op.getOperand(1);
36378 if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
36379 LHS.getOperand(0).getValueType() == DstVT)
36380 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
36381 DAG.getBitcast(DstVT, RHS));
36383 if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
36384 RHS.getOperand(0).getValueType() == DstVT)
36385 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
36386 DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
36388 // If the RHS is a vXi1 build vector, this is a good reason to flip too.
36389 // Most of these have to move a constant from the scalar domain anyway.
36390 if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
36391 RHS = combinevXi1ConstantToInteger(RHS, DAG);
36392 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
36393 DAG.getBitcast(DstVT, LHS), RHS);
36399 static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
36400 const X86Subtarget &Subtarget) {
36402 unsigned NumElts = BV->getNumOperands();
36403 SDValue Splat = BV->getSplatValue();
36405 // Build MMX element from integer GPR or SSE float values.
36406 auto CreateMMXElement = [&](SDValue V) {
36408 return DAG.getUNDEF(MVT::x86mmx);
36409 if (V.getValueType().isFloatingPoint()) {
36410 if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
36411 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
36412 V = DAG.getBitcast(MVT::v2i64, V);
36413 return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
36415 V = DAG.getBitcast(MVT::i32, V);
36417 V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
36419 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
36422 // Convert build vector ops to MMX data in the bottom elements.
36423 SmallVector<SDValue, 8> Ops;
36425 // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
36427 if (Splat.isUndef())
36428 return DAG.getUNDEF(MVT::x86mmx);
36430 Splat = CreateMMXElement(Splat);
36432 if (Subtarget.hasSSE1()) {
36433 // Unpack v8i8 to splat i8 elements to lowest 16-bits.
36435 Splat = DAG.getNode(
36436 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
36437 DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
36440 // Use PSHUFW to repeat 16-bit elements.
36441 unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
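// (Illustrative note: an immediate of 0 broadcasts word 0 to all four words
// of the PSHUFW result, while 0x44 selects words <0,1,0,1>, i.e. it repeats
// the low 32-bit element for v2i32/v2f32 splats.)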
36442 return DAG.getNode(
36443 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
36444 DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32),
36445 Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
36447 Ops.append(NumElts, Splat);
36449 for (unsigned i = 0; i != NumElts; ++i)
36450 Ops.push_back(CreateMMXElement(BV->getOperand(i)));
36453 // Use tree of PUNPCKLs to build up general MMX vector.
36454 while (Ops.size() > 1) {
36455 unsigned NumOps = Ops.size();
36456 unsigned IntrinOp =
36457 (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
36458 : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
36459 : Intrinsic::x86_mmx_punpcklbw));
36460 SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
36461 for (unsigned i = 0; i != NumOps; i += 2)
36462 Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
36463 Ops[i], Ops[i + 1]);
36464 Ops.resize(NumOps / 2);
36470 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
36471 TargetLowering::DAGCombinerInfo &DCI,
36472 const X86Subtarget &Subtarget) {
36473 SDValue N0 = N->getOperand(0);
36474 EVT VT = N->getValueType(0);
36475 EVT SrcVT = N0.getValueType();
36477 // Try to match patterns such as
36478 // (i16 bitcast (v16i1 x))
36480 // (i16 movmsk (v16i8 sext (v16i1 x)))
36481 // before the setcc result is scalarized on subtargets that don't have legal
36483 if (DCI.isBeforeLegalize()) {
36485 if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
36488 // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
36489 // legalization destroys the v4i32 type.
36490 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && SrcVT == MVT::v4i1 &&
36491 VT.isScalarInteger() && N0.getOpcode() == ISD::SETCC &&
36492 N0.getOperand(0).getValueType() == MVT::v4i32 &&
36493 ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode()) &&
36494 cast<CondCodeSDNode>(N0.getOperand(2))->get() == ISD::SETLT) {
36495 SDValue N00 = N0.getOperand(0);
36496 // Only do this if we can avoid scalarizing the input.
36497 if (ISD::isNormalLoad(N00.getNode()) ||
36498 (N00.getOpcode() == ISD::BITCAST &&
36499 N00.getOperand(0).getValueType() == MVT::v4f32)) {
36500 SDValue V = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32,
36501 DAG.getBitcast(MVT::v4f32, N00));
36502 return DAG.getZExtOrTrunc(V, dl, VT);
36506 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
36507 // type, widen both sides to avoid a trip through memory.
36508 if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
36509 Subtarget.hasAVX512()) {
36510 N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
36511 N0 = DAG.getBitcast(MVT::v8i1, N0);
36512 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
36513 DAG.getIntPtrConstant(0, dl));
36516 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
36517 // type, widen both sides to avoid a trip through memory.
36518 if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
36519 Subtarget.hasAVX512()) {
36520 // Use zeros for the widening if we already have some zeros. This can
36521 // allow SimplifyDemandedBits to remove scalar ANDs that may be downstream.
36523 // FIXME: It might make sense to detect a concat_vectors with a mix of
36524 // zeroes and undef and turn it into insert_subvector for i1 vectors as
36525 // a separate combine. What we can't do is canonicalize the operands of
36526 // such a concat or we'll get into a loop with SimplifyDemandedBits.
36527 if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
36528 SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
36529 if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
36530 SrcVT = LastOp.getValueType();
36531 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
36532 SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
36533 Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
36534 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
36535 N0 = DAG.getBitcast(MVT::i8, N0);
36536 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
36540 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
36541 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
36543 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
36544 N0 = DAG.getBitcast(MVT::i8, N0);
36545 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
36549 // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
36550 // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
36551 // due to insert_subvector legalization on KNL. By promoting the copy to i16
36552 // we can help with known bits propagation from the vXi1 domain to the
36554 if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
36555 !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
36556 N0.getOperand(0).getValueType() == MVT::v16i1 &&
36557 isNullConstant(N0.getOperand(1)))
36558 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
36559 DAG.getBitcast(MVT::i16, N0.getOperand(0)));
36561 // Combine (bitcast (vbroadcast_load)) -> (vbroadcast_load). The memory VT
36562 // determines the number of bits loaded. Remaining bits are zero.
36563 if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
36564 VT.getScalarSizeInBits() == SrcVT.getScalarSizeInBits()) {
36565 auto *BCast = cast<MemIntrinsicSDNode>(N0);
36566 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
36567 SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
36569 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
36570 VT.getVectorElementType(),
36571 BCast->getMemOperand());
36572 DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
36576 // Since MMX types are special and don't usually play with other vector types,
36577 // it's better to handle them early to be sure we emit efficient code by
36578 // avoiding store-load conversions.
36579 if (VT == MVT::x86mmx) {
36580 // Detect MMX constant vectors.
36582 SmallVector<APInt, 1> EltBits;
36583 if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
36585 // Handle zero-extension of i32 with MOVD.
36586 if (EltBits[0].countLeadingZeros() >= 32)
36587 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
36588 DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
36589 // Else, bitcast to a double.
36590 // TODO - investigate supporting sext 32-bit immediates on x86_64.
36591 APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
36592 return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
36595 // Detect bitcasts to x86mmx low word.
36596 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
36597 (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
36598 N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
36599 bool LowUndef = true, AllUndefOrZero = true;
36600 for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
36601 SDValue Op = N0.getOperand(i);
36602 LowUndef &= Op.isUndef() || (i >= e/2);
36603 AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
36605 if (AllUndefOrZero) {
36606 SDValue N00 = N0.getOperand(0);
36608 N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
36609 : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
36610 return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
36614 // Detect bitcasts of 64-bit build vectors and convert to a
36615 // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the bottom bits.
36617 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
36618 (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
36619 SrcVT == MVT::v8i8))
36620 return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
36622 // Detect bitcasts between element or subvector extraction to x86mmx.
36623 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
36624 N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
36625 isNullConstant(N0.getOperand(1))) {
36626 SDValue N00 = N0.getOperand(0);
36627 if (N00.getValueType().is128BitVector())
36628 return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
36629 DAG.getBitcast(MVT::v2i64, N00));
36632 // Detect bitcasts from FP_TO_SINT to x86mmx.
36633 if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
36635 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
36636 DAG.getUNDEF(MVT::v2i32));
36637 return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
36638 DAG.getBitcast(MVT::v2i64, Res));
36642 // Try to remove a bitcast of a constant vXi1 vector. We have to legalize
36643 // most of these to scalar anyway.
36644 if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
36645 SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
36646 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
36647 return combinevXi1ConstantToInteger(N0, DAG);
36650 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
36651 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
36652 isa<ConstantSDNode>(N0)) {
36653 auto *C = cast<ConstantSDNode>(N0);
36654 if (C->isAllOnesValue())
36655 return DAG.getConstant(1, SDLoc(N0), VT);
36656 if (C->isNullValue())
36657 return DAG.getConstant(0, SDLoc(N0), VT);
36660 // Try to remove bitcasts from input and output of mask arithmetic to
36661 // remove GPR<->K-register crossings.
36662 if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
36665 // Convert a bitcasted integer logic operation that has one bitcasted
36666 // floating-point operand into a floating-point logic operation. This may
36667 // create a load of a constant, but that is cheaper than materializing the
36668 // constant in an integer register and transferring it to an SSE register or
36669 // transferring the SSE operand to integer register and back.
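// e.g. (f32 (bitcast (xor (bitcast f32 X), Y))) --> (FXOR X, (f32 (bitcast Y)))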
36671 switch (N0.getOpcode()) {
36672 case ISD::AND: FPOpcode = X86ISD::FAND; break;
36673 case ISD::OR: FPOpcode = X86ISD::FOR; break;
36674 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
36675 default: return SDValue();
36678 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
36679 (Subtarget.hasSSE2() && VT == MVT::f64)))
36682 SDValue LogicOp0 = N0.getOperand(0);
36683 SDValue LogicOp1 = N0.getOperand(1);
36686 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
36687 if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
36688 LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
36689 !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
36690 SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
36691 return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
36693 // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
36694 if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
36695 LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
36696 !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
36697 SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
36698 return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
// Given an ABS node, detect the following pattern:
36705 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
36706 // This is useful as it is the input into a SAD pattern.
36707 static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
36708 SDValue AbsOp1 = Abs->getOperand(0);
36709 if (AbsOp1.getOpcode() != ISD::SUB)
36712 Op0 = AbsOp1.getOperand(0);
36713 Op1 = AbsOp1.getOperand(1);
36715 // Check if the operands of the sub are zero-extended from vectors of i8.
36716 if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
36717 Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
36718 Op1.getOpcode() != ISD::ZERO_EXTEND ||
36719 Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
// Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
// to these zexts.
36727 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
36728 const SDValue &Zext1, const SDLoc &DL,
36729 const X86Subtarget &Subtarget) {
36730 // Find the appropriate width for the PSADBW.
36731 EVT InVT = Zext0.getOperand(0).getValueType();
36732 unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());
36734 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
36735 // fill in the missing vector elements with 0.
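// e.g. a v4i8 input becomes the low 4 bytes of a v16i8 value whose remaining
// bytes are zero, which leaves the computed sum of differences unchanged.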
36736 unsigned NumConcat = RegSize / InVT.getSizeInBits();
36737 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
36738 Ops[0] = Zext0.getOperand(0);
36739 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
36740 SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
36741 Ops[0] = Zext1.getOperand(0);
36742 SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
36744 // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
36745 auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
36746 ArrayRef<SDValue> Ops) {
36747 MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
36748 return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
36750 MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
36751 return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
// Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
// PHMINPOSUW.
36757 static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
36758 const X86Subtarget &Subtarget) {
36759 // Bail without SSE41.
36760 if (!Subtarget.hasSSE41())
36763 EVT ExtractVT = Extract->getValueType(0);
36764 if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
36767 // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
36768 ISD::NodeType BinOp;
36769 SDValue Src = DAG.matchBinOpReduction(
36770 Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
36774 EVT SrcVT = Src.getValueType();
36775 EVT SrcSVT = SrcVT.getScalarType();
36776 if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
36780 SDValue MinPos = Src;
36782 // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
36783 while (SrcVT.getSizeInBits() > 128) {
36784 unsigned NumElts = SrcVT.getVectorNumElements();
36785 unsigned NumSubElts = NumElts / 2;
36786 SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcSVT, NumSubElts);
36787 unsigned SubSizeInBits = SrcVT.getSizeInBits();
36788 SDValue Lo = extractSubVector(MinPos, 0, DAG, DL, SubSizeInBits);
36789 SDValue Hi = extractSubVector(MinPos, NumSubElts, DAG, DL, SubSizeInBits);
36790 MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
36792 assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
36793 (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
36794 "Unexpected value type");
// PHMINPOSUW applies to UMIN(v8i16); for SMIN/SMAX/UMAX we must apply a mask
36797 // to flip the value accordingly.
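// (XOR'ing v8i16 elements with 0x7FFF maps signed order onto reversed
// unsigned order, so a PHMINPOSUW of the flipped values recovers SMAX;
// SMIN and UMAX use the analogous masks chosen below.)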
36799 unsigned MaskEltsBits = ExtractVT.getSizeInBits();
36800 if (BinOp == ISD::SMAX)
36801 Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
36802 else if (BinOp == ISD::SMIN)
36803 Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
36804 else if (BinOp == ISD::UMAX)
36805 Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);
36808 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
36810 // For v16i8 cases we need to perform UMIN on pairs of byte elements,
// shuffling each upper element down and inserting zeros. This means that the
36812 // v16i8 UMIN will leave the upper element as zero, performing zero-extension
36813 // ready for the PHMINPOS.
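// e.g. UMIN({x0..x15}, {x1,0,x3,0,...}) gives {min(x0,x1),0,min(x2,x3),0,...},
// which when read as v8i16 is each byte-pair minimum zero-extended to 16 bits.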
36814 if (ExtractVT == MVT::i8) {
36815 SDValue Upper = DAG.getVectorShuffle(
36816 SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
36817 {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
36818 MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
// Perform the PHMINPOS on a v8i16 vector.
36822 MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
36823 MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
36824 MinPos = DAG.getBitcast(SrcVT, MinPos);
36827 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
36829 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
36830 DAG.getIntPtrConstant(0, DL));
36833 // Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
36834 static SDValue combineHorizontalPredicateResult(SDNode *Extract,
36836 const X86Subtarget &Subtarget) {
36837 // Bail without SSE2.
36838 if (!Subtarget.hasSSE2())
36841 EVT ExtractVT = Extract->getValueType(0);
36842 unsigned BitWidth = ExtractVT.getSizeInBits();
36843 if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
36844 ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
36847 // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
36848 ISD::NodeType BinOp;
36849 SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
36850 if (!Match && ExtractVT == MVT::i1)
36851 Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
36855 // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
36856 // which we can't support here for now.
36857 if (Match.getScalarValueSizeInBits() != BitWidth)
36862 EVT MatchVT = Match.getValueType();
36863 unsigned NumElts = MatchVT.getVectorNumElements();
36864 unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
36865 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36867 if (ExtractVT == MVT::i1) {
36868 // Special case for (pre-legalization) vXi1 reductions.
36869 if (NumElts > 64 || !isPowerOf2_32(NumElts))
36871 if (TLI.isTypeLegal(MatchVT)) {
36872 // If this is a legal AVX512 predicate type then we can just bitcast.
36873 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
36874 Movmsk = DAG.getBitcast(MovmskVT, Match);
36876 // Use combineBitcastvxi1 to create the MOVMSK.
36877 while (NumElts > MaxElts) {
36879 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
36880 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
36883 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
36884 Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
36888 Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
36890 // Bail with AVX512VL (which uses predicate registers).
36891 if (Subtarget.hasVLX())
36894 unsigned MatchSizeInBits = Match.getValueSizeInBits();
36895 if (!(MatchSizeInBits == 128 ||
36896 (MatchSizeInBits == 256 && Subtarget.hasAVX())))
36899 // Make sure this isn't a vector of 1 element. The perf win from using
// MOVMSK diminishes with fewer elements in the reduction, but it is
36901 // generally better to get the comparison over to the GPRs as soon as
36902 // possible to reduce the number of vector ops.
36903 if (Match.getValueType().getVectorNumElements() < 2)
36906 // Check that we are extracting a reduction of all sign bits.
36907 if (DAG.ComputeNumSignBits(Match) != BitWidth)
36910 if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
36912 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
36913 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
36914 MatchSizeInBits = Match.getValueSizeInBits();
36917 // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
36919 if (64 == BitWidth || 32 == BitWidth)
36920 MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
36921 MatchSizeInBits / BitWidth);
36923 MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
36925 SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
36926 Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
36927 NumElts = MaskSrcVT.getVectorNumElements();
36929 assert((NumElts <= 32 || NumElts == 64) &&
36930 "Not expecting more than 64 elements");
36932 MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
36933 if (BinOp == ISD::XOR) {
36934 // parity -> (AND (CTPOP(MOVMSK X)), 1)
36935 SDValue Mask = DAG.getConstant(1, DL, CmpVT);
36936 SDValue Result = DAG.getNode(ISD::CTPOP, DL, CmpVT, Movmsk);
36937 Result = DAG.getNode(ISD::AND, DL, CmpVT, Result, Mask);
36938 return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
36942 ISD::CondCode CondCode;
36943 if (BinOp == ISD::OR) {
36944 // any_of -> MOVMSK != 0
36945 CmpC = DAG.getConstant(0, DL, CmpVT);
36946 CondCode = ISD::CondCode::SETNE;
36948 // all_of -> MOVMSK == ((1 << NumElts) - 1)
36949 CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
36951 CondCode = ISD::CondCode::SETEQ;
36954 // The setcc produces an i8 of 0/1, so extend that to the result width and
36955 // negate to get the final 0/-1 mask value.
36957 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
36958 SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
36959 SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
36960 SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
36961 return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
36964 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
36965 const X86Subtarget &Subtarget) {
36966 // PSADBW is only supported on SSE2 and up.
36967 if (!Subtarget.hasSSE2())
// Verify the type we're extracting from uses integer elements wider than i16.
36971 EVT VT = Extract->getOperand(0).getValueType();
36972 if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16))
36975 unsigned RegSize = 128;
36976 if (Subtarget.useBWIRegs())
36978 else if (Subtarget.hasAVX())
// We handle up to v16i* for SSE2 / v32i* for AVX / v64i* for AVX512.
36982 // TODO: We should be able to handle larger vectors by splitting them before
36983 // feeding them into several SADs, and then reducing over those.
36984 if (RegSize / VT.getVectorNumElements() < 8)
36987 // Match shuffle + add pyramid.
36988 ISD::NodeType BinOp;
36989 SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
36991 // The operand is expected to be zero extended from i8
36992 // (verified in detectZextAbsDiff).
36993 // In order to convert to i64 and above, additional any/zero/sign
36994 // extend is expected.
36995 // The zero extend from 32 bit has no mathematical effect on the result.
// Also, the sign extend is basically a zero extend
// (it extends the sign bit, which is zero).
36998 // So it is correct to skip the sign/zero extend instruction.
36999 if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
37000 Root.getOpcode() == ISD::ZERO_EXTEND ||
37001 Root.getOpcode() == ISD::ANY_EXTEND))
37002 Root = Root.getOperand(0);
// If there was a match, we want Root to be an ABS node that is the root of an
// abs-diff pattern.
37006 if (!Root || Root.getOpcode() != ISD::ABS)
// Check whether we have an abs-diff pattern feeding into the ABS node.
37010 SDValue Zext0, Zext1;
37011 if (!detectZextAbsDiff(Root, Zext0, Zext1))
37014 // Create the SAD instruction.
37016 SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
37018 // If the original vector was wider than 8 elements, sum over the results
37019 // in the SAD vector.
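// e.g. a v32i8 source yields a v4i64 PSADBW result; each loop iteration below
// shuffles the upper half of the live lanes down and adds them, leaving the
// final total in element 0.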
37020 unsigned Stages = Log2_32(VT.getVectorNumElements());
37021 MVT SadVT = SAD.getSimpleValueType();
37023 unsigned SadElems = SadVT.getVectorNumElements();
37025 for(unsigned i = Stages - 3; i > 0; --i) {
37026 SmallVector<int, 16> Mask(SadElems, -1);
37027 for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
37028 Mask[j] = MaskEnd + j;
37031 DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
37032 SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
37036 MVT Type = Extract->getSimpleValueType(0);
37037 unsigned TypeSizeInBits = Type.getSizeInBits();
37038 // Return the lowest TypeSizeInBits bits.
37039 MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits);
37040 SAD = DAG.getBitcast(ResVT, SAD);
37041 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD,
37042 Extract->getOperand(1));
// Attempt to peek through a target shuffle and extract the scalar from the
// source vector operand.
37047 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
37048 TargetLowering::DAGCombinerInfo &DCI,
37049 const X86Subtarget &Subtarget) {
37050 if (DCI.isBeforeLegalizeOps())
37054 SDValue Src = N->getOperand(0);
37055 SDValue Idx = N->getOperand(1);
37057 EVT VT = N->getValueType(0);
37058 EVT SrcVT = Src.getValueType();
37059 EVT SrcSVT = SrcVT.getVectorElementType();
37060 unsigned NumSrcElts = SrcVT.getVectorNumElements();
37062 // Don't attempt this for boolean mask vectors or unknown extraction indices.
37063 if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
37066 SDValue SrcBC = peekThroughBitcasts(Src);
37068 // Handle extract(broadcast(scalar_value)), it doesn't matter what index is.
37069 if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
37070 SDValue SrcOp = SrcBC.getOperand(0);
37071 if (SrcOp.getValueSizeInBits() == VT.getSizeInBits())
37072 return DAG.getBitcast(VT, SrcOp);
37075 // If we're extracting a single element from a broadcast load and there are
37076 // no other users, just create a single load.
37077 if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
37078 auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
37079 unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
37080 if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
37081 VT.getSizeInBits() == SrcBCWidth) {
37082 SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
37083 MemIntr->getBasePtr(),
37084 MemIntr->getPointerInfo(),
37085 MemIntr->getAlignment(),
37086 MemIntr->getMemOperand()->getFlags());
37087 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
37092 // Handle extract(truncate(x)) for 0'th index.
37093 // TODO: Treat this as a faux shuffle?
37094 // TODO: When can we use this for general indices?
37095 if (ISD::TRUNCATE == Src.getOpcode() && SrcVT.is128BitVector() &&
37096 isNullConstant(Idx)) {
37097 Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
37098 Src = DAG.getBitcast(SrcVT, Src);
37099 return DAG.getNode(N->getOpcode(), dl, VT, Src, Idx);
37102 // Resolve the target shuffle inputs and mask.
37103 SmallVector<int, 16> Mask;
37104 SmallVector<SDValue, 2> Ops;
37105 if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
37108 // Attempt to narrow/widen the shuffle mask to the correct size.
37109 if (Mask.size() != NumSrcElts) {
37110 if ((NumSrcElts % Mask.size()) == 0) {
37111 SmallVector<int, 16> ScaledMask;
37112 int Scale = NumSrcElts / Mask.size();
37113 scaleShuffleMask<int>(Scale, Mask, ScaledMask);
37114 Mask = std::move(ScaledMask);
37115 } else if ((Mask.size() % NumSrcElts) == 0) {
37116 // Simplify Mask based on demanded element.
37117 int ExtractIdx = (int)N->getConstantOperandVal(1);
37118 int Scale = Mask.size() / NumSrcElts;
37119 int Lo = Scale * ExtractIdx;
37120 int Hi = Scale * (ExtractIdx + 1);
37121 for (int i = 0, e = (int)Mask.size(); i != e; ++i)
37122 if (i < Lo || Hi <= i)
37123 Mask[i] = SM_SentinelUndef;
37125 SmallVector<int, 16> WidenedMask;
37126 while (Mask.size() > NumSrcElts &&
37127 canWidenShuffleElements(Mask, WidenedMask))
37128 Mask = std::move(WidenedMask);
37129 // TODO - investigate support for wider shuffle masks with known upper
37130 // undef/zero elements for implicit zero-extension.
37134 // Check if narrowing/widening failed.
37135 if (Mask.size() != NumSrcElts)
37138 int SrcIdx = Mask[N->getConstantOperandVal(1)];
37140 // If the shuffle source element is undef/zero then we can just accept it.
37141 if (SrcIdx == SM_SentinelUndef)
37142 return DAG.getUNDEF(VT);
37144 if (SrcIdx == SM_SentinelZero)
37145 return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
37146 : DAG.getConstant(0, dl, VT);
37148 SDValue SrcOp = Ops[SrcIdx / Mask.size()];
37149 SrcIdx = SrcIdx % Mask.size();
37151 // We can only extract other elements from 128-bit vectors and in certain
37152 // circumstances, depending on SSE-level.
37153 // TODO: Investigate using extract_subvector for larger vectors.
37154 // TODO: Investigate float/double extraction if it will be just stored.
37155 if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
37156 ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
37157 assert(SrcSVT == VT && "Unexpected extraction type");
37158 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
37159 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
37160 DAG.getIntPtrConstant(SrcIdx, dl));
37163 if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
37164 (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
37165 assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() &&
37166 "Unexpected extraction type");
37167 unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
37168 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
37169 SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
37170 DAG.getIntPtrConstant(SrcIdx, dl));
37171 return DAG.getZExtOrTrunc(ExtOp, dl, VT);
37177 /// Extracting a scalar FP value from vector element 0 is free, so extract each
37178 /// operand first, then perform the math as a scalar op.
37179 static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG) {
37180 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
37181 SDValue Vec = ExtElt->getOperand(0);
37182 SDValue Index = ExtElt->getOperand(1);
37183 EVT VT = ExtElt->getValueType(0);
37184 EVT VecVT = Vec.getValueType();
37186 // TODO: If this is a unary/expensive/expand op, allow extraction from a
37187 // non-zero element because the shuffle+scalar op will be cheaper?
37188 if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
37191 // Vector FP compares don't fit the pattern of FP math ops (propagate, not
37192 // extract, the condition code), so deal with those as a special-case.
37193 if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
37194 EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
37195 if (OpVT != MVT::f32 && OpVT != MVT::f64)
37198 // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
37200 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
37201 Vec.getOperand(0), Index);
37202 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
37203 Vec.getOperand(1), Index);
37204 return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
37207 if (VT != MVT::f32 && VT != MVT::f64)
37210 // Vector FP selects don't fit the pattern of FP math ops (because the
37211 // condition has a different type and we have to change the opcode), so deal
37212 // with those here.
37213 // FIXME: This is restricted to pre type legalization by ensuring the setcc
37214 // has i1 elements. If we loosen this we need to convert vector bool to a
37216 if (Vec.getOpcode() == ISD::VSELECT &&
37217 Vec.getOperand(0).getOpcode() == ISD::SETCC &&
37218 Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
37219 Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
37220 // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
37222 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
37223 Vec.getOperand(0).getValueType().getScalarType(),
37224 Vec.getOperand(0), Index);
37225 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
37226 Vec.getOperand(1), Index);
37227 SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
37228 Vec.getOperand(2), Index);
37229 return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
37232 // TODO: This switch could include FNEG and the x86-specific FP logic ops
37233 // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
37234 // missed load folding and fma+fneg combining.
37235 switch (Vec.getOpcode()) {
37236 case ISD::FMA: // Begin 3 operands
37238 case ISD::FADD: // Begin 2 operands
37243 case ISD::FCOPYSIGN:
37246 case ISD::FMINNUM_IEEE:
37247 case ISD::FMAXNUM_IEEE:
37248 case ISD::FMAXIMUM:
37249 case ISD::FMINIMUM:
37252 case ISD::FABS: // Begin 1 operand
37257 case ISD::FNEARBYINT:
37261 case X86ISD::FRSQRT: {
37262 // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
37264 SmallVector<SDValue, 4> ExtOps;
37265 for (SDValue Op : Vec->ops())
37266 ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
37267 return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
37272 llvm_unreachable("All opcodes should return within switch");
37275 /// Try to convert a vector reduction sequence composed of binops and shuffles
37276 /// into horizontal ops.
37277 static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
37278 const X86Subtarget &Subtarget) {
37279 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
// We need at least SSE2 to do anything here.
37282 if (!Subtarget.hasSSE2())
37287 DAG.matchBinOpReduction(ExtElt, Opc, {ISD::ADD, ISD::FADD}, true);
37291 SDValue Index = ExtElt->getOperand(1);
37292 assert(isNullConstant(Index) &&
37293 "Reduction doesn't end in an extract from index 0");
37295 EVT VT = ExtElt->getValueType(0);
37296 EVT VecVT = Rdx.getValueType();
37297 if (VecVT.getScalarType() != VT)
// vXi8 reduction - sub-128-bit vector.
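// (The source is first widened to v16i8 with zero/undef upper lanes; PSADBW
// against zero then sums the live bytes into the low 64-bit lane.)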
37303 if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
37304 if (VecVT == MVT::v4i8) {
37306 if (Subtarget.hasSSE41()) {
37307 Rdx = DAG.getBitcast(MVT::i32, Rdx);
37308 Rdx = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
37309 DAG.getConstant(0, DL, MVT::v4i32), Rdx,
37310 DAG.getIntPtrConstant(0, DL));
37311 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
37313 Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, Rdx,
37314 DAG.getConstant(0, DL, VecVT));
37317 if (Rdx.getValueType() == MVT::v8i8) {
37319 Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Rdx,
37320 DAG.getUNDEF(MVT::v8i8));
37322 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
37323 DAG.getConstant(0, DL, MVT::v16i8));
37324 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
37325 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
37328 // Must be a >=128-bit vector with pow2 elements.
37329 if ((VecVT.getSizeInBits() % 128) != 0 ||
37330 !isPowerOf2_32(VecVT.getVectorNumElements()))
37333 // vXi8 reduction - sum lo/hi halves then use PSADBW.
37334 if (VT == MVT::i8) {
37335 while (Rdx.getValueSizeInBits() > 128) {
37336 unsigned HalfSize = VecVT.getSizeInBits() / 2;
37337 unsigned HalfElts = VecVT.getVectorNumElements() / 2;
37338 SDValue Lo = extractSubVector(Rdx, 0, DAG, DL, HalfSize);
37339 SDValue Hi = extractSubVector(Rdx, HalfElts, DAG, DL, HalfSize);
37340 Rdx = DAG.getNode(ISD::ADD, DL, Lo.getValueType(), Lo, Hi);
37341 VecVT = Rdx.getValueType();
37343 assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
37345 SDValue Hi = DAG.getVectorShuffle(
37346 MVT::v16i8, DL, Rdx, Rdx,
37347 {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
37348 Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
37349 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
37350 getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
37351 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
37352 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
// Only use (F)HADD opcodes if they aren't microcoded or we are minimizing
// codesize.
37356 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
37357 if (!Subtarget.hasFastHorizontalOps() && !OptForSize)
37360 unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
37362 // 256-bit horizontal instructions operate on 128-bit chunks rather than
37363 // across the whole vector, so we need an extract + hop preliminary stage.
37364 // This is the only step where the operands of the hop are not the same value.
37365 // TODO: We could extend this to handle 512-bit or even longer vectors.
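// e.g. for v8f32 the upper and lower 128-bit halves are HADD'd together first,
// reducing to v4f32 before the in-register hadd steps below finish the sum.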
37366 if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
37367 ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
37368 unsigned NumElts = VecVT.getVectorNumElements();
37369 SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
37370 SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
37371 Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
37372 VecVT = Rdx.getValueType();
37374 if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
37375 !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
37378 // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
37379 unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
37380 for (unsigned i = 0; i != ReductionSteps; ++i)
37381 Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
37383 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
37386 /// Detect vector gather/scatter index generation and convert it from being a
37387 /// bunch of shuffles and extracts into a somewhat faster sequence.
37388 /// For i686, the best sequence is apparently storing the value and loading
37389 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
37390 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
37391 TargetLowering::DAGCombinerInfo &DCI,
37392 const X86Subtarget &Subtarget) {
37393 if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
37396 SDValue InputVector = N->getOperand(0);
37397 SDValue EltIdx = N->getOperand(1);
37398 auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
37400 EVT SrcVT = InputVector.getValueType();
37401 EVT VT = N->getValueType(0);
37402 SDLoc dl(InputVector);
37403 bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
37404 unsigned NumSrcElts = SrcVT.getVectorNumElements();
37406 if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
37407 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
37409 // Integer Constant Folding.
37410 if (CIdx && VT.isInteger()) {
37411 APInt UndefVecElts;
37412 SmallVector<APInt, 16> EltBits;
37413 unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
37414 if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
37415 EltBits, true, false)) {
37416 uint64_t Idx = CIdx->getZExtValue();
37417 if (UndefVecElts[Idx])
37418 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
37419 return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
37425 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37426 if (TLI.SimplifyDemandedBits(
37427 SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()), DCI))
37428 return SDValue(N, 0);
37430 // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
37431 if ((InputVector.getOpcode() == X86ISD::PINSRB ||
37432 InputVector.getOpcode() == X86ISD::PINSRW) &&
37433 InputVector.getOperand(2) == EltIdx) {
37434 assert(SrcVT == InputVector.getOperand(0).getValueType() &&
37435 "Vector type mismatch");
37436 SDValue Scl = InputVector.getOperand(1);
37437 Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
37438 return DAG.getZExtOrTrunc(Scl, dl, VT);
37441 // TODO - Remove this once we can handle the implicit zero-extension of
37442 // X86ISD::PEXTRW/X86ISD::PEXTRB in combineHorizontalPredicateResult and
37443 // combineBasicSADPattern.
// Detect mmx extraction of all bits as an i64. It works better as a bitcast.
37448 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
37449 VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
37450 SDValue MMXSrc = InputVector.getOperand(0);
37452 // The bitcast source is a direct mmx result.
37453 if (MMXSrc.getValueType() == MVT::x86mmx)
37454 return DAG.getBitcast(VT, InputVector);
37457 // Detect mmx to i32 conversion through a v2i32 elt extract.
37458 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
37459 VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
37460 SDValue MMXSrc = InputVector.getOperand(0);
37462 // The bitcast source is a direct mmx result.
37463 if (MMXSrc.getValueType() == MVT::x86mmx)
37464 return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
37467 // Check whether this extract is the root of a sum of absolute differences
37468 // pattern. This has to be done here because we really want it to happen
// pre-legalization.
37470 if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
37473 // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
37474 if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
37477 // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
37478 if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
37481 if (SDValue V = combineReductionToHorizontal(N, DAG, Subtarget))
37484 if (SDValue V = scalarizeExtEltFP(N, DAG))
// Attempt to extract an i1 element by using MOVMSK to extract the sign bits
37488 // and then testing the relevant element.
37489 if (CIdx && SrcVT.getScalarType() == MVT::i1) {
37490 SmallVector<SDNode *, 16> BoolExtracts;
37491 auto IsBoolExtract = [&BoolExtracts](SDNode *Use) {
37492 if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
37493 isa<ConstantSDNode>(Use->getOperand(1)) &&
37494 Use->getValueType(0) == MVT::i1) {
37495 BoolExtracts.push_back(Use);
37500 if (all_of(InputVector->uses(), IsBoolExtract) &&
37501 BoolExtracts.size() > 1) {
37502 EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
37504 combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
37505 for (SDNode *Use : BoolExtracts) {
37506 // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
37507 unsigned MaskIdx = Use->getConstantOperandVal(1);
37508 APInt MaskBit = APInt::getOneBitSet(NumSrcElts, MaskIdx);
37509 SDValue Mask = DAG.getConstant(MaskBit, dl, BCVT);
37510 SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
37511 Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
37512 DCI.CombineTo(Use, Res);
37514 return SDValue(N, 0);
37522 /// If a vector select has an operand that is -1 or 0, try to simplify the
37523 /// select to a bitwise logic operation.
37524 /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
37526 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
37527 TargetLowering::DAGCombinerInfo &DCI,
37528 const X86Subtarget &Subtarget) {
37529 SDValue Cond = N->getOperand(0);
37530 SDValue LHS = N->getOperand(1);
37531 SDValue RHS = N->getOperand(2);
37532 EVT VT = LHS.getValueType();
37533 EVT CondVT = Cond.getValueType();
37535 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37537 if (N->getOpcode() != ISD::VSELECT)
37540 assert(CondVT.isVector() && "Vector select expects a vector selector!");
37542 // Check if the first operand is all zeros and Cond type is vXi1.
37543 // This situation only applies to avx512.
37544 // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
37545 // TODO: Can we assert that both operands are not zeros (because that should
37546 // get simplified at node creation time)?
37547 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
37548 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
37550 // If both inputs are 0/undef, create a complete zero vector.
37551 // FIXME: As noted above this should be handled by DAGCombiner/getNode.
37552 if (TValIsAllZeros && FValIsAllZeros) {
37553 if (VT.isFloatingPoint())
37554 return DAG.getConstantFP(0.0, DL, VT);
37555 return DAG.getConstant(0, DL, VT);
37558 if (TValIsAllZeros && !FValIsAllZeros && Subtarget.hasAVX512() &&
37559 Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1) {
37560 // Invert the cond to not(cond) : xor(op,allones)=not(op)
37561 SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
37562 // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
37563 return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
37566 // To use the condition operand as a bitwise mask, it must have elements that
// are the same size as the select elements. I.e., the condition operand must
37568 // have already been promoted from the IR select condition type <N x i1>.
37569 // Don't check if the types themselves are equal because that excludes
37570 // vector floating-point selects.
37571 if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
37574 // Try to invert the condition if true value is not all 1s and false value is
37575 // not all 0s. Only do this if the condition has one use.
37576 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
37577 if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
37578 // Check if the selector will be produced by CMPP*/PCMP*.
37579 Cond.getOpcode() == ISD::SETCC &&
37580 // Check if SETCC has already been promoted.
37581 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
37583 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
37585 if (TValIsAllZeros || FValIsAllOnes) {
37586 SDValue CC = Cond.getOperand(2);
37587 ISD::CondCode NewCC = ISD::getSetCCInverse(
37588 cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
37589 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
37591 std::swap(LHS, RHS);
37592 TValIsAllOnes = FValIsAllOnes;
37593 FValIsAllZeros = TValIsAllZeros;
37597 // Cond value must be 'sign splat' to be converted to a logical op.
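// (i.e. every condition element must be known to be all-zeros or all-ones,
// which is what the ComputeNumSignBits check below guarantees.)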
37598 if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
37601 // vselect Cond, 111..., 000... -> Cond
37602 if (TValIsAllOnes && FValIsAllZeros)
37603 return DAG.getBitcast(VT, Cond);
37605 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
37608 // vselect Cond, 111..., X -> or Cond, X
37609 if (TValIsAllOnes) {
37610 SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
37611 SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
37612 return DAG.getBitcast(VT, Or);
37615 // vselect Cond, X, 000... -> and Cond, X
37616 if (FValIsAllZeros) {
37617 SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
37618 SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
37619 return DAG.getBitcast(VT, And);
37622 // vselect Cond, 000..., X -> andn Cond, X
37623 if (TValIsAllZeros) {
37624 MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
37625 SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
37626 SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
37627 SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
37628 return DAG.getBitcast(VT, AndN);
37634 /// If both arms of a vector select are concatenated vectors, split the select,
37635 /// and concatenate the result to eliminate a wide (256-bit) vector instruction:
37636 /// vselect Cond, (concat T0, T1), (concat F0, F1) -->
37637 /// concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
37638 static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
37639 const X86Subtarget &Subtarget) {
37640 unsigned Opcode = N->getOpcode();
37641 if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
37644 // TODO: Split 512-bit vectors too?
37645 EVT VT = N->getValueType(0);
37646 if (!VT.is256BitVector())
37649 // TODO: Split as long as any 2 of the 3 operands are concatenated?
37650 SDValue Cond = N->getOperand(0);
37651 SDValue TVal = N->getOperand(1);
37652 SDValue FVal = N->getOperand(2);
37653 SmallVector<SDValue, 4> CatOpsT, CatOpsF;
37654 if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
37655 !collectConcatOps(TVal.getNode(), CatOpsT) ||
37656 !collectConcatOps(FVal.getNode(), CatOpsF))
37659 auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
37660 ArrayRef<SDValue> Ops) {
37661 return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
37663 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
37664 makeBlend, /*CheckBWI*/ false);
37667 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
37668 SDValue Cond = N->getOperand(0);
37669 SDValue LHS = N->getOperand(1);
37670 SDValue RHS = N->getOperand(2);
37673 auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
37674 auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
37675 if (!TrueC || !FalseC)
37678 // Don't do this for crazy integer types.
37679 EVT VT = N->getValueType(0);
37680 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
37683 // We're going to use the condition bit in math or logic ops. We could allow
37684 // this with a wider condition value (post-legalization it becomes an i8),
37685 // but if nothing is creating selects that late, it doesn't matter.
37686 if (Cond.getValueType() != MVT::i1)
37689 // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
37690 // 3, 5, or 9 with i32/i64, so those get transformed too.
37691 // TODO: For constants that overflow or do not differ by power-of-2 or small
37692 // multiplier, convert to 'and' + 'add'.
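// e.g. select Cond, 7, 2 --> (zext(Cond) * 5) + 2, where the multiply by 5 is
// a single LEA rather than an IMUL.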
37693 const APInt &TrueVal = TrueC->getAPIntValue();
37694 const APInt &FalseVal = FalseC->getAPIntValue();
37696 APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
37700 APInt AbsDiff = Diff.abs();
37701 if (AbsDiff.isPowerOf2() ||
37702 ((VT == MVT::i32 || VT == MVT::i64) &&
37703 (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
37705 // We need a positive multiplier constant for shift/LEA codegen. The 'not'
37706 // of the condition can usually be folded into a compare predicate, but even
37707 // without that, the sequence should be cheaper than a CMOV alternative.
37708 if (TrueVal.slt(FalseVal)) {
37709 Cond = DAG.getNOT(DL, Cond, MVT::i1);
37710 std::swap(TrueC, FalseC);
37713 // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
37714 SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
37716 // Multiply condition by the difference if non-one.
37717 if (!AbsDiff.isOneValue())
37718 R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
37720 // Add the base if non-zero.
37721 if (!FalseC->isNullValue())
37722 R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
37730 /// If this is a *dynamic* select (non-constant condition) and we can match
37731 /// this node with one of the variable blend instructions, restructure the
37732 /// condition so that blends can use the high (sign) bit of each element.
37733 /// This function will also call SimplifyDemandedBits on already created
37734 /// BLENDV to perform additional simplifications.
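/// e.g. a v16i8 VSELECT becomes PBLENDVB, which selects each byte using only
/// bit 7 of the corresponding mask byte, so only the sign bit is demanded.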
37735 static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
37736 TargetLowering::DAGCombinerInfo &DCI,
37737 const X86Subtarget &Subtarget) {
37738 SDValue Cond = N->getOperand(0);
37739 if ((N->getOpcode() != ISD::VSELECT &&
37740 N->getOpcode() != X86ISD::BLENDV) ||
37741 ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
37744 // Don't optimize before the condition has been transformed to a legal type
37745 // and don't ever optimize vector selects that map to AVX512 mask-registers.
37746 unsigned BitWidth = Cond.getScalarValueSizeInBits();
37747 if (BitWidth < 8 || BitWidth > 64)
37750 // We can only handle the cases where VSELECT is directly legal on the
37751 // subtarget. We custom lower VSELECT nodes with constant conditions and
37752 // this makes it hard to see whether a dynamic VSELECT will correctly
37753 // lower, so we both check the operation's status and explicitly handle the
37754 // cases where a *dynamic* blend will fail even though a constant-condition
37755 // blend could be custom lowered.
37756 // FIXME: We should find a better way to handle this class of problems.
37757 // Potentially, we should combine constant-condition vselect nodes
// pre-legalization into shuffles and not mark as many types as custom
// lowerable.
37760 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37761 EVT VT = N->getValueType(0);
37762 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
37764 // FIXME: We don't support i16-element blends currently. We could and
37765 // should support them by making *all* the bits in the condition be set
37766 // rather than just the high bit and using an i8-element blend.
37767 if (VT.getVectorElementType() == MVT::i16)
37769 // Dynamic blending was only available from SSE4.1 onward.
37770 if (VT.is128BitVector() && !Subtarget.hasSSE41())
37772 // Byte blends are only available in AVX2
37773 if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
37775 // There are no 512-bit blend instructions that use sign bits.
37776 if (VT.is512BitVector())
37779 auto OnlyUsedAsSelectCond = [](SDValue Cond) {
37780 for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
37782 if ((UI->getOpcode() != ISD::VSELECT &&
37783 UI->getOpcode() != X86ISD::BLENDV) ||
37784 UI.getOperandNo() != 0)
37790 if (OnlyUsedAsSelectCond(Cond)) {
37791 APInt DemandedMask(APInt::getSignMask(BitWidth));
37793 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
37794 !DCI.isBeforeLegalizeOps());
37795 if (!TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO, 0, true))
37798 // If we changed the computation somewhere in the DAG, this change will
37799 // affect all users of Cond. Update all the nodes so that we do not use
37800 // the generic VSELECT anymore. Otherwise, we may perform wrong
// optimizations as we messed with the actual expectation for the vector
// boolean contents.
37803 for (SDNode *U : Cond->uses()) {
37804 if (U->getOpcode() == X86ISD::BLENDV)
37807 SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
37808 Cond, U->getOperand(1), U->getOperand(2));
37809 DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
37810 DCI.AddToWorklist(U);
37812 DCI.CommitTargetLoweringOpt(TLO);
37813 return SDValue(N, 0);
37816 // Otherwise we can still at least try to simplify multiple use bits.
37817 APInt DemandedMask(APInt::getSignMask(BitWidth));
37818 APInt DemandedElts(APInt::getAllOnesValue(VT.getVectorNumElements()));
37820 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
37821 !DCI.isBeforeLegalizeOps());
37822 if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedMask,
37823 DemandedElts, DAG, 0))
37824 return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0),
37825 V, N->getOperand(1), N->getOperand(2));
// Try to match:
// (or (and (M, (sub 0, X)), (pandn M, X)))
37832 // which is a special case of:
37833 // (select M, (sub 0, X), X)
37835 // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
37836 // We know that, if fNegate is 0 or 1:
37837 // (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
37839 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
37840 // ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
37841 // ( M ? -X : X) == ((X ^ M ) + (M & 1))
37842 // This lets us transform our vselect to:
37843 // (add (xor X, M), (and M, 1))
// And further to:
// (sub (xor X, M), M)
37846 static SDValue combineLogicBlendIntoConditionalNegate(
37847 EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
37848 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
37849 EVT MaskVT = Mask.getValueType();
37850 assert(MaskVT.isInteger() &&
37851 DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
37852 "Mask must be zero/all-bits");
37854 if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
37856 if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
37859 auto IsNegV = [](SDNode *N, SDValue V) {
37860 return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
37861 ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
37865 if (IsNegV(Y.getNode(), X))
37867 else if (IsNegV(X.getNode(), Y))
37872 SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
37873 SDValue SubOp2 = Mask;
37875 // If the negate was on the false side of the select, then
37876 // the operands of the SUB need to be swapped. PR 27251.
37877 // This is because the pattern being matched above is
37878 // (vselect M, (sub (0, X), X) -> (sub (xor X, M), M)
37879 // but if the pattern matched was
37880 // (vselect M, X, (sub (0, X))), that is really negation of the pattern
37881 // above, -(vselect M, (sub 0, X), X), and therefore the replacement
37882 // pattern also needs to be a negation of the replacement pattern above.
37883 // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
37884 // sub accomplishes the negation of the replacement pattern.
37886 std::swap(SubOp1, SubOp2);
37888 SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
37889 return DAG.getBitcast(VT, Res);
37892 /// Do target-specific dag combines on SELECT and VSELECT nodes.
37893 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
37894 TargetLowering::DAGCombinerInfo &DCI,
37895 const X86Subtarget &Subtarget) {
37897 SDValue Cond = N->getOperand(0);
37898 SDValue LHS = N->getOperand(1);
37899 SDValue RHS = N->getOperand(2);
37901 // Try simplification again because we use this function to optimize
37902 // BLENDV nodes that are not handled by the generic combiner.
37903 if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
37906 EVT VT = LHS.getValueType();
37907 EVT CondVT = Cond.getValueType();
37908 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37909 bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());
37911 // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
37912 // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
37913 // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
37914 if (CondVT.isVector() && CondVT.isInteger() &&
37915 CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
37916 (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
37917 DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
37918 if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
37919 DL, DAG, Subtarget))
37922 // Convert vselects with constant condition into shuffles.
37923 if (CondConstantVector && DCI.isBeforeLegalizeOps()) {
37924 SmallVector<int, 64> Mask;
37925 if (createShuffleMaskFromVSELECT(Mask, Cond))
37926 return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
37929 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
37930 // instructions match the semantics of the common C idiom x<y?x:y but not
37931 // x<=y?x:y, because of how they handle negative zero (which can be
37932 // ignored in unsafe-math mode).
37933 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
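// Note that MINPS/MAXPS return the second source operand when the operands
// compare equal (e.g. -0.0 vs +0.0) or when either operand is NaN, which is
// why the operand order and the NaN/signed-zero checks below matter.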
37934 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
37935 VT != MVT::f80 && VT != MVT::f128 &&
37936 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
37937 (Subtarget.hasSSE2() ||
37938 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
37939 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
37941 unsigned Opcode = 0;
37942 // Check for x CC y ? x : y.
37943 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
37944 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
37948 // Converting this to a min would handle NaNs incorrectly, and swapping
37949 // the operands would cause it to handle comparisons between positive
37950 // and negative zero incorrectly.
37951 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
37952 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
37953 !(DAG.isKnownNeverZeroFloat(LHS) ||
37954 DAG.isKnownNeverZeroFloat(RHS)))
37956 std::swap(LHS, RHS);
37958 Opcode = X86ISD::FMIN;
37961 // Converting this to a min would handle comparisons between positive
37962 // and negative zero incorrectly.
37963 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
37964 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
37966 Opcode = X86ISD::FMIN;
37969 // Converting this to a min would handle both negative zeros and NaNs
37970 // incorrectly, but we can swap the operands to fix both.
37971 std::swap(LHS, RHS);
37976 Opcode = X86ISD::FMIN;
37980 // Converting this to a max would handle comparisons between positive
37981 // and negative zero incorrectly.
37982 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
37983 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
37985 Opcode = X86ISD::FMAX;
37988 // Converting this to a max would handle NaNs incorrectly, and swapping
37989 // the operands would cause it to handle comparisons between positive
37990 // and negative zero incorrectly.
37991 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
37992 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
37993 !(DAG.isKnownNeverZeroFloat(LHS) ||
37994 DAG.isKnownNeverZeroFloat(RHS)))
37996 std::swap(LHS, RHS);
37998 Opcode = X86ISD::FMAX;
38001 // Converting this to a max would handle both negative zeros and NaNs
38002 // incorrectly, but we can swap the operands to fix both.
38003 std::swap(LHS, RHS);
38008 Opcode = X86ISD::FMAX;
38011 // Check for x CC y ? y : x -- a min/max with reversed arms.
38012 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
38013 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
38017 // Converting this to a min would handle comparisons between positive
38018 // and negative zero incorrectly, and swapping the operands would
38019 // cause it to handle NaNs incorrectly.
38020 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
38021 !(DAG.isKnownNeverZeroFloat(LHS) ||
38022 DAG.isKnownNeverZeroFloat(RHS))) {
38023 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38025 std::swap(LHS, RHS);
38027 Opcode = X86ISD::FMIN;
38030 // Converting this to a min would handle NaNs incorrectly.
38031 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38033 Opcode = X86ISD::FMIN;
38036 // Converting this to a min would handle both negative zeros and NaNs
38037 // incorrectly, but we can swap the operands to fix both.
38038 std::swap(LHS, RHS);
38043 Opcode = X86ISD::FMIN;
38047 // Converting this to a max would handle NaNs incorrectly.
38048 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38050 Opcode = X86ISD::FMAX;
38053 // Converting this to a max would handle comparisons between positive
38054 // and negative zero incorrectly, and swapping the operands would
38055 // cause it to handle NaNs incorrectly.
38056 if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
38057 !DAG.isKnownNeverZeroFloat(LHS) &&
38058 !DAG.isKnownNeverZeroFloat(RHS)) {
38059 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38061 std::swap(LHS, RHS);
38063 Opcode = X86ISD::FMAX;
38066 // Converting this to a max would handle both negative zeros and NaNs
38067 // incorrectly, but we can swap the operands to fix both.
38068 std::swap(LHS, RHS);
38073 Opcode = X86ISD::FMAX;
38079 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
38082 // Some mask scalar intrinsics rely on checking if only one bit is set
38083 // and implement it in C code like this:
38084 // A[0] = (U & 1) ? A[0] : W[0];
38085 // This creates some redundant instructions that break pattern matching.
38086 // fold (select (setcc (and (X, 1), 0, seteq), Y, Z)) -> select(and(X, 1),Z,Y)
38087 if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
38088 Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
38089 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38090 SDValue AndNode = Cond.getOperand(0);
38091 if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
38092 isNullConstant(Cond.getOperand(1)) &&
38093 isOneConstant(AndNode.getOperand(1))) {
38094 // LHS and RHS swapped due to
38095 // setcc outputting 1 when AND resulted in 0 and vice versa.
38096 AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
38097 return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
38101 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
38102 // lowering on KNL. In this case we convert it to
38103 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
// The same situation applies to all vectors of i8 and i16 without BWI.
38105 // Make sure we extend these even before type legalization gets a chance to
38106 // split wide vectors.
38107 // Since SKX these selects have a proper lowering.
38108 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
38109 CondVT.getVectorElementType() == MVT::i1 &&
38110 (VT.getVectorElementType() == MVT::i8 ||
38111 VT.getVectorElementType() == MVT::i16)) {
38112 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
38113 return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
38116 // AVX512 - Extend select with zero to merge with target shuffle.
38117 // select(mask, extract_subvector(shuffle(x)), zero) -->
38118 // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
38119 // TODO - support non target shuffles as well.
38120 if (Subtarget.hasAVX512() && CondVT.isVector() &&
38121 CondVT.getVectorElementType() == MVT::i1) {
38122 auto SelectableOp = [&TLI](SDValue Op) {
38123 return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
38124 isTargetShuffle(Op.getOperand(0).getOpcode()) &&
38125 isNullConstant(Op.getOperand(1)) &&
38126 TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
38127 Op.hasOneUse() && Op.getOperand(0).hasOneUse();
38130 bool SelectableLHS = SelectableOp(LHS);
38131 bool SelectableRHS = SelectableOp(RHS);
38132 bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
38133 bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
38135 if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
38136 EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
38137 : RHS.getOperand(0).getValueType();
38138 unsigned NumSrcElts = SrcVT.getVectorNumElements();
38139 EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
38140 LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
38141 VT.getSizeInBits());
38142 RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
38143 VT.getSizeInBits());
38144 Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
38145 DAG.getUNDEF(SrcCondVT), Cond,
38146 DAG.getIntPtrConstant(0, DL));
38147 SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
38148 return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
38152 if (SDValue V = combineSelectOfTwoConstants(N, DAG))
38155 // Canonicalize max and min:
38156 // (x > y) ? x : y -> (x >= y) ? x : y
38157 // (x < y) ? x : y -> (x <= y) ? x : y
38158 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
38159 // the need for an extra compare
38160 // against zero. e.g.
38161 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
38163 // testl %edi, %edi
38165 // cmovgl %edi, %eax
38169 // cmovsl %eax, %edi
38170 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
38171 Cond.hasOneUse() &&
38172 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
38173 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
38174 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38179 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
38180 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
38181 Cond.getOperand(0), Cond.getOperand(1), NewCC);
38182 return DAG.getSelect(DL, VT, Cond, LHS, RHS);
38187 // Match VSELECTs into subs with unsigned saturation.
38188 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
38189 // psubus is available in SSE2 for i8 and i16 vectors.
38190 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
38191 isPowerOf2_32(VT.getVectorNumElements()) &&
38192 (VT.getVectorElementType() == MVT::i8 ||
38193 VT.getVectorElementType() == MVT::i16)) {
38194 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38196 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
38197 // left side invert the predicate to simplify logic below.
38199 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
38201 CC = ISD::getSetCCInverse(CC, VT.getVectorElementType());
38202 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
38206 if (Other.getNode() && Other->getNumOperands() == 2 &&
38207 Other->getOperand(0) == Cond.getOperand(0)) {
38208 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
38209 SDValue CondRHS = Cond->getOperand(1);
38211 // Look for a general sub with unsigned saturation first.
38212 // x >= y ? x-y : 0 --> subus x, y
38213 // x > y ? x-y : 0 --> subus x, y
38214 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
38215 Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
38216 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
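// Illustrative IR sketch of the pattern matched just above (names are
// hypothetical):
//   %c = icmp ugt <16 x i8> %x, %y
//   %d = sub <16 x i8> %x, %y
//   %r = select <16 x i1> %c, <16 x i8> %d, <16 x i8> zeroinitializer
// folds to (usubsat %x, %y), i.e. a single PSUBUSB.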
38218 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
38219 if (isa<BuildVectorSDNode>(CondRHS)) {
38220 // If the RHS is a constant we have to reverse the const
38221 // canonicalization.
38222 // x > C-1 ? x+-C : 0 --> subus x, C
38223 auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
38224 return (!Op && !Cond) ||
38226 Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
38228 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
38229 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
38230 /*AllowUndefs*/ true)) {
38231 OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
38233 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
38236 // Another special case: If C was a sign bit, the sub has been
38237 // canonicalized into a xor.
38238 // FIXME: Would it be better to use computeKnownBits to determine
38239 // whether it's safe to decanonicalize the xor?
38240 // x s< 0 ? x^C : 0 --> subus x, C
38241 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
38242 if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
38243 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
38244 OpRHSConst->getAPIntValue().isSignMask()) {
38245 // Note that we have to rebuild the RHS constant here to ensure we
38246 // don't rely on particular values of undef lanes.
38247 OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
38248 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
38256 // Match VSELECTs into add with unsigned saturation.
38257 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
38258 // paddus is available in SSE2 for i8 and i16 vectors.
38259 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
38260 isPowerOf2_32(VT.getVectorNumElements()) &&
38261 (VT.getVectorElementType() == MVT::i8 ||
38262 VT.getVectorElementType() == MVT::i16)) {
38263 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38265 SDValue CondLHS = Cond->getOperand(0);
38266 SDValue CondRHS = Cond->getOperand(1);
38268 // Check if one of the arms of the VSELECT is a vector with all bits set.
38269 // If it's on the left side, invert the predicate to simplify the logic below.
38271 if (ISD::isBuildVectorAllOnes(LHS.getNode())) {
38273 CC = ISD::getSetCCInverse(CC, VT.getVectorElementType());
38274 } else if (ISD::isBuildVectorAllOnes(RHS.getNode())) {
38278 if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
38279 SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
38281 // Canonicalize condition operands.
38282 if (CC == ISD::SETUGE) {
38283 std::swap(CondLHS, CondRHS);
38287 // We can test against either of the addition operands.
38288 // x <= x+y ? x+y : ~0 --> addus x, y
38289 // x+y >= x ? x+y : ~0 --> addus x, y
38290 if (CC == ISD::SETULE && Other == CondRHS &&
38291 (OpLHS == CondLHS || OpRHS == CondLHS))
38292 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
38294 if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
38295 CondLHS == OpLHS) {
38296 // If the RHS is a constant we have to reverse the const
38297 // canonicalization.
38298 // x > ~C ? x+C : ~0 --> addus x, C
38299 auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
38300 return Cond->getAPIntValue() == ~Op->getAPIntValue();
38302 if (CC == ISD::SETULE &&
38303 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
38304 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
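// Illustrative IR sketch of the pattern matched just above (names are
// hypothetical):
//   %a = add <8 x i16> %x, %y
//   %c = icmp ule <8 x i16> %x, %a
//   %r = select <8 x i1> %c, <8 x i16> %a, <8 x i16> <all ones>
// folds to (uaddsat %x, %y), i.e. a single PADDUSW.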
38309 // Early exit check
38310 if (!TLI.isTypeLegal(VT))
38313 if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
38316 if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
38319 if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
38322 // select(~Cond, X, Y) -> select(Cond, Y, X)
38323 if (CondVT.getScalarType() != MVT::i1)
38324 if (SDValue CondNot = IsNOT(Cond, DAG))
38325 return DAG.getNode(N->getOpcode(), DL, VT,
38326 DAG.getBitcast(CondVT, CondNot), RHS, LHS);
38328 // Custom action for SELECT MMX
38329 if (VT == MVT::x86mmx) {
38330 LHS = DAG.getBitcast(MVT::i64, LHS);
38331 RHS = DAG.getBitcast(MVT::i64, RHS);
38332 SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS);
38333 return DAG.getBitcast(VT, newSelect);
38340 /// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
38342 /// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
38343 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
38344 /// Note that this is only legal for some op/cc combinations.
38345 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
38347 const X86Subtarget &Subtarget) {
38348 // This combine only operates on CMP-like nodes.
38349 if (!(Cmp.getOpcode() == X86ISD::CMP ||
38350 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
38353 // Can't replace the cmp if it has more uses than the one we're looking at.
38354 // FIXME: We would like to be able to handle this, but would need to make sure
38355 // all uses were updated.
38356 if (!Cmp.hasOneUse())
38359 // This only applies to variations of the common case:
38360 // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
38361 // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
38362 // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
38363 // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
38364 // Using the proper condcodes (see below), overflow is checked for.
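// Illustrative sketch of the overall effect (assuming a plain increment):
//   %old = atomicrmw add i32* %p, i32 1 seq_cst
//   %cmp = icmp slt i32 %old, 0
//   br i1 %cmp, ...
// can be emitted as a single "lock add $1, mem" whose flags are branched on
// with the sle-style condition code, instead of a LOCK XADD followed by a
// separate test of the returned value.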
38366 // FIXME: We can generalize both constraints:
38367 // - XOR/OR/AND (if they were made to survive AtomicExpand)
38369 // if the result is compared.
38371 SDValue CmpLHS = Cmp.getOperand(0);
38372 SDValue CmpRHS = Cmp.getOperand(1);
38374 if (!CmpLHS.hasOneUse())
38377 unsigned Opc = CmpLHS.getOpcode();
38378 if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
38381 SDValue OpRHS = CmpLHS.getOperand(2);
38382 auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
38386 APInt Addend = OpRHSC->getAPIntValue();
38387 if (Opc == ISD::ATOMIC_LOAD_SUB)
38390 auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
38394 APInt Comparison = CmpRHSC->getAPIntValue();
38396 // If the addend is the negation of the comparison value, then we can do
38397 // a full comparison by emitting the atomic arithmetic as a locked sub.
38398 if (Comparison == -Addend) {
38399 // The CC is fine, but we need to rewrite the LHS of the comparison as an
38401 auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
38402 auto AtomicSub = DAG.getAtomic(
38403 ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
38404 /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
38405 /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
38406 AN->getMemOperand());
38407 auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
38408 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
38409 DAG.getUNDEF(CmpLHS.getValueType()));
38410 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
38414 // We can handle comparisons with zero in a number of cases by manipulating
38416 if (!Comparison.isNullValue())
38419 if (CC == X86::COND_S && Addend == 1)
38421 else if (CC == X86::COND_NS && Addend == 1)
38423 else if (CC == X86::COND_G && Addend == -1)
38425 else if (CC == X86::COND_LE && Addend == -1)
38430 SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
38431 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
38432 DAG.getUNDEF(CmpLHS.getValueType()));
38433 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
38437 // Check whether a boolean test is testing a boolean value generated by
38438 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper
38439 // condition flag to test.
38441 // Simplify the following patterns:
38442 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
38443 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
38444 // to (Op EFLAGS Cond)
38446 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
38447 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
38448 // to (Op EFLAGS !Cond)
38450 // where Op could be BRCOND or CMOV.
38452 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
38453 // This combine only operates on CMP-like nodes.
38454 if (!(Cmp.getOpcode() == X86ISD::CMP ||
38455 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
38458 // Quit if not used as a boolean value.
38459 if (CC != X86::COND_E && CC != X86::COND_NE)
38462 // Check the CMP operands. One of them should be 0 or 1 and the other should
38463 // be a SetCC or a value extended from one.
38464 SDValue Op1 = Cmp.getOperand(0);
38465 SDValue Op2 = Cmp.getOperand(1);
38468 const ConstantSDNode* C = nullptr;
38469 bool needOppositeCond = (CC == X86::COND_E);
38470 bool checkAgainstTrue = false; // Is it a comparison against 1?
38472 if ((C = dyn_cast<ConstantSDNode>(Op1)))
38474 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
38476 else // Quit if neither operand is a constant.
38479 if (C->getZExtValue() == 1) {
38480 needOppositeCond = !needOppositeCond;
38481 checkAgainstTrue = true;
38482 } else if (C->getZExtValue() != 0)
38483 // Quit if the constant is neither 0 nor 1.
38486 bool truncatedToBoolWithAnd = false;
38487 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
38488 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
38489 SetCC.getOpcode() == ISD::TRUNCATE ||
38490 SetCC.getOpcode() == ISD::AND) {
38491 if (SetCC.getOpcode() == ISD::AND) {
38493 if (isOneConstant(SetCC.getOperand(0)))
38495 if (isOneConstant(SetCC.getOperand(1)))
38499 SetCC = SetCC.getOperand(OpIdx);
38500 truncatedToBoolWithAnd = true;
38502 SetCC = SetCC.getOperand(0);
38505 switch (SetCC.getOpcode()) {
38506 case X86ISD::SETCC_CARRY:
38507 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
38508 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
38509 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
38510 // truncated to i1 using 'and'.
38511 if (checkAgainstTrue && !truncatedToBoolWithAnd)
38513 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
38514 "Invalid use of SETCC_CARRY!");
38516 case X86ISD::SETCC:
38517 // Set the condition code or opposite one if necessary.
38518 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
38519 if (needOppositeCond)
38520 CC = X86::GetOppositeBranchCondition(CC);
38521 return SetCC.getOperand(1);
38522 case X86ISD::CMOV: {
38523 // Check whether the false/true values are canonical, i.e. 0 or 1.
38524 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
38525 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
38526 // Quit if true value is not a constant.
38529 // Quit if false value is not a constant.
38531 SDValue Op = SetCC.getOperand(0);
38532 // Skip 'zext' or 'trunc' node.
38533 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
38534 Op.getOpcode() == ISD::TRUNCATE)
38535 Op = Op.getOperand(0);
38536 // A special case for rdrand/rdseed, where 0 is set if the false condition is found.
38538 if ((Op.getOpcode() != X86ISD::RDRAND &&
38539 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
38542 // Quit if false value is not the constant 0 or 1.
38543 bool FValIsFalse = true;
38544 if (FVal && FVal->getZExtValue() != 0) {
38545 if (FVal->getZExtValue() != 1)
38547 // If FVal is 1, opposite cond is needed.
38548 needOppositeCond = !needOppositeCond;
38549 FValIsFalse = false;
38551 // Quit if TVal is not the constant opposite of FVal.
38552 if (FValIsFalse && TVal->getZExtValue() != 1)
38554 if (!FValIsFalse && TVal->getZExtValue() != 0)
38556 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
38557 if (needOppositeCond)
38558 CC = X86::GetOppositeBranchCondition(CC);
38559 return SetCC.getOperand(3);
38566 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
38568 /// (X86or (X86setcc) (X86setcc))
38569 /// (X86cmp (and (X86setcc) (X86setcc)), 0)
38570 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
38571 X86::CondCode &CC1, SDValue &Flags,
38573 if (Cond->getOpcode() == X86ISD::CMP) {
38574 if (!isNullConstant(Cond->getOperand(1)))
38577 Cond = Cond->getOperand(0);
38582 SDValue SetCC0, SetCC1;
38583 switch (Cond->getOpcode()) {
38584 default: return false;
38591 SetCC0 = Cond->getOperand(0);
38592 SetCC1 = Cond->getOperand(1);
38596 // Make sure we have SETCC nodes, using the same flags value.
38597 if (SetCC0.getOpcode() != X86ISD::SETCC ||
38598 SetCC1.getOpcode() != X86ISD::SETCC ||
38599 SetCC0->getOperand(1) != SetCC1->getOperand(1))
38602 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
38603 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
38604 Flags = SetCC0->getOperand(1);
38608 // When legalizing carry, we create carries via "add X, -1".
38609 // If that comes from an actual carry, via setcc, we use the carry directly.
38611 static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
38612 if (EFLAGS.getOpcode() == X86ISD::ADD) {
38613 if (isAllOnesConstant(EFLAGS.getOperand(1))) {
38614 SDValue Carry = EFLAGS.getOperand(0);
38615 while (Carry.getOpcode() == ISD::TRUNCATE ||
38616 Carry.getOpcode() == ISD::ZERO_EXTEND ||
38617 Carry.getOpcode() == ISD::SIGN_EXTEND ||
38618 Carry.getOpcode() == ISD::ANY_EXTEND ||
38619 (Carry.getOpcode() == ISD::AND &&
38620 isOneConstant(Carry.getOperand(1))))
38621 Carry = Carry.getOperand(0);
38622 if (Carry.getOpcode() == X86ISD::SETCC ||
38623 Carry.getOpcode() == X86ISD::SETCC_CARRY) {
38624 // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
38625 uint64_t CarryCC = Carry.getConstantOperandVal(0);
38626 SDValue CarryOp1 = Carry.getOperand(1);
38627 if (CarryCC == X86::COND_B)
38629 if (CarryCC == X86::COND_A) {
38630 // Try to convert COND_A into COND_B in an attempt to facilitate
38631 // materializing "setb reg".
38633 // Do not flip "e > c", where "c" is a constant, because Cmp
38634 // instruction cannot take an immediate as its first operand.
38636 if (CarryOp1.getOpcode() == X86ISD::SUB &&
38637 CarryOp1.getNode()->hasOneUse() &&
38638 CarryOp1.getValueType().isInteger() &&
38639 !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
38640 SDValue SubCommute =
38641 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
38642 CarryOp1.getOperand(1), CarryOp1.getOperand(0));
38643 return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
38646 // If this is a check of the z flag of an add with 1, switch to the
38648 if (CarryCC == X86::COND_E &&
38649 CarryOp1.getOpcode() == X86ISD::ADD &&
38650 isOneConstant(CarryOp1.getOperand(1)))
38659 /// Optimize an EFLAGS definition used according to the condition code \p CC
38660 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
38661 /// uses of chain values.
38662 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
38664 const X86Subtarget &Subtarget) {
38665 if (CC == X86::COND_B)
38666 if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
38669 if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
38671 return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
38674 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
38675 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
38676 TargetLowering::DAGCombinerInfo &DCI,
38677 const X86Subtarget &Subtarget) {
38680 SDValue FalseOp = N->getOperand(0);
38681 SDValue TrueOp = N->getOperand(1);
38682 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
38683 SDValue Cond = N->getOperand(3);
38685 // cmov X, X, ?, ? --> X
38686 if (TrueOp == FalseOp)
38689 // Try to simplify the EFLAGS and condition code operands.
38690 // We can't always do this as FCMOV only supports a subset of X86 cond.
38691 if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
38692 if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
38693 SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
38695 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
38699 // If this is a select between two integer constants, try to do some
38700 // optimizations. Note that the operands are ordered the opposite of SELECT operands.
38702 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
38703 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
38704 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
38705 // larger than FalseC (the false value).
38706 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
38707 CC = X86::GetOppositeBranchCondition(CC);
38708 std::swap(TrueC, FalseC);
38709 std::swap(TrueOp, FalseOp);
38712 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
38714 // This is efficient for any integer data type (including i8/i16) and shift amount.
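// Illustrative example (the assembly is a sketch): (cmov 0, 8, cond) can
// become
//   setcc %al ; movzbl %al, %eax ; shll $3, %eax
// i.e. zext(setcc) shifted left by log2(8) = 3, with no conditional move.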
38715 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
38716 Cond = getSETCC(CC, Cond, DL, DAG);
38718 // Zero extend the condition if needed.
38719 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
38721 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
38722 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
38723 DAG.getConstant(ShAmt, DL, MVT::i8));
38728 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
38728 // for any integer data type, including i8/i16.
38729 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
38730 Cond = getSETCC(CC, Cond, DL, DAG);
38732 // Zero extend the condition if needed.
38733 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
38734 FalseC->getValueType(0), Cond);
38735 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
38736 SDValue(FalseC, 0));
38740 // Optimize cases that will turn into an LEA instruction. This requires
38741 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
38742 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
38743 APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
38744 assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
38745 "Implicit constant truncation");
38747 bool isFastMultiplier = false;
38748 if (Diff.ult(10)) {
38749 switch (Diff.getZExtValue()) {
38751 case 1: // result = add base, cond
38752 case 2: // result = lea base( , cond*2)
38753 case 3: // result = lea base(cond, cond*2)
38754 case 4: // result = lea base( , cond*4)
38755 case 5: // result = lea base(cond, cond*4)
38756 case 8: // result = lea base( , cond*8)
38757 case 9: // result = lea base(cond, cond*8)
38758 isFastMultiplier = true;
38763 if (isFastMultiplier) {
38764 Cond = getSETCC(CC, Cond, DL ,DAG);
38765 // Zero extend the condition if needed.
38766 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
38768 // Scale the condition by the difference.
38770 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
38771 DAG.getConstant(Diff, DL, Cond.getValueType()));
38773 // Add the base if non-zero.
38774 if (FalseC->getAPIntValue() != 0)
38775 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
38776 SDValue(FalseC, 0));
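// Illustrative example (the assembly is a sketch): for (cmov 2, 7, cond) the
// difference is 5, so this emits roughly
//   setcc %al ; movzbl %al, %eax ; leal 2(%rax,%rax,4), %eax
// i.e. cond*5 + 2 computed with a single LEA instead of a conditional move
// of constants.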
38783 // Handle these cases:
38784 // (select (x != c), e, c) -> (select (x != c), e, x),
38785 // (select (x == c), c, e) -> (select (x == c), x, e)
38786 // where the c is an integer constant, and the "select" is the combination
38787 // of CMOV and CMP.
38789 // The rationale for this change is that the conditional-move from a constant
38790 // needs two instructions, however, conditional-move from a register needs
38791 // only one instruction.
38793 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
38794 // some instruction-combining opportunities. This opt needs to be
38795 // postponed as late as possible.
38797 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
38798 // the DCI.xxxx conditions are provided to postpone the optimization as
38799 // late as possible.
38801 ConstantSDNode *CmpAgainst = nullptr;
38802 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
38803 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
38804 !isa<ConstantSDNode>(Cond.getOperand(0))) {
38806 if (CC == X86::COND_NE &&
38807 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
38808 CC = X86::GetOppositeBranchCondition(CC);
38809 std::swap(TrueOp, FalseOp);
38812 if (CC == X86::COND_E &&
38813 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
38814 SDValue Ops[] = {FalseOp, Cond.getOperand(0),
38815 DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
38816 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
38821 // Fold and/or of setcc's to double CMOV:
38822 // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
38823 // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
38825 // This combine lets us generate:
38826 // cmovcc1 (jcc1 if we don't have CMOV)
38832 // cmovne (jne if we don't have CMOV)
38833 // When we can't use the CMOV instruction, it might increase branch mispredicts.
38835 // When we can use CMOV, or when there is no mispredict, this improves
38836 // throughput and reduces register pressure.
38838 if (CC == X86::COND_NE) {
38840 X86::CondCode CC0, CC1;
38842 if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
38844 std::swap(FalseOp, TrueOp);
38845 CC0 = X86::GetOppositeBranchCondition(CC0);
38846 CC1 = X86::GetOppositeBranchCondition(CC1);
38849 SDValue LOps[] = {FalseOp, TrueOp,
38850 DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
38851 SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
38852 SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
38854 SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
38859 // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
38860 // (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
38861 // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
38862 // (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
38863 if ((CC == X86::COND_NE || CC == X86::COND_E) &&
38864 Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
38865 SDValue Add = TrueOp;
38866 SDValue Const = FalseOp;
38867 // Canonicalize the condition code for easier matching and output.
38868 if (CC == X86::COND_E)
38869 std::swap(Add, Const);
38871 // We might have replaced the constant in the cmov with the LHS of the
38872 // compare. If so change it to the RHS of the compare.
38873 if (Const == Cond.getOperand(0))
38874 Const = Cond.getOperand(1);
38876 // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
38877 if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
38878 Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
38879 (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
38880 Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
38881 Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
38882 EVT VT = N->getValueType(0);
38883 // This should constant fold.
38884 SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
38886 DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
38887 DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
38888 return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
38895 /// Different mul shrinking modes.
38896 enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
38898 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
38899 EVT VT = N->getOperand(0).getValueType();
38900 if (VT.getScalarSizeInBits() != 32)
38903 assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
38904 unsigned SignBits[2] = {1, 1};
38905 bool IsPositive[2] = {false, false};
38906 for (unsigned i = 0; i < 2; i++) {
38907 SDValue Opd = N->getOperand(i);
38909 SignBits[i] = DAG.ComputeNumSignBits(Opd);
38910 IsPositive[i] = DAG.SignBitIsZero(Opd);
38913 bool AllPositive = IsPositive[0] && IsPositive[1];
38914 unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
38915 // When ranges are from -128 ~ 127, use MULS8 mode.
38916 if (MinSignBits >= 25)
38917 Mode = ShrinkMode::MULS8;
38918 // When ranges are from 0 ~ 255, use MULU8 mode.
38919 else if (AllPositive && MinSignBits >= 24)
38920 Mode = ShrinkMode::MULU8;
38921 // When ranges are from -32768 ~ 32767, use MULS16 mode.
38922 else if (MinSignBits >= 17)
38923 Mode = ShrinkMode::MULS16;
38924 // When ranges are from 0 ~ 65535, use MULU16 mode.
38925 else if (AllPositive && MinSignBits >= 16)
38926 Mode = ShrinkMode::MULU16;
38932 /// When the operands of vector mul are extended from smaller size values,
38933 /// like i8 and i16, the type of mul may be shrunk to generate more
38934 /// efficient code. Two typical patterns are handled:
38936 /// %2 = sext/zext <N x i8> %1 to <N x i32>
38937 /// %4 = sext/zext <N x i8> %3 to <N x i32>
38938 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
38939 /// %5 = mul <N x i32> %2, %4
38942 /// %2 = zext/sext <N x i16> %1 to <N x i32>
38943 /// %4 = zext/sext <N x i16> %3 to <N x i32>
38944 /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
38945 /// %5 = mul <N x i32> %2, %4
38947 /// There are four mul shrinking modes:
38948 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
38949 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
38950 /// generate pmullw+sext32 for it (MULS8 mode).
38951 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
38952 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
38953 /// generate pmullw+zext32 for it (MULU8 mode).
38954 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
38955 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
38956 /// generate pmullw+pmulhw for it (MULS16 mode).
38957 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
38958 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
38959 /// generate pmullw+pmulhuw for it (MULU16 mode).
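/// Illustrative example (a sketch): for <8 x i32> operands whose values are
/// known to fit in 16 unsigned bits, MULU16 mode emits roughly
///   %lo = pmullw  (trunc %a to <8 x i16>), (trunc %b to <8 x i16>)
///   %hi = pmulhuw (trunc %a to <8 x i16>), (trunc %b to <8 x i16>)
/// and then interleaves %lo/%hi with punpcklwd/punpckhwd to rebuild the
/// <8 x i32> result.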
38960 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
38961 const X86Subtarget &Subtarget) {
38962 // Check for legality
38963 // pmullw/pmulhw are not available before SSE2.
38964 if (!Subtarget.hasSSE2())
38967 // Check for profitability
38968 // pmulld is supported since SSE41. It is better to use pmulld
38969 // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than the expansion.
38971 bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
38972 if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
38976 if (!canReduceVMulWidth(N, DAG, Mode))
38980 SDValue N0 = N->getOperand(0);
38981 SDValue N1 = N->getOperand(1);
38982 EVT VT = N->getOperand(0).getValueType();
38983 unsigned NumElts = VT.getVectorNumElements();
38984 if ((NumElts % 2) != 0)
38987 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
38989 // Shrink the operands of mul.
38990 SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
38991 SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
38993 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
38994 // lower part is needed.
38995 SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
38996 if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
38997 return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
38998 : ISD::SIGN_EXTEND,
39001 MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
39002 // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
39003 // the higher part is also needed.
39005 DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
39006 ReducedVT, NewN0, NewN1);
39008 // Repack the lower and higher parts of the mul result into a wider vector.
39010 // Generate shuffle functioning as punpcklwd.
39011 SmallVector<int, 16> ShuffleMask(NumElts);
39012 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
39013 ShuffleMask[2 * i] = i;
39014 ShuffleMask[2 * i + 1] = i + NumElts;
39017 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
39018 ResLo = DAG.getBitcast(ResVT, ResLo);
39019 // Generate shuffle functioning as punpckhwd.
39020 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
39021 ShuffleMask[2 * i] = i + NumElts / 2;
39022 ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
39025 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
39026 ResHi = DAG.getBitcast(ResVT, ResHi);
39027 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
39030 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
39031 EVT VT, const SDLoc &DL) {
39033 auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
39034 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39035 DAG.getConstant(Mult, DL, VT));
39036 Result = DAG.getNode(ISD::SHL, DL, VT, Result,
39037 DAG.getConstant(Shift, DL, MVT::i8));
39038 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
39043 auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
39044 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39045 DAG.getConstant(Mul1, DL, VT));
39046 Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
39047 DAG.getConstant(Mul2, DL, VT));
39048 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
39057 // mul x, 11 => add ((shl (mul x, 5), 1), x)
39058 return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
39060 // mul x, 21 => add ((shl (mul x, 5), 2), x)
39061 return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
39063 // mul x, 41 => add ((shl (mul x, 5), 3), x)
39064 return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
39066 // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
39067 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
39068 combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
39070 // mul x, 19 => add ((shl (mul x, 9), 1), x)
39071 return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
39073 // mul x, 37 => add ((shl (mul x, 9), 2), x)
39074 return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
39076 // mul x, 73 => add ((shl (mul x, 9), 3), x)
39077 return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
39079 // mul x, 13 => add ((shl (mul x, 3), 2), x)
39080 return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
39082 // mul x, 23 => sub ((shl (mul x, 3), 3), x)
39083 return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
39085 // mul x, 26 => add ((mul (mul x, 5), 5), x)
39086 return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
39088 // mul x, 28 => add ((mul (mul x, 9), 3), x)
39089 return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
39091 // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
39092 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
39093 combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
39096 // Another trick. If this is a power of 2 + 2/4/8, we can use a shift followed
39097 // by a single LEA.
39098 // First check if this is a sum of two powers of 2 because that's easy. Then
39099 // count the trailing zeros up to the first set bit.
39100 // TODO: We can do this even without LEA at a cost of two shifts and an add.
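// Illustrative example (the assembly is a sketch): mul x, 34 (= 32 + 2) has
// ScaleShift = 1 and ShiftAmt = 5, giving (add (shl x, 5), (shl x, 1)),
// where the scaled-by-2 term folds into the LEA, e.g.
//   movl %edi, %eax ; shll $5, %eax ; leal (%rax,%rdi,2), %eax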
39101 if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
39102 unsigned ScaleShift = countTrailingZeros(MulAmt);
39103 if (ScaleShift >= 1 && ScaleShift < 4) {
39104 unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
39105 SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39106 DAG.getConstant(ShiftAmt, DL, MVT::i8));
39107 SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39108 DAG.getConstant(ScaleShift, DL, MVT::i8));
39109 return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
39116 // If the upper 17 bits of each element are zero then we can use PMADDWD,
39117 // which is always at least as quick as PMULLD, except on KNL.
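// Illustrative example (a sketch): for <4 x i32> %a, %b where the top 17 bits
// of every element are known zero, (mul %a, %b) can be emitted as
//   (vpmaddwd (bitcast %a to <8 x i16>), (bitcast %b to <8 x i16>))
// because each 32-bit lane then multiplies two non-negative 16-bit values and
// the second product that PMADDWD adds is a multiply by zero.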
39118 static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
39119 const X86Subtarget &Subtarget) {
39120 if (!Subtarget.hasSSE2())
39123 if (Subtarget.isPMADDWDSlow())
39126 EVT VT = N->getValueType(0);
39128 // Only support vXi32 vectors.
39129 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
39132 // Make sure the vXi16 type is legal. This covers the AVX512 without BWI case.
39133 // Also allow v2i32 if it will be widened.
39134 MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());
39135 if (VT != MVT::v2i32 && !DAG.getTargetLoweringInfo().isTypeLegal(WVT))
39138 SDValue N0 = N->getOperand(0);
39139 SDValue N1 = N->getOperand(1);
39141 // If we are zero extending in two steps without SSE4.1, it's better to reduce
39142 // the vmul width instead.
39143 if (!Subtarget.hasSSE41() &&
39144 (N0.getOpcode() == ISD::ZERO_EXTEND &&
39145 N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
39146 (N1.getOpcode() == ISD::ZERO_EXTEND &&
39147 N1.getOperand(0).getScalarValueSizeInBits() <= 8))
39150 APInt Mask17 = APInt::getHighBitsSet(32, 17);
39151 if (!DAG.MaskedValueIsZero(N1, Mask17) ||
39152 !DAG.MaskedValueIsZero(N0, Mask17))
39155 // Use SplitOpsAndApply to handle AVX splitting.
39156 auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39157 ArrayRef<SDValue> Ops) {
39158 MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
39159 return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
39161 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
39162 { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
39166 static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
39167 const X86Subtarget &Subtarget) {
39168 if (!Subtarget.hasSSE2())
39171 EVT VT = N->getValueType(0);
39173 // Only support vXi64 vectors.
39174 if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
39175 VT.getVectorNumElements() < 2 ||
39176 !isPowerOf2_32(VT.getVectorNumElements()))
39179 SDValue N0 = N->getOperand(0);
39180 SDValue N1 = N->getOperand(1);
39182 // PMULDQ returns the 64-bit result of the signed multiplication of the lower
39183 // 32-bits. We can lower with this if the sign bits stretch that far.
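// Illustrative example (a sketch): if both <2 x i64> operands are known to be
// sign-extended from 32 bits, (mul %a, %b) becomes (pmuldq %a, %b); PMULDQ
// multiplies the low 32 bits of each 64-bit lane and produces the full 64-bit
// signed product, so the high halves never need to be multiplied.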
39184 if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
39185 DAG.ComputeNumSignBits(N1) > 32) {
39186 auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39187 ArrayRef<SDValue> Ops) {
39188 return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
39190 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
39191 PMULDQBuilder, /*CheckBWI*/false);
39194 // If the upper bits are zero we can use a single pmuludq.
39195 APInt Mask = APInt::getHighBitsSet(64, 32);
39196 if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
39197 auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39198 ArrayRef<SDValue> Ops) {
39199 return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
39201 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
39202 PMULUDQBuilder, /*CheckBWI*/false);
39208 /// Optimize a single multiply with constant into two operations in order to
39209 /// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
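/// Illustrative example (the assembly is a sketch): "mul x, 45" (45 = 9 * 5)
/// can be emitted as two LEAs on x86-64:
///   leal (%rdi,%rdi,8), %eax   ; x*9
///   leal (%rax,%rax,4), %eax   ; (x*9)*5 = x*45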
39210 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
39211 TargetLowering::DAGCombinerInfo &DCI,
39212 const X86Subtarget &Subtarget) {
39213 EVT VT = N->getValueType(0);
39215 if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
39218 if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
39221 if (DCI.isBeforeLegalize() && VT.isVector())
39222 return reduceVMULWidth(N, DAG, Subtarget);
39224 if (!MulConstantOptimization)
39226 // An imul is usually smaller than the alternative sequence.
39227 if (DAG.getMachineFunction().getFunction().hasMinSize())
39230 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
39233 if (VT != MVT::i64 && VT != MVT::i32)
39236 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
39239 if (isPowerOf2_64(C->getZExtValue()))
39242 int64_t SignMulAmt = C->getSExtValue();
39243 assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
39244 uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
39247 if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
39248 SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39249 DAG.getConstant(AbsMulAmt, DL, VT));
39250 if (SignMulAmt < 0)
39251 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
39257 uint64_t MulAmt1 = 0;
39258 uint64_t MulAmt2 = 0;
39259 if ((AbsMulAmt % 9) == 0) {
39261 MulAmt2 = AbsMulAmt / 9;
39262 } else if ((AbsMulAmt % 5) == 0) {
39264 MulAmt2 = AbsMulAmt / 5;
39265 } else if ((AbsMulAmt % 3) == 0) {
39267 MulAmt2 = AbsMulAmt / 3;
39271 // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
39273 (isPowerOf2_64(MulAmt2) ||
39274 (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
39276 if (isPowerOf2_64(MulAmt2) &&
39277 !(SignMulAmt >= 0 && N->hasOneUse() &&
39278 N->use_begin()->getOpcode() == ISD::ADD))
39279 // If the second multiplier is pow2, issue it first. We want the multiply by
39280 // 3, 5, or 9 to be folded into the addressing mode unless the lone use
39281 // is an add. Only do this for positive multiply amounts since the
39282 // negate would prevent it from being used as an address mode anyway.
39283 std::swap(MulAmt1, MulAmt2);
39285 if (isPowerOf2_64(MulAmt1))
39286 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39287 DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
39289 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39290 DAG.getConstant(MulAmt1, DL, VT));
39292 if (isPowerOf2_64(MulAmt2))
39293 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
39294 DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
39296 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
39297 DAG.getConstant(MulAmt2, DL, VT));
39299 // Negate the result.
39300 if (SignMulAmt < 0)
39301 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
39303 } else if (!Subtarget.slowLEA())
39304 NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
39307 assert(C->getZExtValue() != 0 &&
39308 C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
39309 "Both cases that could cause potential overflows should have "
39310 "already been handled.");
39311 if (isPowerOf2_64(AbsMulAmt - 1)) {
39312 // (mul x, 2^N + 1) => (add (shl x, N), x)
39313 NewMul = DAG.getNode(
39314 ISD::ADD, DL, VT, N->getOperand(0),
39315 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39316 DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
39318 // To negate, subtract the number from zero
39319 if (SignMulAmt < 0)
39320 NewMul = DAG.getNode(ISD::SUB, DL, VT,
39321 DAG.getConstant(0, DL, VT), NewMul);
39322 } else if (isPowerOf2_64(AbsMulAmt + 1)) {
39323 // (mul x, 2^N - 1) => (sub (shl x, N), x)
39324 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39325 DAG.getConstant(Log2_64(AbsMulAmt + 1),
39327 // To negate, reverse the operands of the subtract.
39328 if (SignMulAmt < 0)
39329 NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
39331 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
39332 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
39333 // (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
39334 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39335 DAG.getConstant(Log2_64(AbsMulAmt - 2),
39337 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
39338 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
39339 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
39340 // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
39341 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39342 DAG.getConstant(Log2_64(AbsMulAmt + 2),
39344 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
39345 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
39352 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
39353 SDValue N0 = N->getOperand(0);
39354 SDValue N1 = N->getOperand(1);
39355 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
39356 EVT VT = N0.getValueType();
39358 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
39359 // since the result of setcc_c is all zeros or all ones.
39360 if (VT.isInteger() && !VT.isVector() &&
39361 N1C && N0.getOpcode() == ISD::AND &&
39362 N0.getOperand(1).getOpcode() == ISD::Constant) {
39363 SDValue N00 = N0.getOperand(0);
39364 APInt Mask = N0.getConstantOperandAPInt(1);
39365 Mask <<= N1C->getAPIntValue();
39366 bool MaskOK = false;
39367 // We can handle cases concerning bit-widening nodes containing setcc_c if
39368 // we carefully interrogate the mask to make sure we are semantics preserving.
39370 // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
39371 // of the underlying setcc_c operation if the setcc_c was zero extended.
39372 // Consider the following example:
39373 // zext(setcc_c) -> i32 0x0000FFFF
39374 // c1 -> i32 0x0000FFFF
39375 // c2 -> i32 0x00000001
39376 // (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
39377 // (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
39378 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
39380 } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
39381 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
39383 } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
39384 N00.getOpcode() == ISD::ANY_EXTEND) &&
39385 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
39386 MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
39388 if (MaskOK && Mask != 0) {
39390 return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
39394 // Hardware support for vector shifts is sparse which makes us scalarize the
39395 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than SHL:
39397 // (shl V, 1) -> add V,V
39398 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
39399 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
39400 assert(N0.getValueType().isVector() && "Invalid vector shift type");
39401 // We shift all of the values by one. In many cases we do not have
39402 // hardware support for this operation. This is better expressed as an ADD
39404 if (N1SplatC->isOne())
39405 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
39411 static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG) {
39412 SDValue N0 = N->getOperand(0);
39413 SDValue N1 = N->getOperand(1);
39414 EVT VT = N0.getValueType();
39415 unsigned Size = VT.getSizeInBits();
39417 // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
39418 // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
39419 // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
39420 // depending on sign of (SarConst - [56,48,32,24,16])
39422 // sexts in X86 are MOVs. The MOVs have the same code size
39423 // as above SHIFTs (only SHIFT on 1 has lower code size).
39424 // However the MOVs have 2 advantages to a SHIFT:
39425 // 1. MOVs can write to a register that differs from source
39426 // 2. MOVs accept memory operands
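// Illustrative example (a sketch): on i64, (sra (shl x, 56), 58) becomes
// (sra (sign_extend_inreg x, i8), 2): the shl is absorbed into a byte
// sign-extension (a MOVSX) and only the residual shift by 58 - 56 = 2 remains.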
39428 if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
39429 N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
39430 N0.getOperand(1).getOpcode() != ISD::Constant)
39433 SDValue N00 = N0.getOperand(0);
39434 SDValue N01 = N0.getOperand(1);
39435 APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
39436 APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
39437 EVT CVT = N1.getValueType();
39439 if (SarConst.isNegative())
39442 for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
39443 unsigned ShiftSize = SVT.getSizeInBits();
39444 // Skip types without a corresponding sext/zext and ShlConst values that
39445 // are not one of [56,48,32,24,16].
39446 if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
39450 DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
39451 SarConst = SarConst - (Size - ShiftSize);
39454 else if (SarConst.isNegative())
39455 return DAG.getNode(ISD::SHL, DL, VT, NN,
39456 DAG.getConstant(-SarConst, DL, CVT));
39458 return DAG.getNode(ISD::SRA, DL, VT, NN,
39459 DAG.getConstant(SarConst, DL, CVT));
39464 static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
39465 TargetLowering::DAGCombinerInfo &DCI) {
39466 SDValue N0 = N->getOperand(0);
39467 SDValue N1 = N->getOperand(1);
39468 EVT VT = N0.getValueType();
39470 // Only do this on the last DAG combine as it can interfere with other
39472 if (!DCI.isAfterLegalizeDAG())
39475 // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
39476 // TODO: This is a generic DAG combine that became an x86-only combine to
39477 // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
39478 // and-not ('andn').
39479 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
39482 auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
39483 auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
39484 if (!ShiftC || !AndC)
39487 // If we can shrink the constant mask below 8-bits or 32-bits, then this
39488 // transform should reduce code size. It may also enable secondary transforms
39489 // from improved known-bits analysis or instruction selection.
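// Illustrative example (a sketch): (srl (and x, 0x7F00), 8) becomes
// (and (srl x, 8), 0x7F); the shifted mask now fits in a sign-extended 8-bit
// immediate, so the AND gets a shorter encoding.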
39490 APInt MaskVal = AndC->getAPIntValue();
39492 // If this can be matched by a zero extend, don't optimize.
39493 if (MaskVal.isMask()) {
39494 unsigned TO = MaskVal.countTrailingOnes();
39495 if (TO >= 8 && isPowerOf2_32(TO))
39499 APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
39500 unsigned OldMaskSize = MaskVal.getMinSignedBits();
39501 unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
39502 if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
39503 (OldMaskSize > 32 && NewMaskSize <= 32)) {
39504 // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
39506 SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
39507 SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
39508 return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
39513 static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
39514 TargetLowering::DAGCombinerInfo &DCI,
39515 const X86Subtarget &Subtarget) {
39516 unsigned Opcode = N->getOpcode();
39517 assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
39518 "Unexpected shift opcode");
39520 EVT VT = N->getValueType(0);
39521 SDValue N0 = N->getOperand(0);
39522 SDValue N1 = N->getOperand(1);
39523 unsigned DstBitsPerElt = VT.getScalarSizeInBits();
39524 unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
39525 assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
39526 N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
39527 "Unexpected PACKSS/PACKUS input type");
39529 bool IsSigned = (X86ISD::PACKSS == Opcode);
39531 // Constant Folding.
39532 APInt UndefElts0, UndefElts1;
39533 SmallVector<APInt, 32> EltBits0, EltBits1;
39534 if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
39535 (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
39536 getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
39537 getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
39538 unsigned NumLanes = VT.getSizeInBits() / 128;
39539 unsigned NumDstElts = VT.getVectorNumElements();
39540 unsigned NumSrcElts = NumDstElts / 2;
39541 unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
39542 unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
39544 APInt Undefs(NumDstElts, 0);
39545 SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
39546 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
39547 for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
39548 unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
39549 auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
39550 auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
39552 if (UndefElts[SrcIdx]) {
39553 Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
39557 APInt &Val = EltBits[SrcIdx];
39559 // PACKSS: Truncate signed value with signed saturation.
39560 // Source values less than dst minint are saturated to minint.
39561 // Source values greater than dst maxint are saturated to maxint.
39562 if (Val.isSignedIntN(DstBitsPerElt))
39563 Val = Val.trunc(DstBitsPerElt);
39564 else if (Val.isNegative())
39565 Val = APInt::getSignedMinValue(DstBitsPerElt);
39567 Val = APInt::getSignedMaxValue(DstBitsPerElt);
39569 // PACKUS: Truncate signed value with unsigned saturation.
39570 // Source values less than zero are saturated to zero.
39571 // Source values greater than dst maxuint are saturated to maxuint.
39572 if (Val.isIntN(DstBitsPerElt))
39573 Val = Val.trunc(DstBitsPerElt);
39574 else if (Val.isNegative())
39575 Val = APInt::getNullValue(DstBitsPerElt);
39577 Val = APInt::getAllOnesValue(DstBitsPerElt);
39579 Bits[Lane * NumDstEltsPerLane + Elt] = Val;
39583 return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
39586 // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
39587 // truncate to create a larger truncate.
39588 if (Subtarget.hasAVX512() &&
39589 N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
39590 N0.getOperand(0).getValueType() == MVT::v8i32) {
39591 if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
39593 DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
39594 if (Subtarget.hasVLX())
39595 return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
39597 // Widen input to v16i32 so we can truncate that.
39599 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
39600 N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
39601 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
39605 // Attempt to combine as shuffle.
39607 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
39613 static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
39614 TargetLowering::DAGCombinerInfo &DCI,
39615 const X86Subtarget &Subtarget) {
39616 assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
39617 X86ISD::VSRL == N->getOpcode()) &&
39618 "Unexpected shift opcode");
39619 EVT VT = N->getValueType(0);
39620 SDValue N0 = N->getOperand(0);
39621 SDValue N1 = N->getOperand(1);
39623 // Shift zero -> zero.
39624 if (ISD::isBuildVectorAllZeros(N0.getNode()))
39625 return DAG.getConstant(0, SDLoc(N), VT);
39627 // Detect constant shift amounts.
39629 SmallVector<APInt, 32> EltBits;
39630 if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
39631 unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
39632 return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
39633 EltBits[0].getZExtValue(), DAG);
39636 APInt KnownUndef, KnownZero;
39637 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39638 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
39639 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
39641 return SDValue(N, 0);
39646 static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
39647 TargetLowering::DAGCombinerInfo &DCI,
39648 const X86Subtarget &Subtarget) {
39649 unsigned Opcode = N->getOpcode();
39650 assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
39651 X86ISD::VSRLI == Opcode) &&
39652 "Unexpected shift opcode");
39653 bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
39654 EVT VT = N->getValueType(0);
39655 SDValue N0 = N->getOperand(0);
39656 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
39657 assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
39658 "Unexpected value type");
39659 assert(N->getOperand(1).getValueType() == MVT::i8 &&
39660 "Unexpected shift amount type");
39662 // Out of range logical bit shifts are guaranteed to be zero.
39663 // Out of range arithmetic bit shifts splat the sign bit.
39664 unsigned ShiftVal = N->getConstantOperandVal(1);
39665 if (ShiftVal >= NumBitsPerElt) {
39667 return DAG.getConstant(0, SDLoc(N), VT);
39669 ShiftVal = NumBitsPerElt - 1;
39672 // Shift N0 by zero -> N0.
39676 // Shift zero -> zero.
39677 if (ISD::isBuildVectorAllZeros(N0.getNode()))
39678 return DAG.getConstant(0, SDLoc(N), VT);
39680 // Fold (VSRAI (VSRAI X, C1), C2) --> (VSRAI X, (C1 + C2)) with (C1 + C2)
39681 // clamped to (NumBitsPerElt - 1).
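// e.g. on v4i32, (VSRAI (VSRAI X, 20), 20) would need a shift of 40, which is
// clamped to (VSRAI X, 31): an arithmetic shift by 31 just splats the sign bit.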
39682 if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSRAI) {
39683 unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
39684 unsigned NewShiftVal = ShiftVal + ShiftVal2;
39685 if (NewShiftVal >= NumBitsPerElt)
39686 NewShiftVal = NumBitsPerElt - 1;
39687 return DAG.getNode(X86ISD::VSRAI, SDLoc(N), VT, N0.getOperand(0),
39688 DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
39691 // We can decode 'whole byte' logical bit shifts as shuffles.
39692 if (LogicalShift && (ShiftVal % 8) == 0) {
39694 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
39698 // Constant Folding.
39700 SmallVector<APInt, 32> EltBits;
39701 if (N->isOnlyUserOf(N0.getNode()) &&
39702 getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
39703 assert(EltBits.size() == VT.getVectorNumElements() &&
39704 "Unexpected shift value type");
39705 // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
39706 // created an undef input due to no input bits being demanded, but user
39707 // still expects 0 in other bits.
39708 for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
39709 APInt &Elt = EltBits[i];
39712 else if (X86ISD::VSHLI == Opcode)
39714 else if (X86ISD::VSRAI == Opcode)
39715 Elt.ashrInPlace(ShiftVal);
39717 Elt.lshrInPlace(ShiftVal);
39719 // Reset undef elements since they were zeroed above.
39721 return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
39724 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39725 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
39726 APInt::getAllOnesValue(NumBitsPerElt), DCI))
39727 return SDValue(N, 0);
39732 static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
39733 TargetLowering::DAGCombinerInfo &DCI,
39734 const X86Subtarget &Subtarget) {
39735 EVT VT = N->getValueType(0);
39736 assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
39737 (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16)) &&
39738 "Unexpected vector insertion");
39740 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
39741 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39742 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
39743 APInt::getAllOnesValue(NumBitsPerElt), DCI))
39744 return SDValue(N, 0);
39746 // Attempt to combine PINSRB/PINSRW patterns to a shuffle.
39748 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
39754 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
39755 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
39756 /// OR -> CMPNEQSS.
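/// Illustrative sketch: for scalar f32 %a, %b,
///   (and (setcc COND_E, (X86cmp %a, %b)), (setcc COND_NP, (X86cmp %a, %b)))
/// i.e. "equal and not NaN", is rewritten to a single CMPEQSS whose
/// all-ones/all-zeros result is masked down to bit 0.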
39757 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
39758 TargetLowering::DAGCombinerInfo &DCI,
39759 const X86Subtarget &Subtarget) {
39762 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
39763 // we're requiring SSE2 for both.
39764 if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
39765 SDValue N0 = N->getOperand(0);
39766 SDValue N1 = N->getOperand(1);
39767 SDValue CMP0 = N0.getOperand(1);
39768 SDValue CMP1 = N1.getOperand(1);
39771 // The SETCCs should both refer to the same CMP.
39772 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
39775 SDValue CMP00 = CMP0->getOperand(0);
39776 SDValue CMP01 = CMP0->getOperand(1);
39777 EVT VT = CMP00.getValueType();
39779 if (VT == MVT::f32 || VT == MVT::f64) {
39780 bool ExpectingFlags = false;
39781 // Check for any users that want flags:
39782 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
39783 !ExpectingFlags && UI != UE; ++UI)
39784 switch (UI->getOpcode()) {
39789 ExpectingFlags = true;
39791 case ISD::CopyToReg:
39792 case ISD::SIGN_EXTEND:
39793 case ISD::ZERO_EXTEND:
39794 case ISD::ANY_EXTEND:
39798 if (!ExpectingFlags) {
39799 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
39800 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
39802 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
39803 X86::CondCode tmp = cc0;
39808 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
39809 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
39810 // FIXME: need symbolic constants for these magic numbers.
39811 // See X86ATTInstPrinter.cpp:printSSECC().
39812 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
39813 if (Subtarget.hasAVX512()) {
39815 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
39816 DAG.getTargetConstant(x86cc, DL, MVT::i8));
39817 // Need to fill with zeros to ensure the bitcast will produce zeroes
39818 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
39819 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
39820 DAG.getConstant(0, DL, MVT::v16i1),
39821 FSetCC, DAG.getIntPtrConstant(0, DL));
39822 return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
39823 N->getSimpleValueType(0));
39825 SDValue OnesOrZeroesF =
39826 DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
39827 CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));
39829 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
39830 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
39832 if (is64BitFP && !Subtarget.is64Bit()) {
39833 // On a 32-bit target, we cannot bitcast the 64-bit float to a
39834 // 64-bit integer, since that's not a legal type. Since
39835 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
39836 // bits, but can do this little dance to extract the lowest 32 bits
39837 // and work with those going forward.
39838 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
39840 SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
39841 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
39842 Vector32, DAG.getIntPtrConstant(0, DL));
39846 SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
39847 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
39848 DAG.getConstant(1, DL, IntVT));
39849 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
39851 return OneBitOfTruth;
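// As a sketch of the overall rewrite for scalar floats:
//   (and (setcc E, (fcmp x, y)), (setcc NP, (fcmp x, y)))
// becomes a single CMPEQSS/CMPEQSD-style compare whose all-ones/all-zeros
// result is bitcast to an integer and masked down to bit 0.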
39859 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
39860 static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
39861 assert(N->getOpcode() == ISD::AND);
39863 MVT VT = N->getSimpleValueType(0);
39864 if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
39868 SDValue N0 = N->getOperand(0);
39869 SDValue N1 = N->getOperand(1);
39871 if (SDValue Not = IsNOT(N0, DAG)) {
39874 } else if (SDValue Not = IsNOT(N1, DAG)) {
39880 X = DAG.getBitcast(VT, X);
39881 Y = DAG.getBitcast(VT, Y);
39882 return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
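// For example, with v4i32 operands:
//   (and (xor A, <-1,-1,-1,-1>), B) --> (X86ISD::ANDNP A, B)
// which maps onto PANDN/VPANDN and folds the inversion of A into the
// instruction.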
39885 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
39886 // register. In most cases we actually compare or select YMM-sized registers
39887 // and mixing the two types creates horrible code. This method optimizes
39888 // some of the transition sequences.
39889 // Even with AVX-512 this is still useful for removing casts around logical
39890 // operations on vXi1 mask types.
39891 static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
39892 const X86Subtarget &Subtarget) {
39893 EVT VT = N->getValueType(0);
39894 assert(VT.isVector() && "Expected vector type");
39896 assert((N->getOpcode() == ISD::ANY_EXTEND ||
39897 N->getOpcode() == ISD::ZERO_EXTEND ||
39898 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
39900 SDValue Narrow = N->getOperand(0);
39901 EVT NarrowVT = Narrow.getValueType();
39903 if (Narrow->getOpcode() != ISD::XOR &&
39904 Narrow->getOpcode() != ISD::AND &&
39905 Narrow->getOpcode() != ISD::OR)
39908 SDValue N0 = Narrow->getOperand(0);
39909 SDValue N1 = Narrow->getOperand(1);
39912 // The Left side has to be a trunc.
39913 if (N0.getOpcode() != ISD::TRUNCATE)
39916 // The type of the truncated inputs.
39917 if (N0.getOperand(0).getValueType() != VT)
39920 // The right side has to be a 'trunc' or a constant vector.
39921 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
39922 N1.getOperand(0).getValueType() == VT;
39924 !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
39927 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39929 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), VT))
39932 // Set N0 and N1 to hold the inputs to the new wide operation.
39933 N0 = N0.getOperand(0);
39935 N1 = N1.getOperand(0);
39937 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
39939 // Generate the wide operation.
39940 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1);
39941 unsigned Opcode = N->getOpcode();
39943 default: llvm_unreachable("Unexpected opcode");
39944 case ISD::ANY_EXTEND:
39946 case ISD::ZERO_EXTEND:
39947 return DAG.getZeroExtendInReg(Op, DL, NarrowVT.getScalarType());
39948 case ISD::SIGN_EXTEND:
39949 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
39950 Op, DAG.getValueType(NarrowVT));
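// As a rough sketch: (sext (and (trunc A), (trunc B))), where A and B are
// already of the wide type VT, is rebuilt as
//   (sign_extend_inreg (and A, B), NarrowVT)
// so the logic stays in the wide vector type instead of bouncing through the
// narrow mask type.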
39954 /// If both input operands of a logic op are being cast from floating point
39955 /// types, try to convert this into a floating point logic node to avoid
39956 /// unnecessary moves from SSE to integer registers.
39957 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
39958 const X86Subtarget &Subtarget) {
39959 EVT VT = N->getValueType(0);
39960 SDValue N0 = N->getOperand(0);
39961 SDValue N1 = N->getOperand(1);
39964 if (N0.getOpcode() != ISD::BITCAST || N1.getOpcode() != ISD::BITCAST)
39967 SDValue N00 = N0.getOperand(0);
39968 SDValue N10 = N1.getOperand(0);
39969 EVT N00Type = N00.getValueType();
39970 EVT N10Type = N10.getValueType();
39972 // Ensure that both types are the same and are legal scalar fp types.
39973 if (N00Type != N10Type ||
39974 !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
39975 (Subtarget.hasSSE2() && N00Type == MVT::f64)))
39979 switch (N->getOpcode()) {
39980 default: llvm_unreachable("Unexpected input node for FP logic conversion");
39981 case ISD::AND: FPOpcode = X86ISD::FAND; break;
39982 case ISD::OR: FPOpcode = X86ISD::FOR; break;
39983 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
39986 SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
39987 return DAG.getBitcast(VT, FPLogic);
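// For example:
//   (and (bitcast f32 X to i32), (bitcast f32 Y to i32))
// becomes (bitcast (X86ISD::FAND X, Y) to i32), keeping the value in an SSE
// register instead of moving it to a GPR just for the integer AND.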
39990 /// If this is a zero/all-bits result that is bitwise-anded with a low bits
39991 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
39992 /// with a shift-right to eliminate loading the vector constant mask value.
39993 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
39994 const X86Subtarget &Subtarget) {
39995 SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
39996 SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
39997 EVT VT0 = Op0.getValueType();
39998 EVT VT1 = Op1.getValueType();
40000 if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
40004 if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
40005 !SplatVal.isMask())
40008 // Don't prevent creation of ANDN.
40009 if (isBitwiseNot(Op0))
40012 if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
40015 unsigned EltBitWidth = VT0.getScalarSizeInBits();
40016 if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
40020 unsigned ShiftVal = SplatVal.countTrailingOnes();
40021 SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
40022 SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
40023 return DAG.getBitcast(N->getValueType(0), Shift);
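// For instance, if Op0 is a v4i32 compare result (every bit of each element
// equals its sign bit) and the mask is a splat of 1, the AND becomes
// (X86ISD::VSRLI Op0, 31), avoiding the load of the constant mask vector.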
40026 // Get the index node from the lowered DAG of a GEP IR instruction with one
40027 // indexing dimension.
40028 static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
40029 if (Ld->isIndexed())
40032 SDValue Base = Ld->getBasePtr();
40034 if (Base.getOpcode() != ISD::ADD)
40037 SDValue ShiftedIndex = Base.getOperand(0);
40039 if (ShiftedIndex.getOpcode() != ISD::SHL)
40042 return ShiftedIndex.getOperand(0);
40046 static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
40047 if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
40048 switch (VT.getSizeInBits()) {
40049 default: return false;
40050 case 64: return Subtarget.is64Bit();
40051 case 32: return true;
40057 // This function recognizes cases where the X86 bzhi instruction can replace an
40058 // 'and-load' sequence.
40059 // When an integer value is loaded from an array of constants defined as
40062 //   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
40064 // and a bitwise and is then applied to the result with another input,
40065 // it's equivalent to performing bzhi (zero high bits) on that input, with the
40066 // same index as the load.
40067 static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
40068 const X86Subtarget &Subtarget) {
40069 MVT VT = Node->getSimpleValueType(0);
40072 // Check if subtarget has BZHI instruction for the node's type
40073 if (!hasBZHI(Subtarget, VT))
40076 // Try matching the pattern for both operands.
40077 for (unsigned i = 0; i < 2; i++) {
40078 SDValue N = Node->getOperand(i);
40079 LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
40081 // continue if the operand is not a load instruction
40085 const Value *MemOp = Ld->getMemOperand()->getValue();
40090 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
40091 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
40092 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
40094 Constant *Init = GV->getInitializer();
40095 Type *Ty = Init->getType();
40096 if (!isa<ConstantDataArray>(Init) ||
40097 !Ty->getArrayElementType()->isIntegerTy() ||
40098 Ty->getArrayElementType()->getScalarSizeInBits() !=
40099 VT.getSizeInBits() ||
40100 Ty->getArrayNumElements() >
40101 Ty->getArrayElementType()->getScalarSizeInBits())
40104 // Check if the array's constant elements are suitable to our case.
40105 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
40106 bool ConstantsMatch = true;
40107 for (uint64_t j = 0; j < ArrayElementCount; j++) {
40108 ConstantInt *Elem =
40109 dyn_cast<ConstantInt>(Init->getAggregateElement(j));
40110 if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
40111 ConstantsMatch = false;
40115 if (!ConstantsMatch)
40118 // Do the transformation (for the 32-bit type):
40119 //   from: (and (load arr[idx]), inp)
40120 //   to:   (and (srl 0xFFFFFFFF, (sub 32, idx)), inp)
40121 // which will then be selected as one bzhi instruction.
40122 SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
40123 SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
40125 // Get the Node which indexes into the array.
40126 SDValue Index = getIndexFromUnindexedLoad(Ld);
40129 Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
40131 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
40132 Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
40134 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
40135 SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
40137 return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
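// A small C-level illustration of the pattern being matched (the names are
// only illustrative):
//   static const unsigned masks[32] = {0x0, 0x1, 0x3, 0x7, /* 2^j - 1 */ ...};
//   unsigned keep_low(unsigned x, unsigned i) { return x & masks[i]; }
// The and-of-loaded-mask is rewritten as roughly x & (0xFFFFFFFF >> (32 - i)),
// which BMI2 targets then select as a single BZHI.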
40145 // Look for (and (ctpop X), 1) which is the IR form of __builtin_parity.
40146 // Turn it into series of XORs and a setnp.
40147 static SDValue combineParity(SDNode *N, SelectionDAG &DAG,
40148 const X86Subtarget &Subtarget) {
40149 EVT VT = N->getValueType(0);
40151 // We only support 64-bit and 32-bit. 64-bit requires special handling
40152 // unless the 64-bit popcnt instruction is legal.
40153 if (VT != MVT::i32 && VT != MVT::i64)
40156 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40157 if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT))
40160 SDValue N0 = N->getOperand(0);
40161 SDValue N1 = N->getOperand(1);
40163 // LHS needs to be a single use CTPOP.
40164 if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse())
40167 // RHS needs to be 1.
40168 if (!isOneConstant(N1))
40172 SDValue X = N0.getOperand(0);
40174 // If this is 64-bit, it's always best to xor the two 32-bit pieces together
40175 // even if we have popcnt.
40176 if (VT == MVT::i64) {
40177 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
40178 DAG.getNode(ISD::SRL, DL, VT, X,
40179 DAG.getConstant(32, DL, MVT::i8)));
40180 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
40181 X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
40182 // Generate a 32-bit parity idiom. This will bring us back here if we need
40183 // to expand it too.
40184 SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32,
40185 DAG.getNode(ISD::CTPOP, DL, MVT::i32, X),
40186 DAG.getConstant(1, DL, MVT::i32));
40187 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity);
40189 assert(VT == MVT::i32 && "Unexpected VT!");
40191 // Xor the high and low 16-bits together using a 32-bit operation.
40192 SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X,
40193 DAG.getConstant(16, DL, MVT::i8));
40194 X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16);
40196 // Finally xor the low 2 bytes together and use an 8-bit flag-setting xor.
40197 // This should allow an h-reg to be used to save a shift.
40198 // FIXME: We only get an h-reg in 32-bit mode.
40199 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
40200 DAG.getNode(ISD::SRL, DL, VT, X,
40201 DAG.getConstant(8, DL, MVT::i8)));
40202 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
40203 SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
40204 SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
40206 // Copy the inverse of the parity flag into a register with setcc.
40207 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
40208 // Zero extend to original type.
40209 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
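// Roughly, for a 32-bit input x this emits:
//   t = x ^ (x >> 16);
//   flags = xor8(lo8(t), lo8(t >> 8));   // 8-bit XOR sets PF
//   result = zext(setnp(flags));
// i.e. the inverted parity flag of the final 8-bit XOR is the parity of x.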
40213 // Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef,))), C)
40214 // where C is a mask containing the same number of bits as the setcc and
40215 // where the setcc freely zeroes the upper bits of the k-register. We can
40216 // replace the undef in the concat with 0s and remove the AND. This mainly
40217 // helps with v2i1/v4i1 setcc being cast to scalar.
40218 static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
40219 const X86Subtarget &Subtarget) {
40220 assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
40222 EVT VT = N->getValueType(0);
40224 // Make sure this is an AND with constant. We will check the value of the
40225 // constant later.
40226 if (!isa<ConstantSDNode>(N->getOperand(1)))
40229 // This is implied by the ConstantSDNode.
40230 assert(!VT.isVector() && "Expected scalar VT!");
40232 if (N->getOperand(0).getOpcode() != ISD::BITCAST ||
40233 !N->getOperand(0).hasOneUse() ||
40234 !N->getOperand(0).getOperand(0).hasOneUse())
40237 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40238 SDValue Src = N->getOperand(0).getOperand(0);
40239 EVT SrcVT = Src.getValueType();
40240 if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
40241 !TLI.isTypeLegal(SrcVT))
40244 if (Src.getOpcode() != ISD::CONCAT_VECTORS)
40247 // We only care about the first subvector of the concat; we expect the
40248 // other subvectors to be ignored due to the AND if we make the change.
40249 SDValue SubVec = Src.getOperand(0);
40250 EVT SubVecVT = SubVec.getValueType();
40252 // First subvector should be a setcc with a legal result type. The RHS of the
40253 // AND should be a mask with this many bits.
40254 if (SubVec.getOpcode() != ISD::SETCC || !TLI.isTypeLegal(SubVecVT) ||
40255 !N->getConstantOperandAPInt(1).isMask(SubVecVT.getVectorNumElements()))
40258 EVT SetccVT = SubVec.getOperand(0).getValueType();
40259 if (!TLI.isTypeLegal(SetccVT) ||
40260 !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
40263 if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
40266 // We passed all the checks. Rebuild the concat_vectors with zeroes
40267 // and cast it back to VT.
40269 SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
40270 DAG.getConstant(0, dl, SubVecVT));
40272 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT,
40274 return DAG.getBitcast(VT, Concat);
40277 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
40278 TargetLowering::DAGCombinerInfo &DCI,
40279 const X86Subtarget &Subtarget) {
40280 EVT VT = N->getValueType(0);
40282 // If this is SSE1 only convert to FAND to avoid scalarization.
40283 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
40284 return DAG.getBitcast(
40285 MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
40286 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
40287 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
40290 // Use a 32-bit and+zext if upper bits known zero.
40291 if (VT == MVT::i64 && Subtarget.is64Bit() &&
40292 !isa<ConstantSDNode>(N->getOperand(1))) {
40293 APInt HiMask = APInt::getHighBitsSet(64, 32);
40294 if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
40295 DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
40297 SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
40298 SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
40299 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
40300 DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
40304 // This must be done before legalization has expanded the ctpop.
40305 if (SDValue V = combineParity(N, DAG, Subtarget))
40308 // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
40309 // TODO: Support multiple SrcOps.
40310 if (VT == MVT::i1) {
40311 SmallVector<SDValue, 2> SrcOps;
40312 if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps) &&
40313 SrcOps.size() == 1) {
40315 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40316 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
40317 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
40318 SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
40319 if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
40320 Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
40322 APInt AllBits = APInt::getAllOnesValue(NumElts);
40323 return DAG.getSetCC(dl, MVT::i1, Mask,
40324 DAG.getConstant(AllBits, dl, MaskVT), ISD::SETEQ);
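// For example, an all_of-style reduction that ANDs together every lane of a
// v4i1 compare becomes roughly (setcc (movmsk ...), 0b1111, eq): one mask
// extraction plus a compare against the all-ones constant.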
40329 if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
40332 if (DCI.isBeforeLegalizeOps())
40335 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
40338 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
40341 if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
40344 if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
40347 if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
40350 // Attempt to recursively combine a bitmask AND with shuffles.
40351 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
40353 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
40357 // Attempt to combine a scalar bitmask AND with an extracted shuffle.
40358 if ((VT.getScalarSizeInBits() % 8) == 0 &&
40359 N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
40360 isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
40361 SDValue BitMask = N->getOperand(1);
40362 SDValue SrcVec = N->getOperand(0).getOperand(0);
40363 EVT SrcVecVT = SrcVec.getValueType();
40365 // Check that the constant bitmask masks whole bytes.
40367 SmallVector<APInt, 64> EltBits;
40368 if (VT == SrcVecVT.getScalarType() &&
40369 N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
40370 getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
40371 llvm::all_of(EltBits, [](APInt M) {
40372 return M.isNullValue() || M.isAllOnesValue();
40374 unsigned NumElts = SrcVecVT.getVectorNumElements();
40375 unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
40376 unsigned Idx = N->getOperand(0).getConstantOperandVal(1);
40378 // Create a root shuffle mask from the byte mask and the extracted index.
40379 SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
40380 for (unsigned i = 0; i != Scale; ++i) {
40383 int VecIdx = Scale * Idx + i;
40384 ShuffleMask[VecIdx] =
40385 EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
40388 if (SDValue Shuffle = combineX86ShufflesRecursively(
40389 {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
40390 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
40391 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
40392 N->getOperand(0).getOperand(1));
40399 // Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
40400 static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
40401 const X86Subtarget &Subtarget) {
40402 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
40404 MVT VT = N->getSimpleValueType(0);
40405 if (!VT.isVector() || (VT.getScalarSizeInBits() % 8) != 0)
40408 SDValue N0 = peekThroughBitcasts(N->getOperand(0));
40409 SDValue N1 = peekThroughBitcasts(N->getOperand(1));
40410 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
40413 // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
40414 // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
40415 bool UseVPTERNLOG = (Subtarget.hasAVX512() && VT.is512BitVector()) ||
40416 Subtarget.hasVLX();
40417 if (!(Subtarget.hasXOP() || UseVPTERNLOG ||
40418 !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
40421 // Attempt to extract constant byte masks.
40422 APInt UndefElts0, UndefElts1;
40423 SmallVector<APInt, 32> EltBits0, EltBits1;
40424 if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
40427 if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
40431 for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
40432 // TODO - add UNDEF elts support.
40433 if (UndefElts0[i] || UndefElts1[i])
40435 if (EltBits0[i] != ~EltBits1[i])
40440 SDValue X = N->getOperand(0);
40442 DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
40443 DAG.getBitcast(VT, N1.getOperand(0)));
40444 return DAG.getNode(ISD::OR, DL, VT, X, Y);
40447 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
40448 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
40449 if (N->getOpcode() != ISD::OR)
40452 SDValue N0 = N->getOperand(0);
40453 SDValue N1 = N->getOperand(1);
40455 // Canonicalize AND to LHS.
40456 if (N1.getOpcode() == ISD::AND)
40459 // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
40460 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
40463 Mask = N1.getOperand(0);
40464 X = N1.getOperand(1);
40466 // Check to see if the mask appeared in both the AND and ANDNP.
40467 if (N0.getOperand(0) == Mask)
40468 Y = N0.getOperand(1);
40469 else if (N0.getOperand(1) == Mask)
40470 Y = N0.getOperand(0);
40474 // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
40475 // ANDNP combine allows other combines to happen that prevent matching.
40479 // Try to fold:
40480 //   (or (and (m, y), (pandn m, x)))
40481 // into:
40482 //   (vselect m, y, x)
40483 // As a special case, try to fold:
40484 //   (or (and (m, (sub 0, x)), (pandn m, x)))
40485 // into:
40486 //   (sub (xor X, M), M)
40487 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
40488 const X86Subtarget &Subtarget) {
40489 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
40491 EVT VT = N->getValueType(0);
40492 if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
40493 (VT.is256BitVector() && Subtarget.hasInt256())))
40496 SDValue X, Y, Mask;
40497 if (!matchLogicBlend(N, X, Y, Mask))
40500 // Validate that X, Y, and Mask are bitcasts, and see through them.
40501 Mask = peekThroughBitcasts(Mask);
40502 X = peekThroughBitcasts(X);
40503 Y = peekThroughBitcasts(Y);
40505 EVT MaskVT = Mask.getValueType();
40506 unsigned EltBits = MaskVT.getScalarSizeInBits();
40508 // TODO: Attempt to handle floating point cases as well?
40509 if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
40514 // Attempt to combine to conditional negate: (sub (xor X, M), M)
40515 if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
40519 // PBLENDVB is only available on SSE 4.1.
40520 if (!Subtarget.hasSSE41())
40523 MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
40525 X = DAG.getBitcast(BlendVT, X);
40526 Y = DAG.getBitcast(BlendVT, Y);
40527 Mask = DAG.getBitcast(BlendVT, Mask);
40528 Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
40529 return DAG.getBitcast(VT, Mask);
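// For instance, this turns
//   (or (and M, Y), (andnp M, X))
// where M is known to be all-ones or all-zeros per element, into
//   (vselect M, Y, X)
// which picks Y where M is set and X elsewhere, i.e. a single PBLENDVB on
// SSE4.1 targets.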
40532 // Helper function for combineOrCmpEqZeroToCtlzSrl
40533 // Transforms:
40534 //   seteq(cmp x, 0)
40535 // into:
40536 //   srl(ctlz x), log2(bitsize(x))
40537 // Input pattern is checked by caller.
40538 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
40539 SelectionDAG &DAG) {
40540 SDValue Cmp = Op.getOperand(1);
40541 EVT VT = Cmp.getOperand(0).getValueType();
40542 unsigned Log2b = Log2_32(VT.getSizeInBits());
40544 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
40545 // The result of the shift is true or false, and on X86, the 32-bit
40546 // encoding of shr and lzcnt is more desirable.
40547 SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
40548 SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
40549 DAG.getConstant(Log2b, dl, MVT::i8));
40550 return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
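// Concretely, for a 32-bit value x this rewrites (x == 0) as (ctlz(x) >> 5):
// ctlz(0) is 32, so only a zero input leaves a set bit after the shift.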
40553 // Try to transform:
40554 //   zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
40555 // into:
40556 //   srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
40557 // Will also attempt to match more generic cases, eg:
40558 // zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
40559 // Only applies if the target supports the FastLZCNT feature.
40560 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
40561 TargetLowering::DAGCombinerInfo &DCI,
40562 const X86Subtarget &Subtarget) {
40563 if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
40566 auto isORCandidate = [](SDValue N) {
40567 return (N->getOpcode() == ISD::OR && N->hasOneUse());
40570 // Check the zero extend is extending to 32-bit or more. The code generated by
40571 // srl(ctlz) for 16-bit or less variants of the pattern would require extra
40572 // instructions to clear the upper bits.
40573 if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
40574 !isORCandidate(N->getOperand(0)))
40577 // Check the node matches: setcc(eq, cmp 0)
40578 auto isSetCCCandidate = [](SDValue N) {
40579 return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
40580 X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
40581 N->getOperand(1).getOpcode() == X86ISD::CMP &&
40582 isNullConstant(N->getOperand(1).getOperand(1)) &&
40583 N->getOperand(1).getValueType().bitsGE(MVT::i32);
40586 SDNode *OR = N->getOperand(0).getNode();
40587 SDValue LHS = OR->getOperand(0);
40588 SDValue RHS = OR->getOperand(1);
40590 // Save nodes matching or(or, setcc(eq, cmp 0)).
40591 SmallVector<SDNode *, 2> ORNodes;
40592 while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
40593 (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
40594 ORNodes.push_back(OR);
40595 OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
40596 LHS = OR->getOperand(0);
40597 RHS = OR->getOperand(1);
40600 // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
40601 if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
40602 !isORCandidate(SDValue(OR, 0)))
40605 // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern, try to lower it
40606 // to:
40607 //   or(srl(ctlz), srl(ctlz)).
40608 // The dag combiner can then fold it into:
40609 // srl(or(ctlz, ctlz)).
40610 EVT VT = OR->getValueType(0);
40611 SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
40612 SDValue Ret, NewRHS;
40613 if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
40614 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);
40619 // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
40620 while (ORNodes.size() > 0) {
40621 OR = ORNodes.pop_back_val();
40622 LHS = OR->getOperand(0);
40623 RHS = OR->getOperand(1);
40624 // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
40625 if (RHS->getOpcode() == ISD::OR)
40626 std::swap(LHS, RHS);
40627 NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
40630 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
40634 Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
40639 static SDValue combineOrShiftToFunnelShift(SDNode *N, SelectionDAG &DAG,
40640 const X86Subtarget &Subtarget) {
40641 assert(N->getOpcode() == ISD::OR && "Expected ISD::OR node");
40642 SDValue N0 = N->getOperand(0);
40643 SDValue N1 = N->getOperand(1);
40644 EVT VT = N->getValueType(0);
40645 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40647 if (!TLI.isOperationLegalOrCustom(ISD::FSHL, VT) ||
40648 !TLI.isOperationLegalOrCustom(ISD::FSHR, VT))
40651 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
40652 bool OptForSize = DAG.shouldOptForSize();
40653 unsigned Bits = VT.getScalarSizeInBits();
40655 // SHLD/SHRD instructions have lower register pressure, but on some
40656 // platforms they have higher latency than the equivalent
40657 // series of shifts/or that would otherwise be generated.
40658 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
40659 // have higher latencies and we are not optimizing for size.
40660 if (!OptForSize && Subtarget.isSHLDSlow())
40663 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
40665 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
40667 if (!N0.hasOneUse() || !N1.hasOneUse())
40670 EVT ShiftVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
40672 SDValue ShAmt0 = N0.getOperand(1);
40673 if (ShAmt0.getValueType() != ShiftVT)
40675 SDValue ShAmt1 = N1.getOperand(1);
40676 if (ShAmt1.getValueType() != ShiftVT)
40679 // Peek through any modulo shift masks.
40681 if (ShAmt0.getOpcode() == ISD::AND &&
40682 isa<ConstantSDNode>(ShAmt0.getOperand(1)) &&
40683 ShAmt0.getConstantOperandAPInt(1) == (Bits - 1)) {
40685 ShAmt0 = ShAmt0.getOperand(0);
40688 if (ShAmt1.getOpcode() == ISD::AND &&
40689 isa<ConstantSDNode>(ShAmt1.getOperand(1)) &&
40690 ShAmt1.getConstantOperandAPInt(1) == (Bits - 1)) {
40692 ShAmt1 = ShAmt1.getOperand(0);
40695 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
40696 ShAmt0 = ShAmt0.getOperand(0);
40697 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
40698 ShAmt1 = ShAmt1.getOperand(0);
40701 unsigned Opc = ISD::FSHL;
40702 SDValue Op0 = N0.getOperand(0);
40703 SDValue Op1 = N1.getOperand(0);
40704 if (ShAmt0.getOpcode() == ISD::SUB || ShAmt0.getOpcode() == ISD::XOR) {
40706 std::swap(Op0, Op1);
40707 std::swap(ShAmt0, ShAmt1);
40708 std::swap(ShMsk0, ShMsk1);
40711 auto GetFunnelShift = [&DAG, &DL, VT, Opc, &ShiftVT](SDValue Op0, SDValue Op1,
40713 if (Opc == ISD::FSHR)
40714 std::swap(Op0, Op1);
40715 return DAG.getNode(Opc, DL, VT, Op0, Op1,
40716 DAG.getNode(ISD::TRUNCATE, DL, ShiftVT, Amt));
40719 // OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> FSHL( X, Y, C )
40720 // OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> FSHR( Y, X, C )
40721 // OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHL( X, Y, C )
40722 // OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHR( Y, X, C )
40723 // OR( SHL( X, AND( C, 31 ) ), SRL( Y, AND( 0 - C, 31 ) ) ) -> FSHL( X, Y, C )
40724 // OR( SRL( X, AND( C, 31 ) ), SHL( Y, AND( 0 - C, 31 ) ) ) -> FSHR( Y, X, C )
40725 if (ShAmt1.getOpcode() == ISD::SUB) {
40726 SDValue Sum = ShAmt1.getOperand(0);
40727 if (auto *SumC = dyn_cast<ConstantSDNode>(Sum)) {
40728 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
40729 if (ShAmt1Op1.getOpcode() == ISD::AND &&
40730 isa<ConstantSDNode>(ShAmt1Op1.getOperand(1)) &&
40731 ShAmt1Op1.getConstantOperandAPInt(1) == (Bits - 1)) {
40732 ShMsk1 = ShAmt1Op1;
40733 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
40735 if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
40736 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
40737 if ((SumC->getAPIntValue() == Bits ||
40738 (SumC->getAPIntValue() == 0 && ShMsk1)) &&
40739 ShAmt1Op1 == ShAmt0)
40740 return GetFunnelShift(Op0, Op1, ShAmt0);
40742 } else if (auto *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
40743 auto *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
40744 if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
40745 return GetFunnelShift(Op0, Op1, ShAmt0);
40746 } else if (ShAmt1.getOpcode() == ISD::XOR) {
40747 SDValue Mask = ShAmt1.getOperand(1);
40748 if (auto *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
40749 unsigned InnerShift = (ISD::FSHL == Opc ? ISD::SRL : ISD::SHL);
40750 SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
40751 if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
40752 ShAmt1Op0 = ShAmt1Op0.getOperand(0);
40753 if (MaskC->getSExtValue() == (Bits - 1) &&
40754 (ShAmt1Op0 == ShAmt0 || ShAmt1Op0 == ShMsk0)) {
40755 if (Op1.getOpcode() == InnerShift &&
40756 isa<ConstantSDNode>(Op1.getOperand(1)) &&
40757 Op1.getConstantOperandAPInt(1).isOneValue()) {
40758 return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
40760 // Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ).
40761 if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
40762 Op1.getOperand(0) == Op1.getOperand(1)) {
40763 return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
40772 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
40773 TargetLowering::DAGCombinerInfo &DCI,
40774 const X86Subtarget &Subtarget) {
40775 SDValue N0 = N->getOperand(0);
40776 SDValue N1 = N->getOperand(1);
40777 EVT VT = N->getValueType(0);
40779 // If this is SSE1 only convert to FOR to avoid scalarization.
40780 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
40781 return DAG.getBitcast(MVT::v4i32,
40782 DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
40783 DAG.getBitcast(MVT::v4f32, N0),
40784 DAG.getBitcast(MVT::v4f32, N1)));
40787 // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
40788 // TODO: Support multiple SrcOps.
40789 if (VT == MVT::i1) {
40790 SmallVector<SDValue, 2> SrcOps;
40791 if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps) &&
40792 SrcOps.size() == 1) {
40794 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40795 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
40796 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
40797 SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
40798 if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
40799 Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
40801 APInt AllBits = APInt::getNullValue(NumElts);
40802 return DAG.getSetCC(dl, MVT::i1, Mask,
40803 DAG.getConstant(AllBits, dl, MaskVT), ISD::SETNE);
40808 if (DCI.isBeforeLegalizeOps())
40811 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
40814 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
40817 if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
40820 if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
40823 if (SDValue R = combineOrShiftToFunnelShift(N, DAG, Subtarget))
40826 // Attempt to recursively combine an OR of shuffles.
40827 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
40829 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
40836 /// Try to turn tests against the signbit in the form of:
40837 ///   XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
40838 /// into:
40839 ///   SETGT(X, -1)
40840 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
40841 // This is only worth doing if the output type is i8 or i1.
40842 EVT ResultType = N->getValueType(0);
40843 if (ResultType != MVT::i8 && ResultType != MVT::i1)
40846 SDValue N0 = N->getOperand(0);
40847 SDValue N1 = N->getOperand(1);
40849 // We should be performing an xor against a truncated shift.
40850 if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
40853 // Make sure we are performing an xor against one.
40854 if (!isOneConstant(N1))
40857 // SetCC on x86 zero extends so only act on this if it's a logical shift.
40858 SDValue Shift = N0.getOperand(0);
40859 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
40862 // Make sure we are truncating from one of i16, i32 or i64.
40863 EVT ShiftTy = Shift.getValueType();
40864 if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
40867 // Make sure the shift amount extracts the sign bit.
40868 if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
40869 Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
40872 // Create a greater-than comparison against -1.
40873 // N.B. Using SETGE against 0 works but we want a canonical-looking
40874 // comparison; using SETGT matches up with what TranslateX86CC expects.
40876 SDValue ShiftOp = Shift.getOperand(0);
40877 EVT ShiftOpTy = ShiftOp.getValueType();
40878 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40879 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
40880 *DAG.getContext(), ResultType);
40881 SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
40882 DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
40883 if (SetCCResultType != ResultType)
40884 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
40888 /// Turn vector tests of the signbit in the form of:
40889 ///   xor (sra X, elt_size(X)-1), -1
40890 /// into:
40891 ///   pcmpgt X, -1
40893 /// This should be called before type legalization because the pattern may not
40894 /// persist after that.
40895 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
40896 const X86Subtarget &Subtarget) {
40897 EVT VT = N->getValueType(0);
40898 if (!VT.isSimple())
40901 switch (VT.getSimpleVT().SimpleTy) {
40902 default: return SDValue();
40906 case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
40910 case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
40913 // There must be a shift right algebraic before the xor, and the xor must be a
40914 // 'not' operation.
40915 SDValue Shift = N->getOperand(0);
40916 SDValue Ones = N->getOperand(1);
40917 if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
40918 !ISD::isBuildVectorAllOnes(Ones.getNode()))
40921 // The shift should be smearing the sign bit across each vector element.
40923 isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
40925 ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
40928 // Create a greater-than comparison against -1. We don't use the more obvious
40929 // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
40930 return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
40933 /// Detect patterns of truncation with unsigned saturation:
40935 /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
40936 /// Return the source value x to be truncated or SDValue() if the pattern was
40937 /// not matched.
40939 /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
40940 /// where C1 >= 0 and C2 is unsigned max of destination type.
40942 /// (truncate (smax (smin (x, C2), C1)) to dest_type)
40943 /// where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
40945 /// These two patterns are equivalent to:
40946 /// (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
40947 /// So return the smax(x, C1) value to be truncated or SDValue() if the
40948 /// pattern was not matched.
40949 static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
40951 EVT InVT = In.getValueType();
40953 // Saturation with truncation. We truncate from InVT to VT.
40954 assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
40955 "Unexpected types for truncate operation");
40957 // Match min/max and return limit value as a parameter.
40958 auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
40959 if (V.getOpcode() == Opcode &&
40960 ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
40961 return V.getOperand(0);
40966 if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
40967 // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
40968 // the element size of the destination type.
40969 if (C2.isMask(VT.getScalarSizeInBits()))
40972 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
40973 if (MatchMinMax(SMin, ISD::SMAX, C1))
40974 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
40977 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
40978 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
40979 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
40981 return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
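// For example, truncating v8i32 to v8i16 with unsigned saturation matches
//   (trunc (umin X, 65535))
// and, via the smin/smax forms above, the equivalent
//   (trunc (smin (smax X, 0), 65535))
// clamp as well; the caller can then emit a saturating truncate such as
// VPMOVUSDW for the returned value.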
40987 /// Detect patterns of truncation with signed saturation:
40988 /// (truncate (smin ((smax (x, signed_min_of_dest_type)),
40989 /// signed_max_of_dest_type)) to dest_type)
40991 /// (truncate (smax ((smin (x, signed_max_of_dest_type)),
40992 /// signed_min_of_dest_type)) to dest_type).
40993 /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
40994 /// Return the source value to be truncated or SDValue() if the pattern was not
40995 /// matched.
40996 static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
40997 unsigned NumDstBits = VT.getScalarSizeInBits();
40998 unsigned NumSrcBits = In.getScalarValueSizeInBits();
40999 assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
41001 auto MatchMinMax = [](SDValue V, unsigned Opcode,
41002 const APInt &Limit) -> SDValue {
41004 if (V.getOpcode() == Opcode &&
41005 ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
41006 return V.getOperand(0);
41010 APInt SignedMax, SignedMin;
41012 SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
41013 SignedMin = APInt(NumSrcBits, 0);
41015 SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
41016 SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
41019 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
41020 if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
41023 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
41024 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
41030 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
41032 const X86Subtarget &Subtarget) {
41033 if (!Subtarget.hasSSE2() || !VT.isVector())
41036 EVT SVT = VT.getVectorElementType();
41037 EVT InVT = In.getValueType();
41038 EVT InSVT = InVT.getVectorElementType();
41040 // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
41041 // split across two registers, we can use a packusdw+perm to clamp to 0-65535
41042 // and concatenate at the same time. Then we can use a final vpmovuswb to
41043 // clamp to 0-255.
41044 if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
41045 InVT == MVT::v16i32 && VT == MVT::v16i8) {
41046 if (auto USatVal = detectSSatPattern(In, VT, true)) {
41047 // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
41048 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
41049 DL, DAG, Subtarget);
41050 assert(Mid && "Failed to pack!");
41051 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
41055 // vXi32 truncate instructions are available with AVX512F.
41056 // vXi16 truncate instructions are only available with AVX512BW.
41057 // For 256-bit or smaller vectors, we require VLX.
41058 // FIXME: We could widen truncates to 512 to remove the VLX restriction.
41059 // If the result type is 256 bits or larger and we have disabled 512-bit
41060 // registers, we should go ahead and use the pack instructions if possible.
41061 bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
41062 (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
41063 (InVT.getSizeInBits() > 128) &&
41064 (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
41065 !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
41067 if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
41068 VT.getSizeInBits() >= 64 &&
41069 (SVT == MVT::i8 || SVT == MVT::i16) &&
41070 (InSVT == MVT::i16 || InSVT == MVT::i32)) {
41071 if (auto USatVal = detectSSatPattern(In, VT, true)) {
41072 // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
41073 // Only do this when the result is at least 64 bits or we'd be leaving
41074 // dangling PACKSSDW nodes.
41075 if (SVT == MVT::i8 && InSVT == MVT::i32) {
41076 EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
41077 VT.getVectorNumElements());
41078 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
41080 assert(Mid && "Failed to pack!");
41081 SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
41083 assert(V && "Failed to pack!");
41085 } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
41086 return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
41089 if (auto SSatVal = detectSSatPattern(In, VT))
41090 return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
41094 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41095 if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
41096 Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI())) {
41097 unsigned TruncOpc = 0;
41099 if (auto SSatVal = detectSSatPattern(In, VT)) {
41101 TruncOpc = X86ISD::VTRUNCS;
41102 } else if (auto USatVal = detectUSatPattern(In, VT, DAG, DL)) {
41104 TruncOpc = X86ISD::VTRUNCUS;
41107 unsigned ResElts = VT.getVectorNumElements();
41108 // If the input type is less than 512 bits and we don't have VLX, we need
41109 // to widen to 512 bits.
41110 if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
41111 unsigned NumConcats = 512 / InVT.getSizeInBits();
41112 ResElts *= NumConcats;
41113 SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
41114 ConcatOps[0] = SatVal;
41115 InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
41116 NumConcats * InVT.getVectorNumElements());
41117 SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
41119 // Widen the result if it's narrower than 128 bits.
41120 if (ResElts * SVT.getSizeInBits() < 128)
41121 ResElts = 128 / SVT.getSizeInBits();
41122 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
41123 SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
41124 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
41125 DAG.getIntPtrConstant(0, DL));
41132 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
41133 /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
41134 /// X86ISD::AVG instruction.
41135 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
41136 const X86Subtarget &Subtarget,
41138 if (!VT.isVector())
41140 EVT InVT = In.getValueType();
41141 unsigned NumElems = VT.getVectorNumElements();
41143 EVT ScalarVT = VT.getVectorElementType();
41144 if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
41145 NumElems >= 2 && isPowerOf2_32(NumElems)))
41148 // InScalarVT is the intermediate type in the AVG pattern and it should be
41149 // wider than the original input type (i8/i16).
41150 EVT InScalarVT = InVT.getVectorElementType();
41151 if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
41154 if (!Subtarget.hasSSE2())
41157 // Detect the following pattern:
41159 // %1 = zext <N x i8> %a to <N x i32>
41160 // %2 = zext <N x i8> %b to <N x i32>
41161 // %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
41162 // %4 = add nuw nsw <N x i32> %3, %2
41163 // %5 = lshr <N x i32> %4, <i32 1 x N>
41164 // %6 = trunc <N x i32> %5 to <N x i8>
41166 // In AVX512, the last instruction can also be a trunc store.
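// In C terms the per-element computation being recognized is roughly:
//   r[i] = (uint8_t)((a[i] + b[i] + 1) >> 1);
// which is exactly what PAVGB/PAVGW compute.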
41167 if (In.getOpcode() != ISD::SRL)
41170 // A lambda checking the given SDValue is a constant vector and each element
41171 // is in the range [Min, Max].
41172 auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
41173 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
41174 if (!BV || !BV->isConstant())
41176 for (SDValue Op : V->ops()) {
41177 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
41180 const APInt &Val = C->getAPIntValue();
41181 if (Val.ult(Min) || Val.ugt(Max))
41187 // Check if each element of the vector is right-shifted by one.
41188 auto LHS = In.getOperand(0);
41189 auto RHS = In.getOperand(1);
41190 if (!IsConstVectorInRange(RHS, 1, 1))
41192 if (LHS.getOpcode() != ISD::ADD)
41195 // Detect a pattern of a + b + 1 where the order doesn't matter.
41196 SDValue Operands[3];
41197 Operands[0] = LHS.getOperand(0);
41198 Operands[1] = LHS.getOperand(1);
41200 auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
41201 ArrayRef<SDValue> Ops) {
41202 return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
41205 // Take care of the case when one of the operands is a constant vector whose
41206 // element is in the range [1, 256].
41207 if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
41208 Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
41209 Operands[0].getOperand(0).getValueType() == VT) {
41210 // The pattern is detected. Subtract one from the constant vector, then
41211 // demote it and emit X86ISD::AVG instruction.
41212 SDValue VecOnes = DAG.getConstant(1, DL, InVT);
41213 Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
41214 Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
41215 return SplitOpsAndApply(DAG, Subtarget, DL, VT,
41216 { Operands[0].getOperand(0), Operands[1] },
41220 // Matches 'add like' patterns: add(Op0,Op1) + zext(or(Op0,Op1)).
41221 // Match the or case only if its 'add-like' - can be replaced by an add.
41222 auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
41223 if (ISD::ADD == V.getOpcode()) {
41224 Op0 = V.getOperand(0);
41225 Op1 = V.getOperand(1);
41228 if (ISD::ZERO_EXTEND != V.getOpcode())
41230 V = V.getOperand(0);
41231 if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
41232 !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
41234 Op0 = V.getOperand(0);
41235 Op1 = V.getOperand(1);
41240 if (FindAddLike(Operands[0], Op0, Op1))
41241 std::swap(Operands[0], Operands[1]);
41242 else if (!FindAddLike(Operands[1], Op0, Op1))
41247 // Now we have three operands of two additions. Check that one of them is a
41248 // constant vector with ones, and the other two can be promoted from i8/i16.
41249 for (int i = 0; i < 3; ++i) {
41250 if (!IsConstVectorInRange(Operands[i], 1, 1))
41252 std::swap(Operands[i], Operands[2]);
41254 // Check if Operands[0] and Operands[1] are results of type promotion.
41255 for (int j = 0; j < 2; ++j)
41256 if (Operands[j].getValueType() != VT) {
41257 if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
41258 Operands[j].getOperand(0).getValueType() != VT)
41260 Operands[j] = Operands[j].getOperand(0);
41263 // The pattern is detected, emit X86ISD::AVG instruction(s).
41264 return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Operands[0], Operands[1]},
41271 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
41272 TargetLowering::DAGCombinerInfo &DCI,
41273 const X86Subtarget &Subtarget) {
41274 LoadSDNode *Ld = cast<LoadSDNode>(N);
41275 EVT RegVT = Ld->getValueType(0);
41276 EVT MemVT = Ld->getMemoryVT();
41278 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41280 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
41281 // into two 16-byte operations. Also split non-temporal aligned loads on
41282 // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
41283 ISD::LoadExtType Ext = Ld->getExtensionType();
41285 unsigned Alignment = Ld->getAlignment();
41286 if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
41287 Ext == ISD::NON_EXTLOAD &&
41288 ((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
41289 (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
41290 *Ld->getMemOperand(), &Fast) &&
41292 unsigned NumElems = RegVT.getVectorNumElements();
41296 unsigned HalfAlign = 16;
41297 SDValue Ptr1 = Ld->getBasePtr();
41298 SDValue Ptr2 = DAG.getMemBasePlusOffset(Ptr1, HalfAlign, dl);
41299 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
41302 DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
41303 Alignment, Ld->getMemOperand()->getFlags());
41304 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
41305 Ld->getPointerInfo().getWithOffset(HalfAlign),
41306 MinAlign(Alignment, HalfAlign),
41307 Ld->getMemOperand()->getFlags());
41308 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
41309 Load1.getValue(1), Load2.getValue(1));
41311 SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
41312 return DCI.CombineTo(N, NewVec, TF, true);
41315 // Bool vector load - attempt to cast to an integer, as we have good
41316 // (vXiY *ext(vXi1 bitcast(iX))) handling.
41317 if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
41318 RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
41319 unsigned NumElts = RegVT.getVectorNumElements();
41320 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
41321 if (TLI.isTypeLegal(IntVT)) {
41322 SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
41323 Ld->getPointerInfo(), Alignment,
41324 Ld->getMemOperand()->getFlags());
41325 SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
41326 return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
41333 /// If V is a build vector of boolean constants and exactly one of those
41334 /// constants is true, return the operand index of that true element.
41335 /// Otherwise, return -1.
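/// For example, <i1 0, i1 0, i1 1, i1 0> yields 2, while an all-zeros or
/// multiple-ones mask yields -1.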
41336 static int getOneTrueElt(SDValue V) {
41337 // This needs to be a build vector of booleans.
41338 // TODO: Checking for the i1 type matches the IR definition for the mask,
41339 // but the mask check could be loosened to i8 or other types. That might
41340 // also require checking more than 'allOnesValue'; eg, the x86 HW
41341 // instructions only require that the MSB is set for each mask element.
41342 // The ISD::MSTORE comments/definition do not specify how the mask operand
41343 // is interpreted.
41344 auto *BV = dyn_cast<BuildVectorSDNode>(V);
41345 if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
41348 int TrueIndex = -1;
41349 unsigned NumElts = BV->getValueType(0).getVectorNumElements();
41350 for (unsigned i = 0; i < NumElts; ++i) {
41351 const SDValue &Op = BV->getOperand(i);
41354 auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
41357 if (ConstNode->getAPIntValue().isAllOnesValue()) {
41358 // If we already found a one, this is too many.
41359 if (TrueIndex >= 0)
41367 /// Given a masked memory load/store operation, return true if it has one mask
41368 /// bit set. If it has one mask bit set, then also return the memory address of
41369 /// the scalar element to load/store, the vector index to insert/extract that
41370 /// scalar element, and the alignment for the scalar memory access.
41371 static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
41372 SelectionDAG &DAG, SDValue &Addr,
41373 SDValue &Index, unsigned &Alignment) {
41374 int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
41375 if (TrueMaskElt < 0)
41378 // Get the address of the one scalar element that is specified by the mask
41379 // using the appropriate offset from the base pointer.
41380 EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
41381 Addr = MaskedOp->getBasePtr();
41382 if (TrueMaskElt != 0) {
41383 unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
41384 Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
41387 Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
41388 Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
41392 /// If exactly one element of the mask is set for a non-extending masked load,
41393 /// it is a scalar load and vector insert.
41394 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
41395 /// mask have already been optimized in IR, so we don't bother with those here.
41397 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
41398 TargetLowering::DAGCombinerInfo &DCI) {
41399 assert(ML->isUnindexed() && "Unexpected indexed masked load!");
41400 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
41401 // However, some target hooks may need to be added to know when the transform
41402 // is profitable. Endianness would also have to be considered.
41404 SDValue Addr, VecIndex;
41405 unsigned Alignment;
41406 if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
41409 // Load the one scalar element that is specified by the mask using the
41410 // appropriate offset from the base pointer.
41412 EVT VT = ML->getValueType(0);
41413 EVT EltVT = VT.getVectorElementType();
41415 DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
41416 Alignment, ML->getMemOperand()->getFlags());
41418 // Insert the loaded element into the appropriate place in the vector.
41419 SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
41420 ML->getPassThru(), Load, VecIndex);
41421 return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
41425 combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
41426 TargetLowering::DAGCombinerInfo &DCI) {
41427 assert(ML->isUnindexed() && "Unexpected indexed masked load!");
41428 if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
41432 EVT VT = ML->getValueType(0);
41434 // If we are loading the first and last elements of a vector, it is safe and
41435 // always faster to load the whole vector. Replace the masked load with a
41436 // vector load and select.
41437 unsigned NumElts = VT.getVectorNumElements();
41438 BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
41439 bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
41440 bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
41441 if (LoadFirstElt && LoadLastElt) {
41442 SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
41443 ML->getMemOperand());
41444 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
41445 ML->getPassThru());
41446 return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
41449 // Convert a masked load with a constant mask into a masked load and a select.
41450 // This allows the select operation to use a faster kind of select instruction
41451 // (for example, vblendvps -> vblendps).
41453 // Don't try this if the pass-through operand is already undefined. That would
41454 // cause an infinite loop because that's what we're about to create.
41455 if (ML->getPassThru().isUndef())
41458 if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
41461 // The new masked load has an undef pass-through operand. The select uses the
41462 // original pass-through operand.
41463 SDValue NewML = DAG.getMaskedLoad(
41464 VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
41465 DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
41466 ML->getAddressingMode(), ML->getExtensionType());
41467 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
41468 ML->getPassThru());
41470 return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
41473 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
41474 TargetLowering::DAGCombinerInfo &DCI,
41475 const X86Subtarget &Subtarget) {
41476 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
41478 // TODO: Expanding load with constant mask may be optimized as well.
41479 if (Mld->isExpandingLoad())
41482 if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
41483 if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
41485 // TODO: Do some AVX512 subsets benefit from this transform?
41486 if (!Subtarget.hasAVX512())
41487 if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
/// If exactly one element of the mask is set for a non-truncating masked store,
/// reduce it to a vector extract and a scalar store.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
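/// For example (a sketch): a masked store of v4i32 with mask <0, 0, 1, 0>
/// becomes an EXTRACT_VECTOR_ELT of element 2 followed by a scalar i32 store
/// to BasePtr + 8.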
41498 static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
41499 SelectionDAG &DAG) {
41500 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
41501 // However, some target hooks may need to be added to know when the transform
41502 // is profitable. Endianness would also have to be considered.
41504 SDValue Addr, VecIndex;
41505 unsigned Alignment;
41506 if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
41509 // Extract the one scalar element that is actually being stored.
41511 EVT VT = MS->getValue().getValueType();
41512 EVT EltVT = VT.getVectorElementType();
41513 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
41514 MS->getValue(), VecIndex);
41516 // Store that element at the appropriate offset from the base pointer.
41517 return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
41518 Alignment, MS->getMemOperand()->getFlags());
41521 static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
41522 TargetLowering::DAGCombinerInfo &DCI,
41523 const X86Subtarget &Subtarget) {
41524 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
41525 if (Mst->isCompressingStore())
41528 EVT VT = Mst->getValue().getValueType();
41530 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41532 if (Mst->isTruncatingStore())
41535 if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
41536 return ScalarStore;
41538 // If the mask value has been legalized to a non-boolean vector, try to
41539 // simplify ops leading up to it. We only demand the MSB of each lane.
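// For example (a sketch): if a v4i32 mask lane is computed as
// (and X, 0x80000000), only bit 31 is demanded here, so SimplifyDemandedBits
// can drop the AND and feed X to the mask directly.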
41540 SDValue Mask = Mst->getMask();
41541 if (Mask.getScalarValueSizeInBits() != 1) {
41542 APInt DemandedMask(APInt::getSignMask(VT.getScalarSizeInBits()));
41543 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
41544 return SDValue(N, 0);
41547 SDValue Value = Mst->getValue();
41548 if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
41549 TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
41550 Mst->getMemoryVT())) {
41551 return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
41552 Mst->getBasePtr(), Mst->getOffset(), Mask,
41553 Mst->getMemoryVT(), Mst->getMemOperand(),
41554 Mst->getAddressingMode(), true);
41560 static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
41561 TargetLowering::DAGCombinerInfo &DCI,
41562 const X86Subtarget &Subtarget) {
41563 StoreSDNode *St = cast<StoreSDNode>(N);
41564 EVT StVT = St->getMemoryVT();
41566 unsigned Alignment = St->getAlignment();
41567 SDValue StoredVal = St->getValue();
41568 EVT VT = StoredVal.getValueType();
41569 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41571 // Convert a store of vXi1 into a store of iX and a bitcast.
41572 if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
41573 VT.getVectorElementType() == MVT::i1) {
41575 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
41576 StoredVal = DAG.getBitcast(NewVT, StoredVal);
41578 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
41579 St->getPointerInfo(), St->getAlignment(),
41580 St->getMemOperand()->getFlags());
41583 // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
41584 // This will avoid a copy to k-register.
41585 if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
41586 StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
41587 StoredVal.getOperand(0).getValueType() == MVT::i8) {
41588 return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
41589 St->getBasePtr(), St->getPointerInfo(),
41590 St->getAlignment(), St->getMemOperand()->getFlags());
41593 // Widen v2i1/v4i1 stores to v8i1.
41594 if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
41595 Subtarget.hasAVX512()) {
41596 unsigned NumConcats = 8 / VT.getVectorNumElements();
41597 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
41598 Ops[0] = StoredVal;
41599 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
41600 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
41601 St->getPointerInfo(), St->getAlignment(),
41602 St->getMemOperand()->getFlags());
41605 // Turn vXi1 stores of constants into a scalar store.
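// For example (a sketch): storing the v8i1 constant <1,0,1,1,0,0,0,1> becomes
// a single i8 store of 0x8D (assuming element 0 maps to bit 0, as on
// little-endian x86).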
41606 if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
41607 VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
41608 ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
// If it's a v64i1 store without 64-bit support, we need two stores.
41610 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
41611 SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
41612 StoredVal->ops().slice(0, 32));
41613 Lo = combinevXi1ConstantToInteger(Lo, DAG);
41614 SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
41615 StoredVal->ops().slice(32, 32));
41616 Hi = combinevXi1ConstantToInteger(Hi, DAG);
41618 SDValue Ptr0 = St->getBasePtr();
41619 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);
41622 DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
41623 Alignment, St->getMemOperand()->getFlags());
41625 DAG.getStore(St->getChain(), dl, Hi, Ptr1,
41626 St->getPointerInfo().getWithOffset(4),
41627 MinAlign(Alignment, 4U),
41628 St->getMemOperand()->getFlags());
41629 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
41632 StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
41633 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
41634 St->getPointerInfo(), St->getAlignment(),
41635 St->getMemOperand()->getFlags());
41638 // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
41639 // Sandy Bridge, perform two 16-byte stores.
41641 if (VT.is256BitVector() && StVT == VT &&
41642 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
41643 *St->getMemOperand(), &Fast) &&
41645 unsigned NumElems = VT.getVectorNumElements();
41649 return splitVectorStore(St, DAG);
41652 // Split under-aligned vector non-temporal stores.
41653 if (St->isNonTemporal() && StVT == VT && Alignment < VT.getStoreSize()) {
41654 // ZMM/YMM nt-stores - either it can be stored as a series of shorter
41655 // vectors or the legalizer can scalarize it to use MOVNTI.
41656 if (VT.is256BitVector() || VT.is512BitVector()) {
41657 unsigned NumElems = VT.getVectorNumElements();
41660 return splitVectorStore(St, DAG);
// XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
// stores.
41665 if (VT.is128BitVector() && Subtarget.hasSSE2()) {
41666 MVT NTVT = Subtarget.hasSSE4A()
41668 : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
41669 return scalarizeVectorStore(St, NTVT, DAG);
// Try to optimize v16i16->v16i8 truncating stores when BWI is not
// supported but AVX512F is, by extending to v16i32 and truncating.
41675 if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
41676 St->getValue().getOpcode() == ISD::TRUNCATE &&
41677 St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
41678 TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
41679 St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
41680 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32, St->getValue());
41681 return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
41682 MVT::v16i8, St->getMemOperand());
41685 // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
41686 if (!St->isTruncatingStore() && StoredVal.hasOneUse() &&
41687 (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
41688 StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
41689 TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
41690 bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
41691 return EmitTruncSStore(IsSigned, St->getChain(),
41692 dl, StoredVal.getOperand(0), St->getBasePtr(),
41693 VT, St->getMemOperand(), DAG);
41696 // Optimize trunc store (of multiple scalars) to shuffle and store.
41697 // First, pack all of the elements in one place. Next, store to memory
41698 // in fewer chunks.
41699 if (St->isTruncatingStore() && VT.isVector()) {
41700 // Check if we can detect an AVG pattern from the truncation. If yes,
// replace the trunc store by a normal store with the result of X86ISD::AVG
// instruction.
41703 if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
41704 if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
41706 return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
41707 St->getPointerInfo(), St->getAlignment(),
41708 St->getMemOperand()->getFlags());
41710 if (TLI.isTruncStoreLegal(VT, StVT)) {
41711 if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
41712 return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
41713 dl, Val, St->getBasePtr(),
41714 St->getMemoryVT(), St->getMemOperand(), DAG);
41715 if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
41717 return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
41718 dl, Val, St->getBasePtr(),
41719 St->getMemoryVT(), St->getMemOperand(), DAG);
41725 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
41726 // the FP state in cases where an emms may be missing.
41727 // A preferable solution to the general problem is to figure out the right
41728 // places to insert EMMS. This qualifies as a quick hack.
41730 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
41731 if (VT.getSizeInBits() != 64)
41734 const Function &F = DAG.getMachineFunction().getFunction();
bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
bool F64IsLegal =
    !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
41738 if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
41739 isa<LoadSDNode>(St->getValue()) &&
41740 cast<LoadSDNode>(St->getValue())->isSimple() &&
41741 St->getChain().hasOneUse() && St->isSimple()) {
41742 LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
41744 if (!ISD::isNormalLoad(Ld))
41747 // Avoid the transformation if there are multiple uses of the loaded value.
41748 if (!Ld->hasNUsesOfValue(1, 0))
41753 // Lower to a single movq load/store pair.
41754 SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
41755 Ld->getBasePtr(), Ld->getMemOperand());
41757 // Make sure new load is placed in same chain order.
41758 DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
41759 return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
41760 St->getMemOperand());
41763 // This is similar to the above case, but here we handle a scalar 64-bit
41764 // integer store that is extracted from a vector on a 32-bit target.
41765 // If we have SSE2, then we can treat it like a floating-point double
41766 // to get past legalization. The execution dependencies fixup pass will
41767 // choose the optimal machine instruction for the store if this really is
41768 // an integer or v2f32 rather than an f64.
41769 if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
41770 St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
41771 SDValue OldExtract = St->getOperand(1);
41772 SDValue ExtOp0 = OldExtract.getOperand(0);
41773 unsigned VecSize = ExtOp0.getValueSizeInBits();
41774 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
41775 SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
41776 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
41777 BitCast, OldExtract.getOperand(1));
41778 return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
41779 St->getPointerInfo(), St->getAlignment(),
41780 St->getMemOperand()->getFlags());
41786 /// Return 'true' if this vector operation is "horizontal"
41787 /// and return the operands for the horizontal operation in LHS and RHS. A
41788 /// horizontal operation performs the binary operation on successive elements
41789 /// of its first operand, then on successive elements of its second operand,
41790 /// returning the resulting values in a vector. For example, if
41791 /// A = < float a0, float a1, float a2, float a3 >
41793 /// B = < float b0, float b1, float b2, float b3 >
41794 /// then the result of doing a horizontal operation on A and B is
41795 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
41796 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
41797 /// A horizontal-op B, for some already available A and B, and if so then LHS is
41798 /// set to A, RHS to B, and the routine returns 'true'.
41799 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, SelectionDAG &DAG,
41800 const X86Subtarget &Subtarget,
41801 bool IsCommutative) {
41802 // If either operand is undef, bail out. The binop should be simplified.
41803 if (LHS.isUndef() || RHS.isUndef())
41806 // Look for the following pattern:
41807 // A = < float a0, float a1, float a2, float a3 >
41808 // B = < float b0, float b1, float b2, float b3 >
41810 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
41811 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
41812 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
41813 // which is A horizontal-op B.
41815 MVT VT = LHS.getSimpleValueType();
41816 assert((VT.is128BitVector() || VT.is256BitVector()) &&
41817 "Unsupported vector type for horizontal add/sub");
41818 unsigned NumElts = VT.getVectorNumElements();
41820 // TODO - can we make a general helper method that does all of this for us?
41821 auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
41822 SmallVectorImpl<int> &ShuffleMask) {
41823 if (Op.getOpcode() == ISD::VECTOR_SHUFFLE) {
41824 if (!Op.getOperand(0).isUndef())
41825 N0 = Op.getOperand(0);
41826 if (!Op.getOperand(1).isUndef())
41827 N1 = Op.getOperand(1);
41828 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
41829 ShuffleMask.append(Mask.begin(), Mask.end());
41832 bool UseSubVector = false;
41833 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
41834 Op.getOperand(0).getValueType().is256BitVector() &&
41835 llvm::isNullConstant(Op.getOperand(1))) {
41836 Op = Op.getOperand(0);
41837 UseSubVector = true;
41840 SmallVector<SDValue, 2> SrcOps;
41841 SmallVector<int, 16> SrcShuffleMask;
41842 SDValue BC = peekThroughBitcasts(Op);
41843 if (isTargetShuffle(BC.getOpcode()) &&
41844 getTargetShuffleMask(BC.getNode(), BC.getSimpleValueType(), false,
41845 SrcOps, SrcShuffleMask, IsUnary)) {
41846 if (!UseSubVector && SrcShuffleMask.size() == NumElts &&
41847 SrcOps.size() <= 2) {
41848 N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
41849 N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
41850 ShuffleMask.append(SrcShuffleMask.begin(), SrcShuffleMask.end());
41852 if (UseSubVector && (SrcShuffleMask.size() == (NumElts * 2)) &&
41853 SrcOps.size() == 1) {
41854 N0 = extract128BitVector(SrcOps[0], 0, DAG, SDLoc(Op));
41855 N1 = extract128BitVector(SrcOps[0], NumElts, DAG, SDLoc(Op));
41856 ArrayRef<int> Mask = ArrayRef<int>(SrcShuffleMask).slice(0, NumElts);
41857 ShuffleMask.append(Mask.begin(), Mask.end());
41862 // View LHS in the form
41863 // LHS = VECTOR_SHUFFLE A, B, LMask
41864 // If LHS is not a shuffle, then pretend it is the identity shuffle:
41865 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
41866 // NOTE: A default initialized SDValue represents an UNDEF of type VT.
41868 SmallVector<int, 16> LMask;
41869 GetShuffle(LHS, A, B, LMask);
41871 // Likewise, view RHS in the form
41872 // RHS = VECTOR_SHUFFLE C, D, RMask
41874 SmallVector<int, 16> RMask;
41875 GetShuffle(RHS, C, D, RMask);
41877 // At least one of the operands should be a vector shuffle.
41878 unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
41879 if (NumShuffles == 0)
41882 if (LMask.empty()) {
41884 for (unsigned i = 0; i != NumElts; ++i)
41885 LMask.push_back(i);
41888 if (RMask.empty()) {
41890 for (unsigned i = 0; i != NumElts; ++i)
41891 RMask.push_back(i);
41894 // If A and B occur in reverse order in RHS, then canonicalize by commuting
41895 // RHS operands and shuffle mask.
41898 ShuffleVectorSDNode::commuteMask(RMask);
41900 // Check that the shuffles are both shuffling the same vectors.
41901 if (!(A == C && B == D))
41904 // LHS and RHS are now:
41905 // LHS = shuffle A, B, LMask
41906 // RHS = shuffle A, B, RMask
41907 // Check that the masks correspond to performing a horizontal operation.
41908 // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
41909 // so we just repeat the inner loop if this is a 256-bit op.
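// For example, for a 256-bit v8f32 HADD, result elements 0-3 are formed only
// from elements 0-3 of the sources and elements 4-7 only from elements 4-7,
// so the masks are checked one 128-bit chunk at a time.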
41910 unsigned Num128BitChunks = VT.getSizeInBits() / 128;
41911 unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
41912 assert((NumEltsPer128BitChunk % 2 == 0) &&
41913 "Vector type should have an even number of elements in each lane");
41914 for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
41915 for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
41916 // Ignore undefined components.
41917 int LIdx = LMask[i + j], RIdx = RMask[i + j];
41918 if (LIdx < 0 || RIdx < 0 ||
41919 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
41920 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
41923 // The low half of the 128-bit result must choose from A.
41924 // The high half of the 128-bit result must choose from B,
41925 // unless B is undef. In that case, we are always choosing from A.
41926 unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
41927 unsigned Src = B.getNode() ? i >= NumEltsPer64BitChunk : 0;
41929 // Check that successive elements are being operated on. If not, this is
41930 // not a horizontal operation.
41931 int Index = 2 * (i % NumEltsPer64BitChunk) + NumElts * Src + j;
41932 if (!(LIdx == Index && RIdx == Index + 1) &&
41933 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
41938 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
41939 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
41941 if (!shouldUseHorizontalOp(LHS == RHS && NumShuffles < 2, DAG, Subtarget))
41944 LHS = DAG.getBitcast(VT, LHS);
41945 RHS = DAG.getBitcast(VT, RHS);
41949 /// Do target-specific dag combines on floating-point adds/subs.
41950 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
41951 const X86Subtarget &Subtarget) {
41952 EVT VT = N->getValueType(0);
41953 SDValue LHS = N->getOperand(0);
41954 SDValue RHS = N->getOperand(1);
41955 bool IsFadd = N->getOpcode() == ISD::FADD;
41956 auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
41957 assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
41959 // Try to synthesize horizontal add/sub from adds/subs of shuffles.
41960 if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
41961 (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
41962 isHorizontalBinOp(LHS, RHS, DAG, Subtarget, IsFadd))
41963 return DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
/// the codegen.
41970 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
41971 /// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
41972 /// anything that is guaranteed to be transformed by DAGCombiner.
41973 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
41974 const X86Subtarget &Subtarget,
41976 assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
41977 SDValue Src = N->getOperand(0);
41978 unsigned SrcOpcode = Src.getOpcode();
41979 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41981 EVT VT = N->getValueType(0);
41982 EVT SrcVT = Src.getValueType();
41984 auto IsFreeTruncation = [VT](SDValue Op) {
41985 unsigned TruncSizeInBits = VT.getScalarSizeInBits();
41987 // See if this has been extended from a smaller/equal size to
41988 // the truncation size, allowing a truncation to combine with the extend.
41989 unsigned Opcode = Op.getOpcode();
41990 if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
41991 Opcode == ISD::ZERO_EXTEND) &&
41992 Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
41995 // See if this is a single use constant which can be constant folded.
// NOTE: We don't peek through bitcasts here because there is currently
// no support for constant folding truncate+bitcast+vector_of_constants. So
// we'll just end up with a truncate on both operands, which will
// get turned back into (truncate (binop)), causing an infinite loop.
42000 return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
42003 auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
42004 SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
42005 SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
42006 return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
42009 // Don't combine if the operation has other uses.
42010 if (!Src.hasOneUse())
42013 // Only support vector truncation for now.
42014 // TODO: i64 scalar math would benefit as well.
42015 if (!VT.isVector())
// In most cases it's only worth pre-truncating if we're only facing the cost
// of one truncation, i.e. if one of the inputs will constant fold or the
// input is repeated.
42021 switch (SrcOpcode) {
42025 SDValue Op0 = Src.getOperand(0);
42026 SDValue Op1 = Src.getOperand(1);
42027 if (TLI.isOperationLegalOrPromote(SrcOpcode, VT) &&
42028 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
42029 return TruncateArithmetic(Op0, Op1);
// X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ); it's
// better to truncate if we have the chance.
42036 if (SrcVT.getScalarType() == MVT::i64 &&
42037 TLI.isOperationLegal(SrcOpcode, VT) &&
42038 !TLI.isOperationLegal(SrcOpcode, SrcVT))
42039 return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
42042 SDValue Op0 = Src.getOperand(0);
42043 SDValue Op1 = Src.getOperand(1);
42044 if (TLI.isOperationLegal(SrcOpcode, VT) &&
42045 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
42046 return TruncateArithmetic(Op0, Op1);
// TODO: For ISD::SUB we are conservative and require both sides to be
// freely truncatable to avoid interfering with combineSubToSubus.
42052 SDValue Op0 = Src.getOperand(0);
42053 SDValue Op1 = Src.getOperand(1);
42054 if (TLI.isOperationLegal(SrcOpcode, VT) &&
42055 (Op0 == Op1 || (IsFreeTruncation(Op0) && IsFreeTruncation(Op1))))
42056 return TruncateArithmetic(Op0, Op1);
42064 /// Truncate using ISD::AND mask and X86ISD::PACKUS.
42065 /// e.g. trunc <8 x i32> X to <8 x i16> -->
42066 /// MaskX = X & 0xffff (clear high bits to prevent saturation)
42067 /// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
42068 static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
42069 const X86Subtarget &Subtarget,
42070 SelectionDAG &DAG) {
42071 SDValue In = N->getOperand(0);
42072 EVT InVT = In.getValueType();
42073 EVT OutVT = N->getValueType(0);
42075 APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
42076 OutVT.getScalarSizeInBits());
42077 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
42078 return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
42081 /// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
42082 static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
42083 const X86Subtarget &Subtarget,
42084 SelectionDAG &DAG) {
42085 SDValue In = N->getOperand(0);
42086 EVT InVT = In.getValueType();
42087 EVT OutVT = N->getValueType(0);
42088 In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
42089 DAG.getValueType(OutVT));
42090 return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
/// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
/// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
/// legalization the truncation will be translated into a BUILD_VECTOR whose
/// elements are each extracted from a vector and then truncated, and it is
/// difficult to perform this optimization on that form.
42098 static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
42099 const X86Subtarget &Subtarget) {
42100 EVT OutVT = N->getValueType(0);
42101 if (!OutVT.isVector())
42104 SDValue In = N->getOperand(0);
42105 if (!In.getValueType().isSimple())
42108 EVT InVT = In.getValueType();
42109 unsigned NumElems = OutVT.getVectorNumElements();
42111 // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
42112 // SSE2, and we need to take care of it specially.
42113 // AVX512 provides vpmovdb.
42114 if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
42117 EVT OutSVT = OutVT.getVectorElementType();
42118 EVT InSVT = InVT.getVectorElementType();
42119 if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
42120 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
// SSSE3's pshufb results in fewer instructions in the cases below.
42125 if (Subtarget.hasSSSE3() && NumElems == 8 &&
42126 ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
42127 (InSVT == MVT::i32 && OutSVT == MVT::i16)))
42131 // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
42132 // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
42133 // truncate 2 x v4i32 to v8i16.
42134 if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
42135 return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
42136 if (InSVT == MVT::i32)
42137 return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
/// This function transforms vector truncation of 'extended sign-bits' or
/// 'extended zero-bits' values (vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32) into
/// X86ISD::PACKSS/PACKUS operations.
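/// For example (a sketch): truncating a v8i32 comparison result (whose lanes
/// are all-zeros or all-ones, so the sign bit extends through every lane) to
/// v8i16 can use PACKSSDW directly without losing any information.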
42145 static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
42147 const X86Subtarget &Subtarget) {
42149 if (!Subtarget.hasSSE2())
42152 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
42155 SDValue In = N->getOperand(0);
42156 if (!In.getValueType().isSimple())
42159 MVT VT = N->getValueType(0).getSimpleVT();
42160 MVT SVT = VT.getScalarType();
42162 MVT InVT = In.getValueType().getSimpleVT();
42163 MVT InSVT = InVT.getScalarType();
42165 // Check we have a truncation suited for PACKSS/PACKUS.
42166 if (!VT.is128BitVector() && !VT.is256BitVector())
42168 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
42170 if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
42173 // AVX512 has fast truncate, but if the input is already going to be split,
42174 // there's no harm in trying pack.
42175 if (Subtarget.hasAVX512() &&
42176 !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
42177 InVT.is512BitVector()))
42180 unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
42181 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
42183 // Use PACKUS if the input has zero-bits that extend all the way to the
42184 // packed/truncated value. e.g. masks, zext_in_reg, etc.
42185 KnownBits Known = DAG.computeKnownBits(In);
42186 unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
42187 if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
42188 return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
42190 // Use PACKSS if the input has sign-bits that extend all the way to the
42191 // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
42192 unsigned NumSignBits = DAG.ComputeNumSignBits(In);
42193 if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
42194 return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
42199 // Try to form a MULHU or MULHS node by looking for
42200 // (trunc (srl (mul ext, ext), 16))
42201 // TODO: This is X86 specific because we want to be able to handle wide types
42202 // before type legalization. But we can only do it if the vector will be
42203 // legalized via widening/splitting. Type legalization can't handle promotion
// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
// combiner.
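// For example (a sketch, for vXi16 inputs X and Y):
//   (trunc (srl (mul (zext X), (zext Y)), 16)) --> (mulhu X, Y)
//   (trunc (srl (mul (sext X), (sext Y)), 16)) --> (mulhs X, Y)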
42206 static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
42207 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
42208 // First instruction should be a right shift of a multiply.
42209 if (Src.getOpcode() != ISD::SRL ||
42210 Src.getOperand(0).getOpcode() != ISD::MUL)
42213 if (!Subtarget.hasSSE2())
// Only handle vXi16 types that are at least 128-bits unless they will be
// widened.
42218 if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
42221 // Input type should be vXi32.
42222 EVT InVT = Src.getValueType();
42223 if (InVT.getVectorElementType() != MVT::i32)
42226 // Need a shift by 16.
42228 if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
42232 SDValue LHS = Src.getOperand(0).getOperand(0);
42233 SDValue RHS = Src.getOperand(0).getOperand(1);
42235 unsigned ExtOpc = LHS.getOpcode();
42236 if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
42237 RHS.getOpcode() != ExtOpc)
42240 // Peek through the extends.
42241 LHS = LHS.getOperand(0);
42242 RHS = RHS.getOperand(0);
42244 // Ensure the input types match.
42245 if (LHS.getValueType() != VT || RHS.getValueType() != VT)
42248 unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
42249 return DAG.getNode(Opc, DL, VT, LHS, RHS);
42252 // Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
42253 // from one vector with signed bytes from another vector, adds together
42254 // adjacent pairs of 16-bit products, and saturates the result before
42255 // truncating to 16-bits.
42257 // Which looks something like this:
42258 // (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
42259 // (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
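// For example (a sketch): with unsigned bytes A = <2, 3, ...> and signed
// bytes B = <-1, 4, ...>, result element 0 is ssat16(2 * -1 + 3 * 4) = 10.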
42260 static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
42261 const X86Subtarget &Subtarget,
42263 if (!VT.isVector() || !Subtarget.hasSSSE3())
42266 unsigned NumElems = VT.getVectorNumElements();
42267 EVT ScalarVT = VT.getVectorElementType();
42268 if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
42271 SDValue SSatVal = detectSSatPattern(In, VT);
42272 if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
42275 // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
42276 // of multiplies from even/odd elements.
42277 SDValue N0 = SSatVal.getOperand(0);
42278 SDValue N1 = SSatVal.getOperand(1);
42280 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
42283 SDValue N00 = N0.getOperand(0);
42284 SDValue N01 = N0.getOperand(1);
42285 SDValue N10 = N1.getOperand(0);
42286 SDValue N11 = N1.getOperand(1);
42288 // TODO: Handle constant vectors and use knownbits/computenumsignbits?
42289 // Canonicalize zero_extend to LHS.
42290 if (N01.getOpcode() == ISD::ZERO_EXTEND)
42291 std::swap(N00, N01);
42292 if (N11.getOpcode() == ISD::ZERO_EXTEND)
42293 std::swap(N10, N11);
42295 // Ensure we have a zero_extend and a sign_extend.
42296 if (N00.getOpcode() != ISD::ZERO_EXTEND ||
42297 N01.getOpcode() != ISD::SIGN_EXTEND ||
42298 N10.getOpcode() != ISD::ZERO_EXTEND ||
42299 N11.getOpcode() != ISD::SIGN_EXTEND)
42302 // Peek through the extends.
42303 N00 = N00.getOperand(0);
42304 N01 = N01.getOperand(0);
42305 N10 = N10.getOperand(0);
42306 N11 = N11.getOperand(0);
42308 // Ensure the extend is from vXi8.
42309 if (N00.getValueType().getVectorElementType() != MVT::i8 ||
42310 N01.getValueType().getVectorElementType() != MVT::i8 ||
42311 N10.getValueType().getVectorElementType() != MVT::i8 ||
42312 N11.getValueType().getVectorElementType() != MVT::i8)
42315 // All inputs should be build_vectors.
42316 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
42317 N01.getOpcode() != ISD::BUILD_VECTOR ||
42318 N10.getOpcode() != ISD::BUILD_VECTOR ||
42319 N11.getOpcode() != ISD::BUILD_VECTOR)
42322 // N00/N10 are zero extended. N01/N11 are sign extended.
// For each result element i, the even elements of the two source vectors must
// be multiplied together, the odd elements multiplied together, and those two
// products summed. That is, for each element i this operation is being
// performed:
//   A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
42330 SDValue ZExtIn, SExtIn;
42331 for (unsigned i = 0; i != NumElems; ++i) {
42332 SDValue N00Elt = N00.getOperand(i);
42333 SDValue N01Elt = N01.getOperand(i);
42334 SDValue N10Elt = N10.getOperand(i);
42335 SDValue N11Elt = N11.getOperand(i);
42336 // TODO: Be more tolerant to undefs.
42337 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
42338 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
42339 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
42340 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
42342 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
42343 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
42344 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
42345 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
42346 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
42348 unsigned IdxN00 = ConstN00Elt->getZExtValue();
42349 unsigned IdxN01 = ConstN01Elt->getZExtValue();
42350 unsigned IdxN10 = ConstN10Elt->getZExtValue();
42351 unsigned IdxN11 = ConstN11Elt->getZExtValue();
42352 // Add is commutative so indices can be reordered.
42353 if (IdxN00 > IdxN10) {
42354 std::swap(IdxN00, IdxN10);
42355 std::swap(IdxN01, IdxN11);
// N0 indices must be the even element. N1 indices must be the next odd
// element.
42358 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
42359 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
42361 SDValue N00In = N00Elt.getOperand(0);
42362 SDValue N01In = N01Elt.getOperand(0);
42363 SDValue N10In = N10Elt.getOperand(0);
42364 SDValue N11In = N11Elt.getOperand(0);
// The first time we find an input, capture it.
42370 if (ZExtIn != N00In || SExtIn != N01In ||
42371 ZExtIn != N10In || SExtIn != N11In)
42375 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
42376 ArrayRef<SDValue> Ops) {
// Shrink by adding truncate nodes and let DAGCombine fold with the
// sources.
42379 EVT InVT = Ops[0].getValueType();
42380 assert(InVT.getScalarType() == MVT::i8 &&
42381 "Unexpected scalar element type");
42382 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
42383 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
42384 InVT.getVectorNumElements() / 2);
42385 return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
42387 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
42391 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
42392 const X86Subtarget &Subtarget) {
42393 EVT VT = N->getValueType(0);
42394 SDValue Src = N->getOperand(0);
42397 // Attempt to pre-truncate inputs to arithmetic ops instead.
42398 if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
42401 // Try to detect AVG pattern first.
42402 if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
42405 // Try to detect PMADD
42406 if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
42409 // Try to combine truncation with signed/unsigned saturation.
42410 if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
42413 // Try to combine PMULHUW/PMULHW for vXi16.
42414 if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
// The bitcast source is a direct MMX result.
// Detect a truncation to i32 of a bitcast from x86mmx.
42419 if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
42420 SDValue BCSrc = Src.getOperand(0);
42421 if (BCSrc.getValueType() == MVT::x86mmx)
42422 return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
42425 // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
42426 if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
42429 return combineVectorTruncation(N, DAG, Subtarget);
42432 static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG) {
42433 EVT VT = N->getValueType(0);
42434 SDValue In = N->getOperand(0);
42437 if (auto SSatVal = detectSSatPattern(In, VT))
42438 return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
42439 if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
42440 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
/// Returns the negated value if the node \p N flips the sign of an FP value.
///
/// An FP-negation node may have different forms: FNEG(x), FXOR(x, 0x80000000)
/// or FSUB(0, x).
/// AVX512F does not have FXOR, so FNEG is lowered as
/// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
/// In this case we go through all bitcasts.
/// This also recognizes a splat of a negated value and returns the splat of
/// that value.
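/// For example (a sketch): (v4f32 (bitcast (xor (bitcast X to v4i32),
/// <4 x 0x80000000>))) is recognized as the negation of X.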
42454 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
42455 if (N->getOpcode() == ISD::FNEG)
42456 return N->getOperand(0);
42458 // Don't recurse exponentially.
42459 if (Depth > SelectionDAG::MaxRecursionDepth)
42462 unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
42464 SDValue Op = peekThroughBitcasts(SDValue(N, 0));
42465 EVT VT = Op->getValueType(0);
42467 // Make sure the element size doesn't change.
42468 if (VT.getScalarSizeInBits() != ScalarSize)
42471 unsigned Opc = Op.getOpcode();
42473 case ISD::VECTOR_SHUFFLE: {
42474 // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
42475 // of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
42476 if (!Op.getOperand(1).isUndef())
42478 if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
42479 if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
42480 return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
42481 cast<ShuffleVectorSDNode>(Op)->getMask());
42484 case ISD::INSERT_VECTOR_ELT: {
// Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is
// INSERT_VECTOR_ELT(UNDEF, -V, INDEX).
42487 SDValue InsVector = Op.getOperand(0);
42488 SDValue InsVal = Op.getOperand(1);
42489 if (!InsVector.isUndef())
42491 if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
42492 if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
42493 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
42494 NegInsVal, Op.getOperand(2));
42499 case X86ISD::FXOR: {
42500 SDValue Op1 = Op.getOperand(1);
42501 SDValue Op0 = Op.getOperand(0);
42503 // For XOR and FXOR, we want to check if constant
42504 // bits of Op1 are sign bit masks. For FSUB, we
42505 // have to check if constant bits of Op0 are sign
42506 // bit masks and hence we swap the operands.
42507 if (Opc == ISD::FSUB)
42508 std::swap(Op0, Op1);
42511 SmallVector<APInt, 16> EltBits;
42512 // Extract constant bits and see if they are all
42513 // sign bit masks. Ignore the undef elements.
42514 if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
42515 /* AllowWholeUndefs */ true,
42516 /* AllowPartialUndefs */ false)) {
42517 for (unsigned I = 0, E = EltBits.size(); I < E; I++)
42518 if (!UndefElts[I] && !EltBits[I].isSignMask())
42521 return peekThroughBitcasts(Op0);
42529 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
42533 default: llvm_unreachable("Unexpected opcode");
42534 case ISD::FMA: Opcode = X86ISD::FNMADD; break;
42535 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break;
42536 case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break;
42537 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break;
42538 case X86ISD::FNMADD: Opcode = ISD::FMA; break;
42539 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break;
42540 case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break;
42541 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break;
42547 default: llvm_unreachable("Unexpected opcode");
42548 case ISD::FMA: Opcode = X86ISD::FMSUB; break;
42549 case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
42550 case X86ISD::FMSUB: Opcode = ISD::FMA; break;
42551 case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
42552 case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break;
42553 case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
42554 case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break;
42555 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
42556 case X86ISD::FMADDSUB: Opcode = X86ISD::FMSUBADD; break;
42557 case X86ISD::FMADDSUB_RND: Opcode = X86ISD::FMSUBADD_RND; break;
42558 case X86ISD::FMSUBADD: Opcode = X86ISD::FMADDSUB; break;
42559 case X86ISD::FMSUBADD_RND: Opcode = X86ISD::FMADDSUB_RND; break;
42565 default: llvm_unreachable("Unexpected opcode");
42566 case ISD::FMA: Opcode = X86ISD::FNMSUB; break;
42567 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
42568 case X86ISD::FMSUB: Opcode = X86ISD::FNMADD; break;
42569 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
42570 case X86ISD::FNMADD: Opcode = X86ISD::FMSUB; break;
42571 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
42572 case X86ISD::FNMSUB: Opcode = ISD::FMA; break;
42573 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
42580 /// Do target-specific dag combines on floating point negations.
42581 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
42582 const X86Subtarget &Subtarget) {
42583 EVT OrigVT = N->getValueType(0);
42584 SDValue Arg = isFNEG(DAG, N);
42588 EVT VT = Arg.getValueType();
42589 EVT SVT = VT.getScalarType();
42592 // Let legalize expand this if it isn't a legal type yet.
42593 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
42596 // If we're negating a FMUL node on a target with FMA, then we can avoid the
42597 // use of a constant by performing (-0 - A*B) instead.
42598 // FIXME: Check rounding control flags as well once it becomes available.
42599 if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
42600 Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
42601 SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
42602 SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
42603 Arg.getOperand(1), Zero);
42604 return DAG.getBitcast(OrigVT, NewNode);
42607 // If we're negating an FMA node, then we can adjust the
42608 // instruction to include the extra negation.
42609 if (Arg.hasOneUse() && Subtarget.hasAnyFMA()) {
42610 switch (Arg.getOpcode()) {
42612 case X86ISD::FMSUB:
42613 case X86ISD::FNMADD:
42614 case X86ISD::FNMSUB:
42615 case X86ISD::FMADD_RND:
42616 case X86ISD::FMSUB_RND:
42617 case X86ISD::FNMADD_RND:
42618 case X86ISD::FNMSUB_RND: {
// We can't handle a scalar intrinsic node here because it would only
// invert one element and not the whole vector. But we could try to handle
// a negation of the lower element only.
42622 unsigned NewOpcode = negateFMAOpcode(Arg.getOpcode(), false, false, true);
42623 return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT, Arg->ops()));
42631 char X86TargetLowering::isNegatibleForFree(SDValue Op, SelectionDAG &DAG,
42632 bool LegalOperations,
42634 unsigned Depth) const {
42635 // fneg patterns are removable even if they have multiple uses.
42636 if (isFNEG(DAG, Op.getNode(), Depth))
42639 // Don't recurse exponentially.
42640 if (Depth > SelectionDAG::MaxRecursionDepth)
42643 EVT VT = Op.getValueType();
42644 EVT SVT = VT.getScalarType();
42645 switch (Op.getOpcode()) {
42647 case X86ISD::FMSUB:
42648 case X86ISD::FNMADD:
42649 case X86ISD::FNMSUB:
42650 case X86ISD::FMADD_RND:
42651 case X86ISD::FMSUB_RND:
42652 case X86ISD::FNMADD_RND:
42653 case X86ISD::FNMSUB_RND: {
42654 if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
42655 !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
42658 // This is always negatible for free but we might be able to remove some
42659 // extra operand negations as well.
42660 for (int i = 0; i != 3; ++i) {
42661 char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
42662 ForCodeSize, Depth + 1);
42670 return TargetLowering::isNegatibleForFree(Op, DAG, LegalOperations,
42671 ForCodeSize, Depth);
42674 SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
42675 bool LegalOperations,
42677 unsigned Depth) const {
42678 // fneg patterns are removable even if they have multiple uses.
42679 if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth))
42680 return DAG.getBitcast(Op.getValueType(), Arg);
42682 EVT VT = Op.getValueType();
42683 EVT SVT = VT.getScalarType();
42684 unsigned Opc = Op.getOpcode();
42687 case X86ISD::FMSUB:
42688 case X86ISD::FNMADD:
42689 case X86ISD::FNMSUB:
42690 case X86ISD::FMADD_RND:
42691 case X86ISD::FMSUB_RND:
42692 case X86ISD::FNMADD_RND:
42693 case X86ISD::FNMSUB_RND: {
42694 if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
42695 !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
42698 // This is always negatible for free but we might be able to remove some
42699 // extra operand negations as well.
42700 SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
42701 for (int i = 0; i != 3; ++i) {
42702 char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
42703 ForCodeSize, Depth + 1);
42705 NewOps[i] = getNegatedExpression(Op.getOperand(i), DAG, LegalOperations,
42706 ForCodeSize, Depth + 1);
42709 bool NegA = !!NewOps[0];
42710 bool NegB = !!NewOps[1];
42711 bool NegC = !!NewOps[2];
42712 unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
42714 // Fill in the non-negated ops with the original values.
42715 for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
42717 NewOps[i] = Op.getOperand(i);
42718 return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
42722 return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
42723 ForCodeSize, Depth);
42726 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
42727 const X86Subtarget &Subtarget) {
42728 MVT VT = N->getSimpleValueType(0);
42729 // If we have integer vector types available, use the integer opcodes.
42730 if (!VT.isVector() || !Subtarget.hasSSE2())
42735 unsigned IntBits = VT.getScalarSizeInBits();
42736 MVT IntSVT = MVT::getIntegerVT(IntBits);
42737 MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
42739 SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
42740 SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
42741 unsigned IntOpcode;
42742 switch (N->getOpcode()) {
42743 default: llvm_unreachable("Unexpected FP logic op");
42744 case X86ISD::FOR: IntOpcode = ISD::OR; break;
42745 case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
42746 case X86ISD::FAND: IntOpcode = ISD::AND; break;
42747 case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
42749 SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
42750 return DAG.getBitcast(VT, IntOp);
42754 /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
42755 static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
42756 if (N->getOpcode() != ISD::XOR)
42759 SDValue LHS = N->getOperand(0);
42760 if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
42763 X86::CondCode NewCC = X86::GetOppositeBranchCondition(
42764 X86::CondCode(LHS->getConstantOperandVal(0)));
42766 return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
42769 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
42770 TargetLowering::DAGCombinerInfo &DCI,
42771 const X86Subtarget &Subtarget) {
42772 // If this is SSE1 only convert to FXOR to avoid scalarization.
42773 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
42774 N->getValueType(0) == MVT::v4i32) {
42775 return DAG.getBitcast(
42776 MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
42777 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
42778 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
42781 if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
42784 if (DCI.isBeforeLegalizeOps())
42787 if (SDValue SetCC = foldXor1SetCC(N, DAG))
42790 if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
42793 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
42796 return combineFneg(N, DAG, Subtarget);
42799 static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
42800 TargetLowering::DAGCombinerInfo &DCI,
42801 const X86Subtarget &Subtarget) {
42802 SDValue Op0 = N->getOperand(0);
42803 SDValue Op1 = N->getOperand(1);
42804 EVT VT = N->getValueType(0);
42805 unsigned NumBits = VT.getSizeInBits();
42807 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42809 // TODO - Constant Folding.
42810 if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
42811 // Reduce Cst1 to the bottom 16-bits.
42812 // NOTE: SimplifyDemandedBits won't do this for constants.
42813 const APInt &Val1 = Cst1->getAPIntValue();
42814 APInt MaskedVal1 = Val1 & 0xFFFF;
42815 if (MaskedVal1 != Val1)
42816 return DAG.getNode(X86ISD::BEXTR, SDLoc(N), VT, Op0,
42817 DAG.getConstant(MaskedVal1, SDLoc(N), VT));
42820 // Only bottom 16-bits of the control bits are required.
42821 APInt DemandedMask(APInt::getLowBitsSet(NumBits, 16));
42822 if (TLI.SimplifyDemandedBits(Op1, DemandedMask, DCI))
42823 return SDValue(N, 0);
42828 static bool isNullFPScalarOrVectorConst(SDValue V) {
42829 return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
42832 /// If a value is a scalar FP zero or a vector FP zero (potentially including
42833 /// undefined elements), return a zero constant that may be used to fold away
42834 /// that value. In the case of a vector, the returned constant will not contain
42835 /// undefined elements even if the input parameter does. This makes it suitable
/// to be used as a replacement operand with operations (e.g., bitwise-and) where
42837 /// an undef should not propagate.
42838 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
42839 const X86Subtarget &Subtarget) {
42840 if (!isNullFPScalarOrVectorConst(V))
42843 if (V.getValueType().isVector())
42844 return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
42849 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
42850 const X86Subtarget &Subtarget) {
42851 SDValue N0 = N->getOperand(0);
42852 SDValue N1 = N->getOperand(1);
42853 EVT VT = N->getValueType(0);
42856 // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
42857 if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
42858 (VT == MVT::f64 && Subtarget.hasSSE2()) ||
42859 (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
42862 auto isAllOnesConstantFP = [](SDValue V) {
42863 if (V.getSimpleValueType().isVector())
42864 return ISD::isBuildVectorAllOnes(V.getNode());
42865 auto *C = dyn_cast<ConstantFPSDNode>(V);
42866 return C && C->getConstantFPValue()->isAllOnesValue();
42869 // fand (fxor X, -1), Y --> fandn X, Y
42870 if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
42871 return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
42873 // fand X, (fxor Y, -1) --> fandn Y, X
42874 if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
42875 return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
42880 /// Do target-specific dag combines on X86ISD::FAND nodes.
42881 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
42882 const X86Subtarget &Subtarget) {
42883 // FAND(0.0, x) -> 0.0
42884 if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
42887 // FAND(x, 0.0) -> 0.0
42888 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
42891 if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
42894 return lowerX86FPLogicOp(N, DAG, Subtarget);
42897 /// Do target-specific dag combines on X86ISD::FANDN nodes.
42898 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
42899 const X86Subtarget &Subtarget) {
42900 // FANDN(0.0, x) -> x
42901 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
42902 return N->getOperand(1);
42904 // FANDN(x, 0.0) -> 0.0
42905 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
42908 return lowerX86FPLogicOp(N, DAG, Subtarget);
42911 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
42912 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
42913 const X86Subtarget &Subtarget) {
42914 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
42916 // F[X]OR(0.0, x) -> x
42917 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
42918 return N->getOperand(1);
42920 // F[X]OR(x, 0.0) -> x
42921 if (isNullFPScalarOrVectorConst(N->getOperand(1)))
42922 return N->getOperand(0);
42924 if (SDValue NewVal = combineFneg(N, DAG, Subtarget))
42927 return lowerX86FPLogicOp(N, DAG, Subtarget);
42930 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
42931 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
42932 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
42934 // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
42935 if (!DAG.getTarget().Options.NoNaNsFPMath ||
42936 !DAG.getTarget().Options.NoSignedZerosFPMath)
42937 return SDValue();
42939 // Once NaNs and signed zeros can be ignored, convert the FMAX and FMIN nodes
42940 // into FMAXC and FMINC, which are commutative operations.
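// Commutativity matters here because it gives instruction selection the freedom
// to fold either operand from memory (illustrative; the exact folding behaviour
// depends on the patterns isel ends up choosing).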
42941 unsigned NewOp = 0;
42942 switch (N->getOpcode()) {
42943 default: llvm_unreachable("unknown opcode");
42944 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
42945 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
42948 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
42949 N->getOperand(0), N->getOperand(1));
42952 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
42953 const X86Subtarget &Subtarget) {
42954 if (Subtarget.useSoftFloat())
42957 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42959 EVT VT = N->getValueType(0);
42960 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
42961 (Subtarget.hasSSE2() && VT == MVT::f64) ||
42962 (VT.isVector() && TLI.isTypeLegal(VT))))
42965 SDValue Op0 = N->getOperand(0);
42966 SDValue Op1 = N->getOperand(1);
42968 auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
42970 // If we don't have to respect NaN inputs, this is a direct translation to x86
42971 // min/max instructions.
42972 if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
42973 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
42975 // If one of the operands is known non-NaN use the native min/max instructions
42976 // with the non-NaN input as second operand.
42977 if (DAG.isKnownNeverNaN(Op1))
42978 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
42979 if (DAG.isKnownNeverNaN(Op0))
42980 return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
42982 // If we have to respect NaN inputs, this takes at least 3 instructions.
42983 // Favor a library call when operating on a scalar and minimizing code size.
42984 if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
42987 EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
42990 // There are 4 possibilities involving NaN inputs, and these are the required
42991 // outputs:
42992 //                   Op1
42993 //               Num     NaN
42994 //            ----------------
42995 //     Num    |  Max  |  Op0 |
42996 // Op0        ----------------
42997 //     NaN    |  Op1  |  NaN |
42998 //            ----------------
43000 // The SSE FP max/min instructions were not designed for this case, but rather
43001 // to implement:
43002 // Min = Op1 < Op0 ? Op1 : Op0
43003 // Max = Op1 > Op0 ? Op1 : Op0
43005 // So they always return Op0 if either input is a NaN. However, we can still
43006 // use those instructions for fmaxnum by selecting away a NaN input.
43008 // If either operand is NaN, the 2nd source operand (Op0) is passed through.
43009 SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
43010 SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
43012 // If Op0 is a NaN, select Op1. Otherwise, select the min/max. If both operands
43013 // are NaN, the NaN value of Op1 is the result.
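// Roughly, for scalar f32 fmaxnum this produces a sequence along the lines of
// (illustrative only; the final instructions depend on the subtarget):
//   maxss      Op0, Op1     ; NaN-propagating max, Op0 passed through on NaN
//   cmpunordss Op0, Op0     ; mask is all-ones iff Op0 is NaN
//   blend/select Op1 where the mask is set, the min/max result elsewhere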
43014 return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
43017 static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
43018 TargetLowering::DAGCombinerInfo &DCI) {
43019 EVT VT = N->getValueType(0);
43020 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43022 APInt KnownUndef, KnownZero;
43023 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
43024 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
43026 return SDValue(N, 0);
43028 // Convert a full vector load into vzload when not all bits are needed.
43029 SDValue In = N->getOperand(0);
43030 MVT InVT = In.getSimpleValueType();
43031 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
43032 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
43033 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
43034 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
43035 // Unless the load is volatile or atomic.
43036 if (LN->isSimple()) {
43038 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
43039 MVT MemVT = MVT::getIntegerVT(NumBits);
43040 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
43041 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
43042 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
43043 SDValue VZLoad =
43044 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
43045 LN->getPointerInfo(),
43046 LN->getAlignment(),
43047 LN->getMemOperand()->getFlags());
43048 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
43049 DAG.getBitcast(InVT, VZLoad));
43050 DCI.CombineTo(N, Convert);
43051 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
43052 return SDValue(N, 0);
43059 static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
43060 TargetLowering::DAGCombinerInfo &DCI) {
43061 // FIXME: Handle strict fp nodes.
43062 EVT VT = N->getValueType(0);
43064 // Convert a full vector load into vzload when not all bits are needed.
43065 SDValue In = N->getOperand(0);
43066 MVT InVT = In.getSimpleValueType();
43067 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
43068 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
43069 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
43070 LoadSDNode *LN = cast<LoadSDNode>(In);
43071 // Unless the load is volatile or atomic.
43072 if (LN->isSimple()) {
43074 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
43075 MVT MemVT = MVT::getFloatingPointVT(NumBits);
43076 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
43077 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
43078 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
43079 SDValue VZLoad =
43080 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
43081 LN->getPointerInfo(),
43082 LN->getAlignment(),
43083 LN->getMemOperand()->getFlags());
43084 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
43085 DAG.getBitcast(InVT, VZLoad));
43086 DCI.CombineTo(N, Convert);
43087 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
43088 return SDValue(N, 0);
43095 /// Do target-specific dag combines on X86ISD::ANDNP nodes.
43096 static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
43097 TargetLowering::DAGCombinerInfo &DCI,
43098 const X86Subtarget &Subtarget) {
43099 MVT VT = N->getSimpleValueType(0);
43101 // ANDNP(0, x) -> x
43102 if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
43103 return N->getOperand(1);
43105 // ANDNP(x, 0) -> 0
43106 if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
43107 return DAG.getConstant(0, SDLoc(N), VT);
43109 // Turn ANDNP back to AND if input is inverted.
43110 if (SDValue Not = IsNOT(N->getOperand(0), DAG))
43111 return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not),
43114 // Attempt to recursively combine a bitmask ANDNP with shuffles.
43115 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
43116 SDValue Op(N, 0);
43117 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
43118 return Res;
43124 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
43125 TargetLowering::DAGCombinerInfo &DCI) {
43126 SDValue N0 = N->getOperand(0);
43127 SDValue N1 = N->getOperand(1);
43129 // BT ignores high bits in the bit index operand.
43130 unsigned BitWidth = N1.getValueSizeInBits();
43131 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
43132 if (SDValue DemandedN1 = DAG.GetDemandedBits(N1, DemandedMask))
43133 return DAG.getNode(X86ISD::BT, SDLoc(N), MVT::i32, N0, DemandedN1);
43138 // Try to combine sext_in_reg of a cmov of constants by extending the constants.
43139 static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
43140 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
43142 EVT DstVT = N->getValueType(0);
43144 SDValue N0 = N->getOperand(0);
43145 SDValue N1 = N->getOperand(1);
43146 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
43148 if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
43151 // Look through single use any_extends / truncs.
43152 SDValue IntermediateBitwidthOp;
43153 if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
43154 N0.hasOneUse()) {
43155 IntermediateBitwidthOp = N0;
43156 N0 = N0.getOperand(0);
43157 }
43159 // See if we have a single use cmov.
43160 if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
43163 SDValue CMovOp0 = N0.getOperand(0);
43164 SDValue CMovOp1 = N0.getOperand(1);
43166 // Make sure both operands are constants.
43167 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
43168 !isa<ConstantSDNode>(CMovOp1.getNode()))
43173 // If we looked through an any_extend/trunc above, apply the same operation to the constants.
43174 if (IntermediateBitwidthOp) {
43175 unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
43176 CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
43177 CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
43180 CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
43181 CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
43183 EVT CMovVT = DstVT;
43184 // We do not want i16 CMOVs. Promote to i32 and truncate afterwards.
43185 if (DstVT == MVT::i16) {
43186 CMovVT = MVT::i32;
43187 CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
43188 CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
43191 SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
43192 N0.getOperand(2), N0.getOperand(3));
43194 if (CMovVT != DstVT)
43195 CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
43200 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
43201 const X86Subtarget &Subtarget) {
43202 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
43204 if (SDValue V = combineSextInRegCmov(N, DAG))
43207 EVT VT = N->getValueType(0);
43208 SDValue N0 = N->getOperand(0);
43209 SDValue N1 = N->getOperand(1);
43210 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
43213 // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and AVX2
43214 // since there is no sign-extended shift right operation on a vector with
43215 // 64-bit elements.
43216 // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
43217 //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
43218 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
43219 N0.getOpcode() == ISD::SIGN_EXTEND)) {
43220 SDValue N00 = N0.getOperand(0);
43222 // An EXTLOAD has a better solution on AVX2: it may be replaced with an
43223 // X86ISD::VSEXT node.
43224 if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
43225 if (!ISD::isNormalLoad(N00.getNode()))
43226 return SDValue();
43228 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
43229 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
43231 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
43237 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
43238 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
43239 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
43240 /// opportunities to combine math ops, use an LEA, or use a complex addressing
43241 /// mode. This can eliminate extend, add, and shift instructions.
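/// Illustrative payoff (hypothetical values): once the extension is hoisted,
/// a user such as (add (shl (sext x), 3), 40) can be selected as a single
/// LEA-style address computation instead of a separate extend, shift and add.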
43242 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
43243 const X86Subtarget &Subtarget) {
43244 if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
43245 Ext->getOpcode() != ISD::ZERO_EXTEND)
43248 // TODO: This should be valid for other integer types.
43249 EVT VT = Ext->getValueType(0);
43250 if (VT != MVT::i64)
43253 SDValue Add = Ext->getOperand(0);
43254 if (Add.getOpcode() != ISD::ADD)
43257 bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
43258 bool NSW = Add->getFlags().hasNoSignedWrap();
43259 bool NUW = Add->getFlags().hasNoUnsignedWrap();
43261 // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
43262 // into the 'zext'.
43263 if ((Sext && !NSW) || (!Sext && !NUW))
43266 // Having a constant operand to the 'add' ensures that we are not increasing
43267 // the instruction count because the constant is extended for free below.
43268 // A constant operand can also become the displacement field of an LEA.
43269 auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
43273 // Don't make the 'add' bigger if there's no hope of combining it with some
43274 // other 'add' or 'shl' instruction.
43275 // TODO: It may be profitable to generate simpler LEA instructions in place
43276 // of single 'add' instructions, but the cost model for selecting an LEA
43277 // currently has a high threshold.
43278 bool HasLEAPotential = false;
43279 for (auto *User : Ext->uses()) {
43280 if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
43281 HasLEAPotential = true;
43285 if (!HasLEAPotential)
43288 // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
43289 int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
43290 SDValue AddOp0 = Add.getOperand(0);
43291 SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
43292 SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
43294 // The wider add is guaranteed to not wrap because both operands are
43295 // sign-extended.
43296 SDNodeFlags Flags;
43297 Flags.setNoSignedWrap(NSW);
43298 Flags.setNoUnsignedWrap(NUW);
43299 return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
43302 // If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
43303 // operands and the result of CMOV is not used anywhere else - promote CMOV
43304 // itself instead of promoting its result. This could be beneficial, because:
43305 // 1) X86TargetLowering::EmitLoweredSelect later can do merging of two
43306 // (or more) pseudo-CMOVs only when they go one-after-another and
43307 // getting rid of result extension code after CMOV will help that.
43308 // 2) Promotion of constant CMOV arguments is free, hence the
43309 // {ANY,SIGN,ZERO}_EXTEND will just be deleted.
43310 // 3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3 bytes, so this
43311 // promotion is also good in terms of code-size.
43312 // (64-bit CMOV is 4 bytes, that's why we don't do 32-bit => 64-bit
43313 // promotion.)
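// Illustrative encodings: the 16-bit form needs a 66h operand-size prefix
// (66 0F 45 /r, 4 bytes), the 32-bit form is 0F 45 /r (3 bytes), and the
// 64-bit form adds a REX.W prefix (4 bytes again).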
43314 static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
43315 SDValue CMovN = Extend->getOperand(0);
43316 if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
43319 EVT TargetVT = Extend->getValueType(0);
43320 unsigned ExtendOpcode = Extend->getOpcode();
43323 EVT VT = CMovN.getValueType();
43324 SDValue CMovOp0 = CMovN.getOperand(0);
43325 SDValue CMovOp1 = CMovN.getOperand(1);
43327 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
43328 !isa<ConstantSDNode>(CMovOp1.getNode()))
43331 // Only extend to i32 or i64.
43332 if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
43335 // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
43336 // are free.
43337 if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
43338 return SDValue();
43340 // If this is a zero extend to i64, we should only extend to i32 and use a free
43341 // zero extend to finish.
43342 EVT ExtendVT = TargetVT;
43343 if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
43344 ExtendVT = MVT::i32;
43346 CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
43347 CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
43349 SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
43350 CMovN.getOperand(2), CMovN.getOperand(3));
43352 // Finish extending if needed.
43353 if (ExtendVT != TargetVT)
43354 Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
43359 // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
43360 // This is more or less the reverse of combineBitcastvxi1.
43361 static SDValue
43362 combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
43363 TargetLowering::DAGCombinerInfo &DCI,
43364 const X86Subtarget &Subtarget) {
43365 unsigned Opcode = N->getOpcode();
43366 if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
43367 Opcode != ISD::ANY_EXTEND)
43369 if (!DCI.isBeforeLegalizeOps())
43371 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
43374 SDValue N0 = N->getOperand(0);
43375 EVT VT = N->getValueType(0);
43376 EVT SVT = VT.getScalarType();
43377 EVT InSVT = N0.getValueType().getScalarType();
43378 unsigned EltSizeInBits = SVT.getSizeInBits();
43380 // Input type must be extending a bool vector (bit-casted from a scalar
43381 // integer) to legal integer types.
43382 if (!VT.isVector())
43384 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
43386 if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
43389 SDValue N00 = N0.getOperand(0);
43390 EVT SclVT = N0.getOperand(0).getValueType();
43391 if (!SclVT.isScalarInteger())
43392 return SDValue();
43394 SDLoc DL(N);
43395 SDValue Vec;
43396 SmallVector<int, 32> ShuffleMask;
43397 unsigned NumElts = VT.getVectorNumElements();
43398 assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
43400 // Broadcast the scalar integer to the vector elements.
43401 if (NumElts > EltSizeInBits) {
43402 // If the scalar integer is greater than the vector element size, then we
43403 // must split it down into sub-sections for broadcasting. For example:
43404 // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
43405 // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
43406 assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
43407 unsigned Scale = NumElts / EltSizeInBits;
43408 EVT BroadcastVT =
43409 EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
43410 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
43411 Vec = DAG.getBitcast(VT, Vec);
43413 for (unsigned i = 0; i != Scale; ++i)
43414 ShuffleMask.append(EltSizeInBits, i);
43415 } else {
43416 // For a smaller scalar integer, we can simply any-extend it to the vector
43417 // element size (we don't care about the upper bits) and broadcast it to all
43418 // elements.
43419 SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
43420 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
43421 ShuffleMask.append(NumElts, 0);
43423 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
43425 // Now, mask the relevant bit in each element.
43426 SmallVector<SDValue, 32> Bits;
43427 for (unsigned i = 0; i != NumElts; ++i) {
43428 int BitIdx = (i % EltSizeInBits);
43429 APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
43430 Bits.push_back(DAG.getConstant(Bit, DL, SVT));
43432 SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
43433 Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
43435 // Compare against the bitmask and extend the result.
43436 EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
43437 Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
43438 Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
43440 // For SEXT, this is now done, otherwise shift the result down for
43441 // zero extension.
43442 if (Opcode == ISD::SIGN_EXTEND)
43443 return Vec;
43444 return DAG.getNode(ISD::SRL, DL, VT, Vec,
43445 DAG.getConstant(EltSizeInBits - 1, DL, VT));
43448 // Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
43450 static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
43451 const X86Subtarget &Subtarget) {
43452 SDValue N0 = N->getOperand(0);
43453 EVT VT = N->getValueType(0);
43456 // Only do this combine with AVX512 for vector extends.
43457 if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
43460 // Only combine legal element types.
43461 EVT SVT = VT.getVectorElementType();
43462 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
43463 SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
43466 // We can only do this if the vector size is 256 bits or less.
43467 unsigned Size = VT.getSizeInBits();
43471 // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
43472 // those are the only integer compares we have.
43473 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
43474 if (ISD::isUnsignedIntSetCC(CC))
43477 // Only do this combine if the extension will be fully consumed by the setcc.
43478 EVT N00VT = N0.getOperand(0).getValueType();
43479 EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
43480 if (Size != MatchingVecType.getSizeInBits())
43483 SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
43485 if (N->getOpcode() == ISD::ZERO_EXTEND)
43486 Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType().getScalarType());
43491 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
43492 TargetLowering::DAGCombinerInfo &DCI,
43493 const X86Subtarget &Subtarget) {
43494 SDValue N0 = N->getOperand(0);
43495 EVT VT = N->getValueType(0);
43496 EVT InVT = N0.getValueType();
43499 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
43502 if (!DCI.isBeforeLegalizeOps())
43505 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
43508 if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
43509 isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
43510 // Inverting and sign-extending a boolean is the same as zero-extending it and
43511 // subtracting 1, because 0 becomes -1 and 1 becomes 0. The subtract is
43512 // efficiently lowered with an LEA or a DEC. This is the same as: select Bool, 0, -1.
43513 // sext (xor Bool, -1) --> sub (zext Bool), 1
43514 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
43515 return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
43518 if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
43522 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
43525 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
43531 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
43532 TargetLowering::DAGCombinerInfo &DCI,
43533 const X86Subtarget &Subtarget) {
43535 EVT VT = N->getValueType(0);
43537 // Let legalize expand this if it isn't a legal type yet.
43538 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43539 if (!TLI.isTypeLegal(VT))
43542 EVT ScalarVT = VT.getScalarType();
43543 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
43546 SDValue A = N->getOperand(0);
43547 SDValue B = N->getOperand(1);
43548 SDValue C = N->getOperand(2);
43550 auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
43551 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
43552 bool LegalOperations = !DCI.isBeforeLegalizeOps();
43553 if (TLI.isNegatibleForFree(V, DAG, LegalOperations, CodeSize) == 2) {
43554 V = TLI.getNegatedExpression(V, DAG, LegalOperations, CodeSize);
43557 // Look through extract_vector_elts. If it comes from an FNEG, create a
43558 // new extract from the FNEG input.
43559 if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
43560 isNullConstant(V.getOperand(1))) {
43561 SDValue Vec = V.getOperand(0);
43562 if (TLI.isNegatibleForFree(Vec, DAG, LegalOperations, CodeSize) == 2) {
43563 SDValue NegVal =
43564 TLI.getNegatedExpression(Vec, DAG, LegalOperations, CodeSize);
43565 V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
43566 NegVal, V.getOperand(1));
43574 // Do not convert the passthru input of scalar intrinsics.
43575 // FIXME: We could allow negations of the lower element only.
43576 bool NegA = invertIfNegative(A);
43577 bool NegB = invertIfNegative(B);
43578 bool NegC = invertIfNegative(C);
43580 if (!NegA && !NegB && !NegC)
43583 unsigned NewOpcode =
43584 negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
43586 if (N->getNumOperands() == 4)
43587 return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
43588 return DAG.getNode(NewOpcode, dl, VT, A, B, C);
43591 // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
43592 // Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
43593 static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
43594 TargetLowering::DAGCombinerInfo &DCI) {
43596 EVT VT = N->getValueType(0);
43597 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43598 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
43599 bool LegalOperations = !DCI.isBeforeLegalizeOps();
43601 SDValue N2 = N->getOperand(2);
43602 if (TLI.isNegatibleForFree(N2, DAG, LegalOperations, CodeSize) != 2)
43605 SDValue NegN2 = TLI.getNegatedExpression(N2, DAG, LegalOperations, CodeSize);
43606 unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);
43608 if (N->getNumOperands() == 4)
43609 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
43610 NegN2, N->getOperand(3));
43611 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
43615 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
43616 TargetLowering::DAGCombinerInfo &DCI,
43617 const X86Subtarget &Subtarget) {
43618 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
43619 // (and (i32 x86isd::setcc_carry), 1)
43620 // This eliminates the zext. This transformation is necessary because
43621 // ISD::SETCC is always legalized to i8.
43623 SDValue N0 = N->getOperand(0);
43624 EVT VT = N->getValueType(0);
43626 if (N0.getOpcode() == ISD::AND &&
43628 N0.getOperand(0).hasOneUse()) {
43629 SDValue N00 = N0.getOperand(0);
43630 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
43631 if (!isOneConstant(N0.getOperand(1)))
43633 return DAG.getNode(ISD::AND, dl, VT,
43634 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
43635 N00.getOperand(0), N00.getOperand(1)),
43636 DAG.getConstant(1, dl, VT));
43640 if (N0.getOpcode() == ISD::TRUNCATE &&
43642 N0.getOperand(0).hasOneUse()) {
43643 SDValue N00 = N0.getOperand(0);
43644 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
43645 return DAG.getNode(ISD::AND, dl, VT,
43646 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
43647 N00.getOperand(0), N00.getOperand(1)),
43648 DAG.getConstant(1, dl, VT));
43652 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
43655 if (DCI.isBeforeLegalizeOps())
43656 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
43659 if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
43663 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
43666 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
43669 if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
43672 // TODO: Combine with any target/faux shuffle.
43673 if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
43674 VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
43675 SDValue N00 = N0.getOperand(0);
43676 SDValue N01 = N0.getOperand(1);
43677 unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
43678 APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
43679 if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
43680 (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
43681 return concatSubVectors(N00, N01, DAG, dl);
43688 /// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
43689 /// recognizable memcmp expansion.
43690 static bool isOrXorXorTree(SDValue X, bool Root = true) {
43691 if (X.getOpcode() == ISD::OR)
43692 return isOrXorXorTree(X.getOperand(0), false) &&
43693 isOrXorXorTree(X.getOperand(1), false);
43696 return X.getOpcode() == ISD::XOR;
43699 /// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
43700 /// expansion.
43701 template<typename F>
43702 static SDValue emitOrXorXorTree(SDValue X, SDLoc &DL, SelectionDAG &DAG,
43703 EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
43704 SDValue Op0 = X.getOperand(0);
43705 SDValue Op1 = X.getOperand(1);
43706 if (X.getOpcode() == ISD::OR) {
43707 SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
43708 SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
43709 if (VecVT != CmpVT)
43710 return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
43711 if (HasPT)
43712 return DAG.getNode(ISD::OR, DL, VecVT, A, B);
43713 return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
43714 } else if (X.getOpcode() == ISD::XOR) {
43715 SDValue A = SToV(Op0);
43716 SDValue B = SToV(Op1);
43717 if (VecVT != CmpVT)
43718 return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
43719 if (HasPT)
43720 return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
43721 return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
43723 llvm_unreachable("Impossible");
43726 /// Try to map a 128-bit or larger integer comparison to vector instructions
43727 /// before type legalization splits it up into chunks.
43728 static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
43729 const X86Subtarget &Subtarget) {
43730 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
43731 assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
43733 // We're looking for an oversized integer equality comparison.
43734 SDValue X = SetCC->getOperand(0);
43735 SDValue Y = SetCC->getOperand(1);
43736 EVT OpVT = X.getValueType();
43737 unsigned OpSize = OpVT.getSizeInBits();
43738 if (!OpVT.isScalarInteger() || OpSize < 128)
43741 // Ignore a comparison with zero because that gets special treatment in
43742 // EmitTest(). But make an exception for the special case of a pair of
43743 // logically-combined vector-sized operands compared to zero. This pattern may
43744 // be generated by the memcmp expansion pass with oversized integer compares
43746 bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
43747 if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
43750 // Don't perform this combine if constructing the vector will be expensive.
43751 auto IsVectorBitCastCheap = [](SDValue X) {
43752 X = peekThroughBitcasts(X);
43753 return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
43754 X.getOpcode() == ISD::LOAD;
43756 if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
43757 !IsOrXorXorTreeCCZero)
43760 EVT VT = SetCC->getValueType(0);
43762 bool HasAVX = Subtarget.hasAVX();
43764 // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
43765 // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
43766 // Otherwise use PCMPEQ (plus AND) and mask testing.
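// For instance, a 256-bit equality compare on an AVX2 target is expected to
// become roughly vpxor + vptest + sete/setne (illustrative; the exact code
// depends on the surrounding DAG).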
43767 if ((OpSize == 128 && Subtarget.hasSSE2()) ||
43768 (OpSize == 256 && HasAVX) ||
43769 (OpSize == 512 && Subtarget.useAVX512Regs())) {
43770 bool HasPT = Subtarget.hasSSE41();
43772 // PTEST and MOVMSK are slow on Knights Landing and Knights Mill and widened
43773 // vector registers are essentially free. (Technically, widening registers
43774 // prevents load folding, but the tradeoff is worth it.)
43775 bool PreferKOT = Subtarget.preferMaskRegisters();
43776 bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
43778 EVT VecVT = MVT::v16i8;
43779 EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
43780 if (OpSize == 256) {
43781 VecVT = MVT::v32i8;
43782 CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
43784 EVT CastVT = VecVT;
43785 bool NeedsAVX512FCast = false;
43786 if (OpSize == 512 || NeedZExt) {
43787 if (Subtarget.hasBWI()) {
43788 VecVT = MVT::v64i8;
43789 CmpVT = MVT::v64i1;
43790 if (OpSize == 512)
43791 CastVT = VecVT;
43792 } else {
43793 VecVT = MVT::v16i32;
43794 CmpVT = MVT::v16i1;
43795 CastVT = OpSize == 512 ? VecVT :
43796 OpSize == 256 ? MVT::v8i32 : MVT::v4i32;
43797 NeedsAVX512FCast = true;
43801 auto ScalarToVector = [&](SDValue X) -> SDValue {
43802 bool TmpZext = false;
43803 EVT TmpCastVT = CastVT;
43804 if (X.getOpcode() == ISD::ZERO_EXTEND) {
43805 SDValue OrigX = X.getOperand(0);
43806 unsigned OrigSize = OrigX.getScalarValueSizeInBits();
43807 if (OrigSize < OpSize) {
43808 if (OrigSize == 128) {
43809 TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
43812 } else if (OrigSize == 256) {
43813 TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
43819 X = DAG.getBitcast(TmpCastVT, X);
43820 if (!NeedZExt && !TmpZext)
43822 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43823 MVT VecIdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
43824 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
43825 DAG.getConstant(0, DL, VecVT), X,
43826 DAG.getConstant(0, DL, VecIdxVT));
43827 };
43829 SDValue Cmp;
43830 if (IsOrXorXorTreeCCZero) {
43831 // This is a bitwise-combined equality comparison of 2 pairs of vectors:
43832 // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
43833 // Use 2 vector equality compares and 'and' the results before doing a
43834 // single combined test (MOVMSK, PTEST or KORTEST, depending on the target).
43835 Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
43836 } else {
43837 SDValue VecX = ScalarToVector(X);
43838 SDValue VecY = ScalarToVector(Y);
43839 if (VecVT != CmpVT) {
43840 Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
43841 } else if (HasPT) {
43842 Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
43844 Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
43847 // AVX512 should emit a setcc that will lower to kortest.
43848 if (VecVT != CmpVT) {
43849 EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 :
43850 CmpVT == MVT::v32i1 ? MVT::i32 : MVT::i16;
43851 return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
43852 DAG.getConstant(0, DL, KRegVT), CC);
43853 }
43854 if (HasPT) {
43855 SDValue BCCmp = DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64,
43856 Cmp);
43857 SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
43858 X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
43859 SDValue SetCC = getSETCC(X86CC, PT, DL, DAG);
43860 return DAG.getNode(ISD::TRUNCATE, DL, VT, SetCC.getValue(0));
43862 // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
43863 // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
43864 // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
43865 // setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq
43866 // setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne
43867 SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
43868 SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL,
43870 return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
43876 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
43877 const X86Subtarget &Subtarget) {
43878 const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
43879 const SDValue LHS = N->getOperand(0);
43880 const SDValue RHS = N->getOperand(1);
43881 EVT VT = N->getValueType(0);
43882 EVT OpVT = LHS.getValueType();
43885 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
43886 // 0-x == y --> x+y == 0
43887 // 0-x != y --> x+y != 0
43888 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
43889 LHS.hasOneUse()) {
43890 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1));
43891 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
43892 }
43893 // x == 0-y --> x+y == 0
43894 // x != 0-y --> x+y != 0
43895 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
43896 RHS.hasOneUse()) {
43897 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
43898 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
43899 }
43901 if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
43905 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
43906 (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
43907 // Using temporaries to avoid messing up operand ordering for later
43908 // transformations if this doesn't work.
43909 SDValue Op0 = LHS;
43910 SDValue Op1 = RHS;
43911 ISD::CondCode TmpCC = CC;
43912 // Put build_vector on the right.
43913 if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
43914 std::swap(Op0, Op1);
43915 TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
43916 }
43918 bool IsSEXT0 =
43919 (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
43920 (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
43921 bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
43923 if (IsSEXT0 && IsVZero1) {
43924 assert(VT == Op0.getOperand(0).getValueType() &&
43925 "Unexpected operand type");
43926 if (TmpCC == ISD::SETGT)
43927 return DAG.getConstant(0, DL, VT);
43928 if (TmpCC == ISD::SETLE)
43929 return DAG.getConstant(1, DL, VT);
43930 if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
43931 return DAG.getNOT(DL, Op0.getOperand(0), VT);
43933 assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
43934 "Unexpected condition code!");
43935 return Op0.getOperand(0);
43939 // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
43940 // pre-promote its result type since vXi1 vectors don't get promoted
43941 // during type legalization.
43942 // NOTE: The element count check is to ignore operand types that need to
43943 // go through type promotion to a 128-bit vector.
43944 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
43945 VT.getVectorElementType() == MVT::i1 &&
43946 (OpVT.getVectorElementType() == MVT::i8 ||
43947 OpVT.getVectorElementType() == MVT::i16)) {
43948 SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
43949 return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
43952 // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
43953 // to avoid scalarization via legalization because v4i32 is not a legal type.
43954 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
43955 LHS.getValueType() == MVT::v4f32)
43956 return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
43961 static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
43962 TargetLowering::DAGCombinerInfo &DCI,
43963 const X86Subtarget &Subtarget) {
43964 SDValue Src = N->getOperand(0);
43965 MVT SrcVT = Src.getSimpleValueType();
43966 MVT VT = N->getSimpleValueType(0);
43967 unsigned NumBits = VT.getScalarSizeInBits();
43968 unsigned NumElts = SrcVT.getVectorNumElements();
43970 // Perform constant folding.
43971 if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
43972 assert(VT == MVT::i32 && "Unexpected result type");
43973 APInt Imm(32, 0);
43974 for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
43975 if (!Src.getOperand(Idx).isUndef() &&
43976 Src.getConstantOperandAPInt(Idx).isNegative())
43977 Imm.setBit(Idx);
43978 }
43979 return DAG.getConstant(Imm, SDLoc(N), VT);
43982 // Look through int->fp bitcasts that don't change the element width.
43983 unsigned EltWidth = SrcVT.getScalarSizeInBits();
43984 if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
43985 Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
43986 return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
43988 // Fold movmsk(not(x)) -> not(movmsk) to improve folding of movmsk results
43989 // with scalar comparisons.
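// For example with a v4f32 source (illustrative):
//   movmsk(not(x)) == xor(movmsk(x), 0xF)
// so a later scalar compare against 0xF can instead compare against 0.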
43990 if (SDValue NotSrc = IsNOT(Src, DAG)) {
43992 APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
43993 NotSrc = DAG.getBitcast(SrcVT, NotSrc);
43994 return DAG.getNode(ISD::XOR, DL, VT,
43995 DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
43996 DAG.getConstant(NotMask, DL, VT));
43999 // Simplify the inputs.
44000 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44001 APInt DemandedMask(APInt::getAllOnesValue(NumBits));
44002 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
44003 return SDValue(N, 0);
44008 static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
44009 TargetLowering::DAGCombinerInfo &DCI) {
44010 // With vector masks we only demand the upper bit of the mask.
44011 SDValue Mask = cast<X86MaskedGatherScatterSDNode>(N)->getMask();
44012 if (Mask.getScalarValueSizeInBits() != 1) {
44013 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44014 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
44015 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
44016 return SDValue(N, 0);
44022 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
44023 TargetLowering::DAGCombinerInfo &DCI) {
44025 auto *GorS = cast<MaskedGatherScatterSDNode>(N);
44026 SDValue Chain = GorS->getChain();
44027 SDValue Index = GorS->getIndex();
44028 SDValue Mask = GorS->getMask();
44029 SDValue Base = GorS->getBasePtr();
44030 SDValue Scale = GorS->getScale();
44032 if (DCI.isBeforeLegalize()) {
44033 unsigned IndexWidth = Index.getScalarValueSizeInBits();
44035 // Shrink constant indices if they are larger than 32-bits.
44036 // Only do this before legalize types since v2i64 could become v2i32.
44037 // FIXME: We could check that the type is legal if we're after legalize
44038 // types, but then we would need to construct test cases where that happens.
44039 // FIXME: We could support more than just constant vectors, but we need to be
44040 // careful with costing. A truncate that can be optimized out would be fine.
44041 // Otherwise we might only want to create a truncate if it avoids a split.
44042 if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
44043 if (BV->isConstant() && IndexWidth > 32 &&
44044 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
44045 unsigned NumElts = Index.getValueType().getVectorNumElements();
44046 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
44047 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
44048 if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
44049 SDValue Ops[] = { Chain, Gather->getPassThru(),
44050 Mask, Base, Index, Scale };
44051 return DAG.getMaskedGather(Gather->getVTList(),
44052 Gather->getMemoryVT(), DL, Ops,
44053 Gather->getMemOperand(),
44054 Gather->getIndexType());
44056 auto *Scatter = cast<MaskedScatterSDNode>(GorS);
44057 SDValue Ops[] = { Chain, Scatter->getValue(),
44058 Mask, Base, Index, Scale };
44059 return DAG.getMaskedScatter(Scatter->getVTList(),
44060 Scatter->getMemoryVT(), DL,
44061 Ops, Scatter->getMemOperand(),
44062 Scatter->getIndexType());
44066 // Shrink sign/zero extends whose source is 32 bits or smaller but whose
44067 // result is wider than 32 bits, provided there are sufficient sign bits.
44068 // Only do this before legalize types to avoid creating illegal types in truncate.
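// For example (illustrative): a v4i64 index produced by (sign_extend v4i32)
// can be truncated back to v4i32 here, halving the width of the index operand
// the gather/scatter has to consume.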
44069 if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
44070 Index.getOpcode() == ISD::ZERO_EXTEND) &&
44072 Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
44073 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
44074 unsigned NumElts = Index.getValueType().getVectorNumElements();
44075 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
44076 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
44077 if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
44078 SDValue Ops[] = { Chain, Gather->getPassThru(),
44079 Mask, Base, Index, Scale };
44080 return DAG.getMaskedGather(Gather->getVTList(),
44081 Gather->getMemoryVT(), DL, Ops,
44082 Gather->getMemOperand(),
44083 Gather->getIndexType());
44085 auto *Scatter = cast<MaskedScatterSDNode>(GorS);
44086 SDValue Ops[] = { Chain, Scatter->getValue(),
44087 Mask, Base, Index, Scale };
44088 return DAG.getMaskedScatter(Scatter->getVTList(),
44089 Scatter->getMemoryVT(), DL,
44090 Ops, Scatter->getMemOperand(),
44091 Scatter->getIndexType());
44095 if (DCI.isBeforeLegalizeOps()) {
44096 unsigned IndexWidth = Index.getScalarValueSizeInBits();
44098 // Make sure the index is either i32 or i64
44099 if (IndexWidth != 32 && IndexWidth != 64) {
44100 MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
44101 EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
44102 Index.getValueType().getVectorNumElements());
44103 Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
44104 if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
44105 SDValue Ops[] = { Chain, Gather->getPassThru(),
44106 Mask, Base, Index, Scale };
44107 return DAG.getMaskedGather(Gather->getVTList(),
44108 Gather->getMemoryVT(), DL, Ops,
44109 Gather->getMemOperand(),
44110 Gather->getIndexType());
44112 auto *Scatter = cast<MaskedScatterSDNode>(GorS);
44113 SDValue Ops[] = { Chain, Scatter->getValue(),
44114 Mask, Base, Index, Scale };
44115 return DAG.getMaskedScatter(Scatter->getVTList(),
44116 Scatter->getMemoryVT(), DL,
44117 Ops, Scatter->getMemOperand(),
44118 Scatter->getIndexType());
44122 // With vector masks we only demand the upper bit of the mask.
44123 if (Mask.getScalarValueSizeInBits() != 1) {
44124 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44125 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
44126 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
44127 return SDValue(N, 0);
44133 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
44134 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
44135 const X86Subtarget &Subtarget) {
44137 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
44138 SDValue EFLAGS = N->getOperand(1);
44140 // Try to simplify the EFLAGS and condition code operands.
44141 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
44142 return getSETCC(CC, Flags, DL, DAG);
44147 /// Optimize branch condition evaluation.
44148 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
44149 const X86Subtarget &Subtarget) {
44151 SDValue EFLAGS = N->getOperand(3);
44152 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
44154 // Try to simplify the EFLAGS and condition code operands.
44155 // Make sure to not keep references to operands, as combineSetCCEFLAGS can
44156 // RAUW them under us.
44157 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
44158 SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
44159 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
44160 N->getOperand(1), Cond, Flags);
44166 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
44167 SelectionDAG &DAG) {
44168 // Take advantage of vector comparisons producing 0 or -1 in each lane to
44169 // optimize away operation when it's from a constant.
44171 // The general transformation is:
44172 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
44173 // AND(VECTOR_CMP(x,y), constant2)
44174 // constant2 = UNARYOP(constant)
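// A hypothetical instance with sint_to_fp as the unary op:
//   sint_to_fp (and (vector_cmp x, y), <4 x i32> <1,1,1,1>)
//   --> and (vector_cmp x, y), bitcast(<4 x float> <1.0,1.0,1.0,1.0>)
// so the int->fp conversion of the constant is folded at compile time.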
44176 // Early exit if this isn't a vector operation, the operand of the
44177 // unary operation isn't a bitwise AND, or if the sizes of the operations
44178 // aren't the same.
44179 EVT VT = N->getValueType(0);
44180 bool IsStrict = N->isStrictFPOpcode();
44181 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
44182 if (!VT.isVector() || Op0->getOpcode() != ISD::AND ||
44183 Op0->getOperand(0)->getOpcode() != ISD::SETCC ||
44184 VT.getSizeInBits() != Op0.getValueSizeInBits())
44187 // Now check that the other operand of the AND is a constant. We could
44188 // make the transformation for non-constant splats as well, but it's unclear
44189 // that would be a benefit as it would not eliminate any operations, just
44190 // perform one more step in scalar code before moving to the vector unit.
44191 if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
44192 // Bail out if the vector isn't a constant.
44193 if (!BV->isConstant())
44196 // Everything checks out. Build up the new and improved node.
44198 EVT IntVT = BV->getValueType(0);
44199 // Create a new constant of the appropriate type for the transformed
44200 // DAG node.
44201 SDValue SourceConst;
44202 if (IsStrict)
44203 SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
44204 {N->getOperand(0), SDValue(BV, 0)});
44205 else
44206 SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
44207 // The AND node needs bitcasts to/from an integer vector type around it.
44208 SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
44209 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
44211 SDValue Res = DAG.getBitcast(VT, NewAnd);
44212 if (IsStrict)
44213 return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
44214 return Res;
44220 /// If we are converting a value to floating-point, try to replace scalar
44221 /// truncate of an extracted vector element with a bitcast. This tries to keep
44222 /// the sequence on XMM registers rather than moving between vector and GPRs.
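/// Illustrative example: sitofp (trunc i32 (extractelement <2 x i64> %v, 0))
/// becomes sitofp (extractelement (bitcast %v to <4 x i32>), 0), so the value
/// stays in an XMM register instead of bouncing through a GPR for the truncate.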
44223 static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
44224 // TODO: This is currently only used by combineSIntToFP, but it is generalized
44225 // to allow being called by any similar cast opcode.
44226 // TODO: Consider merging this into lowering: vectorizeExtractedCast().
44227 SDValue Trunc = N->getOperand(0);
44228 if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
44231 SDValue ExtElt = Trunc.getOperand(0);
44232 if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44233 !isNullConstant(ExtElt.getOperand(1)))
44236 EVT TruncVT = Trunc.getValueType();
44237 EVT SrcVT = ExtElt.getValueType();
44238 unsigned DestWidth = TruncVT.getSizeInBits();
44239 unsigned SrcWidth = SrcVT.getSizeInBits();
44240 if (SrcWidth % DestWidth != 0)
44243 // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
44244 EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
44245 unsigned VecWidth = SrcVecVT.getSizeInBits();
44246 unsigned NumElts = VecWidth / DestWidth;
44247 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
44248 SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
44250 SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
44251 BitcastVec, ExtElt.getOperand(1));
44252 return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
44255 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
44256 const X86Subtarget &Subtarget) {
44257 bool IsStrict = N->isStrictFPOpcode();
44258 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
44259 EVT VT = N->getValueType(0);
44260 EVT InVT = Op0.getValueType();
44262 // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
44263 // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
44264 // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
44265 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
44267 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
44268 InVT.getVectorNumElements());
44269 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
44271 // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
44272 if (IsStrict)
44273 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
44274 {N->getOperand(0), P});
44275 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
44278 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
44279 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
44280 // the optimization here.
44281 if (DAG.SignBitIsZero(Op0)) {
44282 if (IsStrict)
44283 return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
44284 {N->getOperand(0), Op0});
44285 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
44291 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
44292 TargetLowering::DAGCombinerInfo &DCI,
44293 const X86Subtarget &Subtarget) {
44294 // First try to optimize away the conversion entirely when it's
44295 // conditionally from a constant. Vectors only.
44296 bool IsStrict = N->isStrictFPOpcode();
44297 if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
44300 // Now move on to more general possibilities.
44301 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
44302 EVT VT = N->getValueType(0);
44303 EVT InVT = Op0.getValueType();
44305 // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
44306 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
44307 // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
44308 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
44310 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
44311 InVT.getVectorNumElements());
44312 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
44313 if (IsStrict)
44314 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
44315 {N->getOperand(0), P});
44316 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
44319 // Without AVX512DQ we only support i64 to float scalar conversion. For both
44320 // vectors and scalars, see if we know that the upper bits are all the sign
44321 // bit, in which case we can truncate the input to i32 and convert from that.
44322 if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
44323 unsigned BitWidth = InVT.getScalarSizeInBits();
44324 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
44325 if (NumSignBits >= (BitWidth - 31)) {
44326 EVT TruncVT = MVT::i32;
44327 if (InVT.isVector())
44328 TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
44329 InVT.getVectorNumElements());
44331 if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
44332 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
44333 if (IsStrict)
44334 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
44335 {N->getOperand(0), Trunc});
44336 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
44337 }
44338 // If we're after legalize and the type is v2i32 we need to shuffle and
44339 // use CVTSI2P.
44340 assert(InVT == MVT::v2i64 && "Unexpected VT!");
44341 SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
44342 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
44344 if (IsStrict)
44345 return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
44346 {N->getOperand(0), Shuf});
44347 return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
44351 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
44352 // a 32-bit target where SSE doesn't support i64->FP operations.
44353 if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
44354 Op0.getOpcode() == ISD::LOAD) {
44355 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
44356 EVT LdVT = Ld->getValueType(0);
44358 // This transformation is not supported if the result type is f16 or f128.
44359 if (VT == MVT::f16 || VT == MVT::f128)
44362 // If we have AVX512DQ we can use packed conversion instructions unless
44363 // the VT is f80.
44364 if (Subtarget.hasDQI() && VT != MVT::f80)
44367 if (Ld->isSimple() && !VT.isVector() &&
44368 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
44369 !Subtarget.is64Bit() && LdVT == MVT::i64) {
44370 std::pair<SDValue, SDValue> Tmp = Subtarget.getTargetLowering()->BuildFILD(
44371 SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
44372 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
44380 if (SDValue V = combineToFPTruncExtElt(N, DAG))
44386 static bool needCarryOrOverflowFlag(SDValue Flags) {
44387 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
44389 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
44390 UI != UE; ++UI) {
44391 SDNode *User = *UI;
44392 X86::CondCode CC;
44394 switch (User->getOpcode()) {
44395 default:
44396 // Be conservative.
44397 return true;
44398 case X86ISD::SETCC:
44399 case X86ISD::SETCC_CARRY:
44400 CC = (X86::CondCode)User->getConstantOperandVal(0);
44402 case X86ISD::BRCOND:
44403 CC = (X86::CondCode)User->getConstantOperandVal(2);
44404 break;
44405 case X86ISD::CMOV:
44406 CC = (X86::CondCode)User->getConstantOperandVal(2);
44407 break;
44408 }
44410 switch (CC) {
44411 default: break;
44412 case X86::COND_A: case X86::COND_AE:
44413 case X86::COND_B: case X86::COND_BE:
44414 case X86::COND_O: case X86::COND_NO:
44415 case X86::COND_G: case X86::COND_GE:
44416 case X86::COND_L: case X86::COND_LE:
44424 static bool onlyZeroFlagUsed(SDValue Flags) {
44425 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
44427 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
44429 SDNode *User = *UI;
44432 switch (User->getOpcode()) {
44433 default:
44434 // Be conservative.
44435 return false;
44436 case X86ISD::SETCC: CCOpNo = 0; break;
44437 case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
44438 case X86ISD::BRCOND: CCOpNo = 2; break;
44439 case X86ISD::CMOV: CCOpNo = 2; break;
44442 X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
44443 if (CC != X86::COND_E && CC != X86::COND_NE)
44450 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
44451 // Only handle test patterns.
44452 if (!isNullConstant(N->getOperand(1)))
44455 // If we have a CMP of a truncated binop, see if we can make a smaller binop
44456 // and use its flags directly.
44457 // TODO: Maybe we should try promoting compares that only use the zero flag
44458 // first if we can prove the upper bits with computeKnownBits?
44460 SDValue Op = N->getOperand(0);
44461 EVT VT = Op.getValueType();
44463 // If we have a constant logical shift that's only used in a comparison
44464 // against zero turn it into an equivalent AND. This allows turning it into
44465 // a TEST instruction later.
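// Illustrative: (cmp (srl x, 17), 0) becomes (cmp (and x, 0xFFFE0000), 0),
// which can then be emitted as a single TEST-with-immediate instead of a
// shift followed by a compare.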
44466 if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
44467 Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
44468 onlyZeroFlagUsed(SDValue(N, 0))) {
44469 unsigned BitWidth = VT.getSizeInBits();
44470 const APInt &ShAmt = Op.getConstantOperandAPInt(1);
44471 if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
44472 unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
44473 APInt Mask = Op.getOpcode() == ISD::SRL
44474 ? APInt::getHighBitsSet(BitWidth, MaskBits)
44475 : APInt::getLowBitsSet(BitWidth, MaskBits);
44476 if (Mask.isSignedIntN(32)) {
44477 Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
44478 DAG.getConstant(Mask, dl, VT));
44479 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
44480 DAG.getConstant(0, dl, VT));
44485 // Look for a truncate with a single use.
44486 if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
44489 Op = Op.getOperand(0);
44491 // Arithmetic op can only have one use.
44492 if (!Op.hasOneUse())
44493 return SDValue();
44495 unsigned NewOpc;
44496 switch (Op.getOpcode()) {
44497 default: return SDValue();
44498 case ISD::AND:
44499 // Skip and with constant. We have special handling for and with immediate
44500 // during isel to generate test instructions.
44501 if (isa<ConstantSDNode>(Op.getOperand(1)))
44502 return SDValue();
44503 NewOpc = X86ISD::AND;
44504 break;
44505 case ISD::OR: NewOpc = X86ISD::OR; break;
44506 case ISD::XOR: NewOpc = X86ISD::XOR; break;
44507 case ISD::ADD:
44508 // If the carry or overflow flag is used, we can't truncate.
44509 if (needCarryOrOverflowFlag(SDValue(N, 0)))
44510 return SDValue();
44511 NewOpc = X86ISD::ADD;
44512 break;
44513 case ISD::SUB:
44514 // If the carry or overflow flag is used, we can't truncate.
44515 if (needCarryOrOverflowFlag(SDValue(N, 0)))
44517 NewOpc = X86ISD::SUB;
44521 // We found an op we can narrow. Truncate its inputs.
44522 SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
44523 SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
44525 // Use a X86 specific opcode to avoid DAG combine messing with it.
44526 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
44527 Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
44529 // For AND, keep a CMP so that we can match the test pattern.
44530 if (NewOpc == X86ISD::AND)
44531 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
44532 DAG.getConstant(0, dl, VT));
44534 // Return the flags.
44535 return Op.getValue(1);
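// For X86ISD::ADD/SUB nodes whose EFLAGS result is unused, revert to the
// generic ISD::ADD/SUB; otherwise let any identical generic ADD/SUB already
// in the DAG reuse this flag-producing node (negating for the commuted SUB
// case).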
static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI) {
  assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
         "Expected X86ISD::ADD or X86ISD::SUB");

  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  MVT VT = LHS.getSimpleValueType();
  unsigned GenericOpc = X86ISD::ADD == N->getOpcode() ? ISD::ADD : ISD::SUB;

  // If we don't use the flag result, simplify back to a generic ADD/SUB.
  if (!N->hasAnyUseOfValue(1)) {
    SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
    return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
  }

  // Fold any similar generic ADD/SUB opcodes to reuse this node.
  auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
    SDValue Ops[] = {N0, N1};
    SDVTList VTs = DAG.getVTList(N->getValueType(0));
    if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
      SDValue Op(GenericAddSub, 0);
      if (Negate)
        Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
      DCI.CombineTo(GenericAddSub, Op);
    }
  };
  MatchGeneric(LHS, RHS, false);
  MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());

  return SDValue(N, 0);
}

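// Combine X86ISD::SBB: reuse the incoming carry when combineCarryThroughADD
// can see through it, and fold SBB(SUB(X,Y),0,Carry) to SBB(X,Y,Carry) when
// the SUB's flag result is dead.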
static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
                       N->getOperand(0), N->getOperand(1),
                       Flags);
  }

  // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
  // iff the flag result is dead.
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op1) &&
      !N->hasAnyUseOfValue(1))
    return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), Op0.getOperand(0),
                       Op0.getOperand(1), N->getOperand(2));

  return SDValue();
}

// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI) {
  // If the LHS and RHS of the ADC node are zero, then it can't overflow and
  // the result is either zero or one (depending on the input carry bit).
  // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
  if (X86::isZeroNode(N->getOperand(0)) &&
      X86::isZeroNode(N->getOperand(1)) &&
      // We don't have a good way to replace an EFLAGS use, so only do this when
      // the flag result isn't used.
      SDValue(N, 1).use_empty()) {
    SDLoc DL(N);
    EVT VT = N->getValueType(0);
    SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
    SDValue Res1 =
        DAG.getNode(ISD::AND, DL, VT,
                    DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                                N->getOperand(2)),
                    DAG.getConstant(1, DL, VT));
    return DCI.CombineTo(N, Res1, CarryOut);
  }

  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
    MVT VT = N->getSimpleValueType(0);
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
                       N->getOperand(0), N->getOperand(1),
                       Flags);
  }

  return SDValue();
}

/// If this is an add or subtract where one operand is produced by a cmp+setcc,
/// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
/// with CMP+{ADC, SBB}.
static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
  bool IsSub = N->getOpcode() == ISD::SUB;
  SDValue X = N->getOperand(0);
  SDValue Y = N->getOperand(1);

  // If this is an add, canonicalize a zext operand to the RHS.
  // TODO: Incomplete? What if both sides are zexts?
  if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
      Y.getOpcode() != ISD::ZERO_EXTEND)
    std::swap(X, Y);

  // Look through a one-use zext.
  bool PeekedThroughZext = false;
  if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
    Y = Y.getOperand(0);
    PeekedThroughZext = true;
  }

  // If this is an add, canonicalize a setcc operand to the RHS.
  // TODO: Incomplete? What if both sides are setcc?
  // TODO: Should we allow peeking through a zext of the other operand?
  if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
      Y.getOpcode() != X86ISD::SETCC)
    std::swap(X, Y);

  if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
    return SDValue();

  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);

  // If X is -1 or 0, then we have an opportunity to avoid constants required in
  // the general case below.
  auto *ConstantX = dyn_cast<ConstantSDNode>(X);
  if (ConstantX) {
    if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
        (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
      // This is a complicated way to get -1 or 0 from the carry flag:
      // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
      //  0 - SETB  -->  0 -  (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         Y.getOperand(1));
    }

    if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
        (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
      SDValue EFLAGS = Y->getOperand(1);
      if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
          EFLAGS.getValueType().isInteger() &&
          !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
        // Swap the operands of a SUB, and we have the same pattern as above.
        // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
        //  0 - SETA  (SUB A, B) -->  0 - SETB  (SUB B, A) --> SUB + SBB
        SDValue NewSub = DAG.getNode(
            X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
            EFLAGS.getOperand(1), EFLAGS.getOperand(0));
        SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
        return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                           DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                           NewEFLAGS);
      }
    }
  }

  if (CC == X86::COND_B) {
    // X + SETB Z --> adc X, 0
    // X - SETB Z --> sbb X, 0
    return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
                       DAG.getVTList(VT, MVT::i32), X,
                       DAG.getConstant(0, DL, VT), Y.getOperand(1));
  }

  if (CC == X86::COND_A) {
    SDValue EFLAGS = Y->getOperand(1);
    // Try to convert COND_A into COND_B in an attempt to facilitate
    // materializing "setb reg".
    //
    // Do not flip "e > c", where "c" is a constant, because Cmp instruction
    // cannot take an immediate as its first operand.
    //
    if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
        EFLAGS.getValueType().isInteger() &&
        !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
      SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
                                   EFLAGS.getNode()->getVTList(),
                                   EFLAGS.getOperand(1), EFLAGS.getOperand(0));
      SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
      return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
                         DAG.getVTList(VT, MVT::i32), X,
                         DAG.getConstant(0, DL, VT), NewEFLAGS);
    }
  }

  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  SDValue Cmp = Y.getOperand(1);
  if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
      !X86::isZeroNode(Cmp.getOperand(1)) ||
      !Cmp.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue Z = Cmp.getOperand(0);
  EVT ZVT = Z.getValueType();

  // If X is -1 or 0, then we have an opportunity to avoid constants required in
  // the general case below.
  if (ConstantX) {
    // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
    // fake operands:
    //  0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
    // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
    if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
        (!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
      SDValue Zero = DAG.getConstant(0, DL, ZVT);
      SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
      SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         SDValue(Neg.getNode(), 1));
    }

    // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
    // with fake operands:
    //  0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
    // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
    if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
        (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
      SDValue One = DAG.getConstant(1, DL, ZVT);
      SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cmp1);
    }
  }

  // (cmp Z, 1) sets the carry flag if Z is 0.
  SDValue One = DAG.getConstant(1, DL, ZVT);
  SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);

  // Add the flags type for ADC/SBB nodes.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);

  // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
  // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
  if (CC == X86::COND_NE)
    return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
                       DAG.getConstant(-1ULL, DL, VT), Cmp1);

  // X - (Z == 0) --> sub X, (zext(sete  Z, 0)) --> sbb X, 0, (cmp Z, 1)
  // X + (Z == 0) --> add X, (zext(sete  Z, 0)) --> adc X, 0, (cmp Z, 1)
  return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
                     DAG.getConstant(0, DL, VT), Cmp1);
}

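// Given a vector-reduction ADD fed by a multiply whose operands fit in 16
// bits (e.g. a v16i32 mul of sign-extended v16i16 values), shrink the
// multiply and emit X86ISD::VPMADDWD, which computes pairwise i16*i16
// products and sums adjacent pairs into i32 lanes.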
static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT VT = N->getValueType(0);

  // If the vector size is less than 128, or greater than the supported RegSize,
  // do not use PMADD.
  if (!VT.isVector() || VT.getVectorNumElements() < 8)
    return SDValue();

  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  auto UsePMADDWD = [&](SDValue Op) {
    ShrinkMode Mode;
    return Op.getOpcode() == ISD::MUL &&
           canReduceVMulWidth(Op.getNode(), DAG, Mode) &&
           Mode != ShrinkMode::MULU16 &&
           (!Subtarget.hasSSE41() ||
            (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
             Op->isOnlyUserOf(Op.getOperand(1).getNode())));
  };

  SDValue MulOp, OtherOp;
  if (UsePMADDWD(Op0)) {
    MulOp = Op0;
    OtherOp = Op1;
  } else if (UsePMADDWD(Op1)) {
    MulOp = Op1;
    OtherOp = Op0;
  } else
    return SDValue();

  SDLoc DL(N);
  EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                   VT.getVectorNumElements());
  EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                VT.getVectorNumElements() / 2);

  // Shrink the operands of mul.
  SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(0));
  SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(1));

  // Madd vector size is half of the original vector size
  auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
    MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
    return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
  };
  SDValue Madd = SplitOpsAndApply(DAG, Subtarget, DL, MAddVT, { N0, N1 },
                                  PMADDWDBuilder);
  // Fill the rest of the output with 0
  SDValue Zero = DAG.getConstant(0, DL, Madd.getSimpleValueType());
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd, Zero);

  // Preserve the reduction flag on the ADD. We may need to revisit for the
  // other operand.
  SDNodeFlags Flags;
  Flags.setVectorReduction(true);
  return DAG.getNode(ISD::ADD, DL, VT, Concat, OtherOp, Flags);
}

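// Match a vector-reduction ADD whose operand is abs(sub(zext i8 a, zext i8 b))
// and lower it to X86ISD::PSADBW (sum of absolute byte differences), padding
// the narrower PSADBW result with zeros before feeding it back into the
// reduction.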
static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // TODO: There's nothing special about i32, any integer type above i16 should
  // work just as well.
  if (!VT.isVector() || !VT.isSimple() ||
      !(VT.getVectorElementType() == MVT::i32))
    return SDValue();

  unsigned RegSize = 128;
  if (Subtarget.useBWIRegs())
    RegSize = 512;
  else if (Subtarget.hasAVX())
    RegSize = 256;

  // We only handle v16i32 for SSE2 / v32i32 for AVX / v64i32 for AVX512.
  // TODO: We should be able to handle larger vectors by splitting them before
  // feeding them into several SADs, and then reducing over those.
  if (VT.getSizeInBits() / 4 > RegSize)
    return SDValue();

  // We know N is a reduction add. To match SAD, we need one of the operands to
  // be an ABS.
  SDValue AbsOp = N->getOperand(0);
  SDValue OtherOp = N->getOperand(1);
  if (AbsOp.getOpcode() != ISD::ABS)
    std::swap(AbsOp, OtherOp);
  if (AbsOp.getOpcode() != ISD::ABS)
    return SDValue();

  // Check whether we have an abs-diff pattern feeding into the select.
  SDValue SadOp0, SadOp1;
  if (!detectZextAbsDiff(AbsOp, SadOp0, SadOp1))
    return SDValue();

  // SAD pattern detected. Now build a SAD instruction and an addition for
  // reduction. Note that the number of elements of the result of SAD is less
  // than the number of elements of its input. Therefore, we could only update
  // part of elements in the reduction vector.
  SDValue Sad = createPSADBW(DAG, SadOp0, SadOp1, DL, Subtarget);

  // The output of PSADBW is a vector of i64.
  // We need to turn the vector of i64 into a vector of i32.
  // If the reduction vector is at least as wide as the psadbw result, just
  // bitcast. If it's narrower (which can only occur for v2i32), bits 127:16 of
  // the PSADBW will be zero. If we promote/narrow vectors, truncate the v2i64
  // result to v2i32 which will be removed by type legalization. If we widen
  // narrow vectors then we bitcast to v4i32 and extract v2i32.
  MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
  Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);

  if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
    // Fill the upper elements with zero to match the add width.
    assert(VT.getSizeInBits() % ResVT.getSizeInBits() == 0 && "Unexpected VTs");
    unsigned NumConcats = VT.getSizeInBits() / ResVT.getSizeInBits();
    SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, DL, ResVT));
    Ops[0] = Sad;
    Sad = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
  } else if (VT.getSizeInBits() < ResVT.getSizeInBits()) {
    Sad = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Sad,
                      DAG.getIntPtrConstant(0, DL));
  }

  // Preserve the reduction flag on the ADD. We may need to revisit for the
  // other operand.
  SDNodeFlags Flags;
  Flags.setVectorReduction(true);
  return DAG.getNode(ISD::ADD, DL, VT, Sad, OtherOp, Flags);
}

static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
                            const SDLoc &DL, EVT VT,
                            const X86Subtarget &Subtarget) {
  // Example of pattern we try to detect:
  // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
  //(add (build_vector (extract_elt t, 0),
  //                   (extract_elt t, 2),
  //                   (extract_elt t, 4),
  //                   (extract_elt t, 6)),
  //     (build_vector (extract_elt t, 1),
  //                   (extract_elt t, 3),
  //                   (extract_elt t, 5),
  //                   (extract_elt t, 7)))

  if (!Subtarget.hasSSE2())
    return SDValue();

  if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
      Op1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
      VT.getVectorNumElements() < 4 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  // Check if one of Op0,Op1 is of the form:
  // (build_vector (extract_elt Mul, 0),
  //               (extract_elt Mul, 2),
  //               (extract_elt Mul, 4),
  //                   ...
  // the other is of the form:
  // (build_vector (extract_elt Mul, 1),
  //               (extract_elt Mul, 3),
  //               (extract_elt Mul, 5),
  //                   ...
  // and identify Mul.
  SDValue Mul;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
    SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
            Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
    // TODO: Be more tolerant to undefs.
    if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
    auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
    auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
    auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
    if (!Const0L || !Const1L || !Const0H || !Const1H)
      return SDValue();
    unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
             Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
    // Commutativity of mul allows factors of a product to reorder.
    if (Idx0L > Idx1L)
      std::swap(Idx0L, Idx1L);
    if (Idx0H > Idx1H)
      std::swap(Idx0H, Idx1H);
    // Commutativity of add allows pairs of factors to reorder.
    if (Idx0L > Idx0H) {
      std::swap(Idx0L, Idx0H);
      std::swap(Idx1L, Idx1H);
    }
    if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
        Idx1H != 2 * i + 3)
      return SDValue();
    if (!Mul) {
      // First time an extract_elt's source vector is visited. Must be a MUL
      // with 2X number of vector elements than the BUILD_VECTOR.
      // Both extracts must be from same MUL.
      Mul = Op0L->getOperand(0);
      if (Mul->getOpcode() != ISD::MUL ||
          Mul.getValueType().getVectorNumElements() != 2 * e)
        return SDValue();
    } else {
      // Check that the extract is from the same MUL previously seen.
      if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
          Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
        return SDValue();
    }
  }

  // Check if the Mul source can be safely shrunk.
  ShrinkMode Mode;
  if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
      Mode == ShrinkMode::MULU16)
    return SDValue();

  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                         ArrayRef<SDValue> Ops) {
    // Shrink by adding truncate nodes and let DAGCombine fold with the
    // sources.
    EVT InVT = Ops[0].getValueType();
    assert(InVT.getScalarType() == MVT::i32 &&
           "Unexpected scalar element type");
    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 InVT.getVectorNumElements() / 2);
    EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                   InVT.getVectorNumElements());
    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
                       DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[0]),
                       DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[1]));
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, VT,
                          { Mul.getOperand(0), Mul.getOperand(1) },
                          PMADDBuilder);
}

// Attempt to turn this pattern into PMADDWD.
// (mul (add (sext (build_vector)), (sext (build_vector))),
//      (add (sext (build_vector)), (sext (build_vector)))
static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
                              const SDLoc &DL, EVT VT,
                              const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
    return SDValue();

  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
      VT.getVectorNumElements() < 4 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  SDValue N10 = N1.getOperand(0);
  SDValue N11 = N1.getOperand(1);

  // All inputs need to be sign extends.
  // TODO: Support ZERO_EXTEND from known positive?
  if (N00.getOpcode() != ISD::SIGN_EXTEND ||
      N01.getOpcode() != ISD::SIGN_EXTEND ||
      N10.getOpcode() != ISD::SIGN_EXTEND ||
      N11.getOpcode() != ISD::SIGN_EXTEND)
    return SDValue();

  // Peek through the extends.
  N00 = N00.getOperand(0);
  N01 = N01.getOperand(0);
  N10 = N10.getOperand(0);
  N11 = N11.getOperand(0);

  // Must be extending from vXi16.
  EVT InVT = N00.getValueType();
  if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
      N10.getValueType() != InVT || N11.getValueType() != InVT)
    return SDValue();

  // All inputs should be build_vectors.
  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
      N01.getOpcode() != ISD::BUILD_VECTOR ||
      N10.getOpcode() != ISD::BUILD_VECTOR ||
      N11.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // For each element, we need to ensure we have an odd element from one vector
  // multiplied by the odd element of another vector and the even element from
  // one of the same vectors being multiplied by the even element from the
  // other vector. So we need to make sure for each element i, this operator
  // is being performed:
  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
  SDValue In0, In1;
  for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
    SDValue N00Elt = N00.getOperand(i);
    SDValue N01Elt = N01.getOperand(i);
    SDValue N10Elt = N10.getOperand(i);
    SDValue N11Elt = N11.getOperand(i);
    // TODO: Be more tolerant to undefs.
    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
      return SDValue();
    unsigned IdxN00 = ConstN00Elt->getZExtValue();
    unsigned IdxN01 = ConstN01Elt->getZExtValue();
    unsigned IdxN10 = ConstN10Elt->getZExtValue();
    unsigned IdxN11 = ConstN11Elt->getZExtValue();
    // Add is commutative so indices can be reordered.
    if (IdxN00 > IdxN10) {
      std::swap(IdxN00, IdxN10);
      std::swap(IdxN01, IdxN11);
    }
    // N0 indices must be the even elements. N1 indices must be the next odd
    // elements.
    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
      return SDValue();
    SDValue N00In = N00Elt.getOperand(0);
    SDValue N01In = N01Elt.getOperand(0);
    SDValue N10In = N10Elt.getOperand(0);
    SDValue N11In = N11Elt.getOperand(0);
    // First time we find an input capture it.
    if (!In0) {
      In0 = N00In;
      In1 = N01In;
    } else {
      // Mul is commutative so the input vectors can be in any order.
      // Canonicalize to make the compares easier.
      if (In0 != N00In)
        std::swap(N00In, N01In);
      if (In0 != N10In)
        std::swap(N10In, N11In);
      if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
        return SDValue();
    }
  }

  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                         ArrayRef<SDValue> Ops) {
    // Shrink by adding truncate nodes and let DAGCombine fold with the
    // sources.
    EVT OpVT = Ops[0].getValueType();
    assert(OpVT.getScalarType() == MVT::i16 &&
           "Unexpected scalar element type");
    assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 OpVT.getVectorNumElements() / 2);
    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
                          PMADDBuilder);
}

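// Combine ISD::ADD: handle reduction patterns (SAD, PMADDWD), synthesize
// horizontal adds, rewrite (add (zext vXi1), Y) as a subtract of a sign
// extension, and finally try ADC/SBB formation.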
static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  const SDNodeFlags Flags = N->getFlags();
  if (Flags.hasVectorReduction()) {
    if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget))
      return Sad;
    if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget))
      return MAdd;
  }
  EVT VT = N->getValueType(0);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
    return MAdd;
  if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
    return MAdd;

  // Try to synthesize horizontal adds from adds of shuffles.
  if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
       VT == MVT::v8i32) &&
      Subtarget.hasSSSE3() &&
      isHorizontalBinOp(Op0, Op1, DAG, Subtarget, true)) {
    auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                          ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
                            HADDBuilder);
  }

  // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
  // (sub Y, (sext (vXi1 X))).
  // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
  // generic DAG combine without a legal type check, but adding this there
  // caused regressions.
  if (VT.isVector()) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
        Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
        TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
      SDLoc DL(N);
      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
      return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
    }

    if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
        Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
        TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
      SDLoc DL(N);
      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
      return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
    }
  }

  return combineAddOrSubToADCOrSBB(N, DAG);
}

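// Try to form an unsigned saturating subtract (PSUBUS). For example
//   sub (umax a, b), b   and   sub a, (umin a, b)
// both compute max(a - b, 0) and can be emitted as ISD::USUBSAT.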
static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  if (!VT.isVector())
    return SDValue();

  // PSUBUS is supported, starting from SSE2, but truncation for v8i32
  // is only worth it with SSSE3 (PSHUFB).
  EVT EltVT = VT.getVectorElementType();
  if (!(Subtarget.hasSSE2() && (EltVT == MVT::i8 || EltVT == MVT::i16)) &&
      !(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
      !(Subtarget.useBWIRegs() && (VT == MVT::v16i32)))
    return SDValue();

  SDValue SubusLHS, SubusRHS;
  // Try to find umax(a,b) - b or a - umin(a,b) patterns
  // they may be converted to subus(a,b).
  // TODO: Need to add IR canonicalization for this code.
  if (Op0.getOpcode() == ISD::UMAX) {
    SubusRHS = Op1;
    SDValue MaxLHS = Op0.getOperand(0);
    SDValue MaxRHS = Op0.getOperand(1);
    if (MaxLHS == Op1)
      SubusLHS = MaxRHS;
    else if (MaxRHS == Op1)
      SubusLHS = MaxLHS;
    else
      return SDValue();
  } else if (Op1.getOpcode() == ISD::UMIN) {
    SubusLHS = Op0;
    SDValue MinLHS = Op1.getOperand(0);
    SDValue MinRHS = Op1.getOperand(1);
    if (MinLHS == Op0)
      SubusRHS = MinRHS;
    else if (MinRHS == Op0)
      SubusRHS = MinLHS;
    else
      return SDValue();
  } else
    return SDValue();

  // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
  // special preprocessing in some cases.
  if (EltVT == MVT::i8 || EltVT == MVT::i16)
    return DAG.getNode(ISD::USUBSAT, SDLoc(N), VT, SubusLHS, SubusRHS);

  assert((VT == MVT::v8i32 || VT == MVT::v16i32 || VT == MVT::v8i64) &&
         "Unexpected VT!");

  // Special preprocessing case can be only applied
  // if the value was zero extended from 16 bit,
  // so we require first 16 bits to be zeros for 32 bit
  // values, or first 48 bits for 64 bit values.
  KnownBits Known = DAG.computeKnownBits(SubusLHS);
  unsigned NumZeros = Known.countMinLeadingZeros();
  if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
    return SDValue();

  EVT ExtType = SubusLHS.getValueType();
  EVT ShrinkedType;
  if (VT == MVT::v8i32 || VT == MVT::v8i64)
    ShrinkedType = MVT::v8i16;
  else
    ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;

  // If SubusLHS is zero-extended, truncate SubusRHS to its size:
  //   SubusRHS = umin(0xFFF.., SubusRHS).
  SDValue SaturationConst =
      DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
                                           ShrinkedType.getScalarSizeInBits()),
                      SDLoc(SubusLHS), ExtType);
  SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
                             SaturationConst);
  SDValue NewSubusLHS =
      DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
  SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
  SDValue Psubus = DAG.getNode(ISD::USUBSAT, SDLoc(N), ShrinkedType,
                               NewSubusLHS, NewSubusRHS);

  // Zero extend the result, it may be used somewhere as 32 bit,
  // if not zext and following trunc will shrink.
  return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
}

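// Combine ISD::SUB: fold a constant LHS into a preceding XOR, synthesize
// horizontal subs, form PSUBUS, and finally try ADC/SBB formation.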
static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // X86 can't encode an immediate LHS of a sub. See if we can push the
  // negation into a preceding instruction.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
    // If the RHS of the sub is a XOR with one use and a constant, invert the
    // immediate. Then add one to the LHS of the sub so we can turn
    // X-Y -> X+~Y+1, saving one register.
    if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
        isa<ConstantSDNode>(Op1.getOperand(1))) {
      const APInt &XorC = Op1.getConstantOperandAPInt(1);
      EVT VT = Op0.getValueType();
      SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
                                   Op1.getOperand(0),
                                   DAG.getConstant(~XorC, SDLoc(Op1), VT));
      return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
                         DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
    }
  }

  // Try to synthesize horizontal subs from subs of shuffles.
  EVT VT = N->getValueType(0);
  if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
       VT == MVT::v8i32) &&
      Subtarget.hasSSSE3() &&
      isHorizontalBinOp(Op0, Op1, DAG, Subtarget, false)) {
    auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                          ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
                            HSUBBuilder);
  }

  // Try to create PSUBUS if SUB's argument is max/min
  if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
    return V;

  return combineAddOrSubToADCOrSBB(N, DAG);
}

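// Fold PCMPEQ/PCMPGT with identical operands: x == x is all-ones and
// x > x is all-zeros.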
static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT VT = N->getSimpleValueType(0);
  SDLoc DL(N);

  if (N->getOperand(0) == N->getOperand(1)) {
    if (N->getOpcode() == X86ISD::PCMPEQ)
      return DAG.getConstant(-1, DL, VT);
    if (N->getOpcode() == X86ISD::PCMPGT)
      return DAG.getConstant(0, DL, VT);
  }

  return SDValue();
}

/// Helper that combines an array of subvector ops as if they were the operands
/// of a ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
/// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
                                      ArrayRef<SDValue> Ops, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget &Subtarget) {
  assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");

  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
    return DAG.getUNDEF(VT);

  if (llvm::all_of(Ops, [](SDValue Op) {
        return ISD::isBuildVectorAllZeros(Op.getNode());
      }))
    return getZeroVector(VT, Subtarget, DAG, DL);

  SDValue Op0 = Ops[0];

  // Fold subvector loads into one.
  // If needed, look through bitcasts to get to the load.
  if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
    bool Fast;
    const X86TargetLowering *TLI = Subtarget.getTargetLowering();
    if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                                *FirstLd->getMemOperand(), &Fast) &&
        Fast) {
      if (SDValue Ld =
              EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
        return Ld;
    }
  }

  // Repeated subvectors.
  if (llvm::all_of(Ops, [Op0](SDValue Op) { return Op == Op0; })) {
    // If this broadcast/subv_broadcast is inserted into both halves, use a
    // larger broadcast/subv_broadcast.
    if (Op0.getOpcode() == X86ISD::VBROADCAST ||
        Op0.getOpcode() == X86ISD::SUBV_BROADCAST)
      return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));

    // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
    if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
        (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
                                     Op0.getOperand(0),
                                     DAG.getIntPtrConstant(0, DL)));

    // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
    if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
        (Subtarget.hasAVX2() ||
         (VT.getScalarSizeInBits() >= 32 && MayFoldLoad(Op0.getOperand(0)))) &&
        Op0.getOperand(0).getValueType() == VT.getScalarType())
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
  }

  bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });

  // Repeated opcode.
  // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
  // but it currently struggles with different vector widths.
  if (llvm::all_of(Ops, [Op0](SDValue Op) {
        return Op.getOpcode() == Op0.getOpcode();
      })) {
    unsigned NumOps = Ops.size();
    switch (Op0.getOpcode()) {
    case X86ISD::PSHUFHW:
    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFD:
      if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
          Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
        SmallVector<SDValue, 2> Src;
        for (unsigned i = 0; i != NumOps; ++i)
          Src.push_back(Ops[i].getOperand(0));
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
                           Op0.getOperand(1));
      }
      break;
    case X86ISD::VPERMILPI:
      // TODO - add support for vXf64/vXi64 shuffles.
      if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
          Subtarget.hasAVX() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
        SmallVector<SDValue, 2> Src;
        for (unsigned i = 0; i != NumOps; ++i)
          Src.push_back(DAG.getBitcast(MVT::v4f32, Ops[i].getOperand(0)));
        SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f32, Src);
        Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
                          Op0.getOperand(1));
        return DAG.getBitcast(VT, Res);
      }
      break;
    case X86ISD::PACKUS:
      if (NumOps == 2 && VT.is256BitVector() && Subtarget.hasInt256()) {
        SmallVector<SDValue, 2> LHS, RHS;
        for (unsigned i = 0; i != NumOps; ++i) {
          LHS.push_back(Ops[i].getOperand(0));
          RHS.push_back(Ops[i].getOperand(1));
        }
        MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
        SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
                                 NumOps * SrcVT.getVectorNumElements());
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, LHS),
                           DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, RHS));
      }
      break;
    }
  }

  return SDValue();
}

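// Combine an ISD::CONCAT_VECTORS node by forwarding its operands to
// combineConcatVectorOps when the types are legal AVX vectors.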
static SDValue combineConcatVectors(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  EVT SrcVT = N->getOperand(0).getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Don't do anything for i1 vectors.
  if (VT.getVectorElementType() == MVT::i1)
    return SDValue();

  if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
    SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
    if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
                                           DCI, Subtarget))
      return R;
  }

  return SDValue();
}

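// Combine ISD::INSERT_SUBVECTOR: fold inserts of zeros/undefs, turn an
// insert-of-extract into a shuffle, recognise concat_vectors-style patterns,
// and widen broadcasts inserted into an undef vector.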
static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  MVT OpVT = N->getSimpleValueType(0);

  bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;

  SDLoc dl(N);
  SDValue Vec = N->getOperand(0);
  SDValue SubVec = N->getOperand(1);

  uint64_t IdxVal = N->getConstantOperandVal(2);
  MVT SubVecVT = SubVec.getSimpleValueType();

  if (Vec.isUndef() && SubVec.isUndef())
    return DAG.getUNDEF(OpVT);

  // Inserting undefs/zeros into zeros/undefs is a zero vector.
  if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
      (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
    return getZeroVector(OpVT, Subtarget, DAG, dl);

  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
    // If we're inserting into a zero vector and then into a larger zero vector,
    // just insert into the larger zero vector directly.
    if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
        ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
      uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
                         getZeroVector(OpVT, Subtarget, DAG, dl),
                         SubVec.getOperand(1),
                         DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
    }

    // If we're inserting into a zero vector and our input was extracted from an
    // insert into a zero vector of the same type and the extraction was at
    // least as large as the original insertion. Just insert the original
    // subvector into a zero vector.
    if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
        isNullConstant(SubVec.getOperand(1)) &&
        SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
      SDValue Ins = SubVec.getOperand(0);
      if (isNullConstant(Ins.getOperand(2)) &&
          ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
          Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
        return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
                           getZeroVector(OpVT, Subtarget, DAG, dl),
                           Ins.getOperand(1), N->getOperand(2));
    }
  }

  // Stop here if this is an i1 vector.
  if (IsI1Vector)
    return SDValue();

  // If this is an insert of an extract, combine to a shuffle. Don't do this
  // if the insert or extract can be represented with a subregister operation.
  if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      SubVec.getOperand(0).getSimpleValueType() == OpVT &&
      (IdxVal != 0 || !Vec.isUndef())) {
    int ExtIdxVal = SubVec.getConstantOperandVal(1);
    if (ExtIdxVal != 0) {
      int VecNumElts = OpVT.getVectorNumElements();
      int SubVecNumElts = SubVecVT.getVectorNumElements();
      SmallVector<int, 64> Mask(VecNumElts);
      // First create an identity shuffle mask.
      for (int i = 0; i != VecNumElts; ++i)
        Mask[i] = i;
      // Now insert the extracted portion.
      for (int i = 0; i != SubVecNumElts; ++i)
        Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;

      return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
    }
  }

  // Match concat_vector style patterns.
  SmallVector<SDValue, 2> SubVectorOps;
  if (collectConcatOps(N, SubVectorOps)) {
    if (SDValue Fold =
            combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
      return Fold;

    // If we're inserting all zeros into the upper half, change this to
    // a concat with zero. We will match this to a move
    // with implicit upper bit zeroing during isel.
    // We do this here because we don't want combineConcatVectorOps to
    // create INSERT_SUBVECTOR from CONCAT_VECTORS.
    if (SubVectorOps.size() == 2 &&
        ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
                         getZeroVector(OpVT, Subtarget, DAG, dl),
                         SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
  }

  // If this is a broadcast insert into an upper undef, use a larger broadcast.
  if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
    return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));

  // If this is a broadcast load inserted into an upper undef, use a larger
  // broadcast load.
  if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
      SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
    auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
    SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
    SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
    SDValue BcastLd =
        DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
                                MemIntr->getMemoryVT(),
                                MemIntr->getMemOperand());
    DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
    return BcastLd;
  }

  return SDValue();
}

/// If we are extracting a subvector of a vector select and the select condition
/// is composed of concatenated vectors, try to narrow the select width. This
/// is a common pattern for AVX1 integer code because 256-bit selects may be
/// legal, but there is almost no integer math/logic available for 256-bit.
/// This function should only be called with legal types (otherwise, the calls
/// to get simple value types will assert).
static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
  SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
  SmallVector<SDValue, 4> CatOps;
  if (Sel.getOpcode() != ISD::VSELECT ||
      !collectConcatOps(Sel.getOperand(0).getNode(), CatOps))
    return SDValue();

  // Note: We assume simple value types because this should only be called with
  //       legal operations/types.
  // TODO: This can be extended to handle extraction to 256-bits.
  MVT VT = Ext->getSimpleValueType(0);
  if (!VT.is128BitVector())
    return SDValue();

  MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
  if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
    return SDValue();

  MVT WideVT = Ext->getOperand(0).getSimpleValueType();
  MVT SelVT = Sel.getSimpleValueType();
  assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
         "Unexpected vector type with legal operations");

  unsigned SelElts = SelVT.getVectorNumElements();
  unsigned CastedElts = WideVT.getVectorNumElements();
  unsigned ExtIdx = cast<ConstantSDNode>(Ext->getOperand(1))->getZExtValue();
  if (SelElts % CastedElts == 0) {
    // The select has the same or more (narrower) elements than the extract
    // operand. The extraction index gets scaled by that factor.
    ExtIdx *= (SelElts / CastedElts);
  } else if (CastedElts % SelElts == 0) {
    // The select has less (wider) elements than the extract operand. Make sure
    // that the extraction index can be divided evenly.
    unsigned IndexDivisor = CastedElts / SelElts;
    if (ExtIdx % IndexDivisor != 0)
      return SDValue();
    ExtIdx /= IndexDivisor;
  } else {
    llvm_unreachable("Element count of simple vector types are not divisible?");
  }

  unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
  unsigned NarrowElts = SelElts / NarrowingFactor;
  MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);

  SDLoc DL(Ext);
  SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
  SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
  SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
  SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
  return DAG.getBitcast(VT, NarrowSel);
}

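// Combine ISD::EXTRACT_SUBVECTOR: split AVX1 256-bit and+not patterns, narrow
// vselects, extract directly from zero/ones/build_vector/broadcast inputs,
// and shrink several unary ops when only the low subvector is used.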
static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const X86Subtarget &Subtarget) {
  // For AVX1 only, if we are extracting from a 256-bit and+not (which will
  // eventually get combined/lowered into ANDNP) with a concatenated operand,
  // split the 'and' into 128-bit ops to avoid the concatenate and extract.
  // We let generic combining take over from there to simplify the
  // insert/extract and 'not'.
  // This pattern emerges during AVX1 legalization. We handle it before lowering
  // to avoid complications like splitting constant vector loads.

  // Capture the original wide type in the likely case that we need to bitcast
  // back to this type.
  if (!N->getValueType(0).isSimple())
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  SDValue InVec = N->getOperand(0);
  SDValue InVecBC = peekThroughBitcasts(InVec);
  EVT InVecVT = InVec.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
      TLI.isTypeLegal(InVecVT) &&
      InVecVT.getSizeInBits() == 256 && InVecBC.getOpcode() == ISD::AND) {
    auto isConcatenatedNot = [] (SDValue V) {
      V = peekThroughBitcasts(V);
      if (!isBitwiseNot(V))
        return false;
      SDValue NotOp = V->getOperand(0);
      return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
    };
    if (isConcatenatedNot(InVecBC.getOperand(0)) ||
        isConcatenatedNot(InVecBC.getOperand(1))) {
      // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
      SDValue Concat = split256IntArith(InVecBC, DAG);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
                         DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
    }
  }

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue V = narrowExtractedVectorSelect(N, DAG))
    return V;

  unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();

  if (ISD::isBuildVectorAllZeros(InVec.getNode()))
    return getZeroVector(VT, Subtarget, DAG, SDLoc(N));

  if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
    if (VT.getScalarType() == MVT::i1)
      return DAG.getConstant(1, SDLoc(N), VT);
    return getOnesVector(VT, DAG, SDLoc(N));
  }

  if (InVec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getBuildVector(
        VT, SDLoc(N),
        InVec.getNode()->ops().slice(IdxVal, VT.getVectorNumElements()));

  // If we are extracting from an insert into a zero vector, replace with a
  // smaller insert into zero if we don't access less than the original
  // subvector. Don't do this for i1 vectors.
  if (VT.getVectorElementType() != MVT::i1 &&
      InVec.getOpcode() == ISD::INSERT_SUBVECTOR && IdxVal == 0 &&
      InVec.hasOneUse() && isNullConstant(InVec.getOperand(2)) &&
      ISD::isBuildVectorAllZeros(InVec.getOperand(0).getNode()) &&
      InVec.getOperand(1).getValueSizeInBits() <= VT.getSizeInBits()) {
    SDLoc DL(N);
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       getZeroVector(VT, Subtarget, DAG, DL),
                       InVec.getOperand(1), InVec.getOperand(2));
  }

  // If we're extracting from a broadcast then we're better off just
  // broadcasting to the smaller type directly, assuming this is the only use.
  // As its a broadcast we don't care about the extraction index.
  if (InVec.getOpcode() == X86ISD::VBROADCAST && InVec.hasOneUse() &&
      InVec.getOperand(0).getValueSizeInBits() <= VT.getSizeInBits())
    return DAG.getNode(X86ISD::VBROADCAST, SDLoc(N), VT, InVec.getOperand(0));

  if (InVec.getOpcode() == X86ISD::VBROADCAST_LOAD && InVec.hasOneUse()) {
    auto *MemIntr = cast<MemIntrinsicSDNode>(InVec);
    if (MemIntr->getMemoryVT().getSizeInBits() <= VT.getSizeInBits()) {
      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
      SDValue BcastLd =
          DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
                                  MemIntr->getMemoryVT(),
                                  MemIntr->getMemOperand());
      DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
      return BcastLd;
    }
  }

  // If we're extracting the lowest subvector and we're the only user,
  // we may be able to perform this with a smaller vector width.
  if (IdxVal == 0 && InVec.hasOneUse()) {
    unsigned InOpcode = InVec.getOpcode();
    if (VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
      // v2f64 CVTDQ2PD(v4i32).
      if (InOpcode == ISD::SINT_TO_FP &&
          InVec.getOperand(0).getValueType() == MVT::v4i32) {
        return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
      }
      // v2f64 CVTUDQ2PD(v4i32).
      if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
          InVec.getOperand(0).getValueType() == MVT::v4i32) {
        return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
      }
      // v2f64 CVTPS2PD(v4f32).
      if (InOpcode == ISD::FP_EXTEND &&
          InVec.getOperand(0).getValueType() == MVT::v4f32) {
        return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
      }
    }
    if ((InOpcode == ISD::ANY_EXTEND ||
         InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
         InOpcode == ISD::ZERO_EXTEND ||
         InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
         InOpcode == ISD::SIGN_EXTEND ||
         InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
        VT.is128BitVector() &&
        InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
      unsigned ExtOp = getOpcode_EXTEND_VECTOR_INREG(InOpcode);
      return DAG.getNode(ExtOp, SDLoc(N), VT, InVec.getOperand(0));
    }
    if (InOpcode == ISD::VSELECT &&
        InVec.getOperand(0).getValueType().is256BitVector() &&
        InVec.getOperand(1).getValueType().is256BitVector() &&
        InVec.getOperand(2).getValueType().is256BitVector()) {
      SDLoc DL(N);
      SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
      SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
      SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
      return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
    }
  }

  return SDValue();
}

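// Combine ISD::SCALAR_TO_VECTOR: look through an AND with 1 or an
// extract_vector_elt of element 0 when building a v1i1 mask, and shrink a
// v2i64 scalar_to_vector of an any-extended value to v4i32.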
static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(0);
  SDLoc DL(N);

  // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
  // This occurs frequently in our masked scalar intrinsic code and our
  // floating point select lowering with AVX512.
  // TODO: SimplifyDemandedBits instead?
  if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
    if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
      if (C->getAPIntValue().isOneValue())
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
                           Src.getOperand(0));

  // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
  if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
      Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
    if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
      if (C->isNullValue())
        return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
                           Src.getOperand(1));

  // Reduce v2i64 to v4i32 if we don't need the upper bits.
  // TODO: Move to DAGCombine?
  if (VT == MVT::v2i64 && Src.getOpcode() == ISD::ANY_EXTEND &&
      Src.getValueType() == MVT::i64 && Src.hasOneUse() &&
      Src.getOperand(0).getScalarValueSizeInBits() <= 32)
    return DAG.getBitcast(
        VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
                        DAG.getAnyExtOrTrunc(Src.getOperand(0), DL, MVT::i32)));

  return SDValue();
}

// Simplify PMULDQ and PMULUDQ operations.
static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI,
                             const X86Subtarget &Subtarget) {
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Canonicalize constant to RHS.
  if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
      !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
    return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);

  // Multiply by zero.
  // Don't return RHS as it may contain UNDEFs.
  if (ISD::isBuildVectorAllZeros(RHS.getNode()))
    return DAG.getConstant(0, SDLoc(N), N->getValueType(0));

  // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
    return SDValue(N, 0);

  // If the input is an extend_invec and the SimplifyDemandedBits call didn't
  // convert it to any_extend_invec, due to the LegalOperations check, do the
  // conversion directly to a vector shuffle manually. This exposes combine
  // opportunities missed by combineExtInVec not calling
  // combineX86ShufflesRecursively on SSE4.1 targets.
  // FIXME: This is basically a hack around several other issues related to
  // ANY_EXTEND_VECTOR_INREG.
  if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
      (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
       LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
      LHS.getOperand(0).getValueType() == MVT::v4i32) {
    SDLoc dl(N);
    LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
                               LHS.getOperand(0), { 0, -1, 1, -1 });
    LHS = DAG.getBitcast(MVT::v2i64, LHS);
    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
  }
  if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
      (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
       RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
      RHS.getOperand(0).getValueType() == MVT::v4i32) {
    SDLoc dl(N);
    RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
                               RHS.getOperand(0), { 0, -1, 1, -1 });
    RHS = DAG.getBitcast(MVT::v2i64, RHS);
    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
  }

  return SDValue();
}

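// Combine *_EXTEND_VECTOR_INREG: fold an extend of a plain vector load into
// an extending load when that extload is legal, and otherwise try to match
// the extend as a shuffle on AVX targets.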
static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue In = N->getOperand(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Try to merge vector loads and extend_inreg to an extload.
  if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
      In.hasOneUse()) {
    auto *Ld = cast<LoadSDNode>(In);
    if (Ld->isSimple()) {
      MVT SVT = In.getSimpleValueType().getVectorElementType();
      ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG
                                 ? ISD::SEXTLOAD
                                 : ISD::ZEXTLOAD;
      EVT MemVT = EVT::getVectorVT(*DAG.getContext(), SVT,
                                   VT.getVectorNumElements());
      if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
        SDValue Load =
            DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),
                           Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
                           Ld->getMemOperand()->getFlags());
        DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
        return Load;
      }
    }
  }

  // Attempt to combine as a shuffle.
  // TODO: SSE41 support
  if (Subtarget.hasAVX() && N->getOpcode() != ISD::SIGN_EXTEND_VECTOR_INREG) {
    SDValue Op(N, 0);
    if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
      if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
        return Res;
  }

  return SDValue();
}

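// Combine X86ISD::KSHIFTL/KSHIFTR: a shift of an all-zeros mask is zero, and
// otherwise let SimplifyDemandedVectorElts prune unused lanes.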
static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);

  if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
    return DAG.getConstant(0, SDLoc(N), VT);

  APInt KnownUndef, KnownZero;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
                                     KnownZero, DCI))
    return SDValue(N, 0);

  return SDValue();
}

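// Dispatch the target-specific DAG combines above based on the node's opcode.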
45965 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
45966 DAGCombinerInfo &DCI) const {
45967 SelectionDAG &DAG = DCI.DAG;
45968 switch (N->getOpcode()) {
45970 case ISD::SCALAR_TO_VECTOR:
45971 return combineScalarToVector(N, DAG);
45972 case ISD::EXTRACT_VECTOR_ELT:
45973 case X86ISD::PEXTRW:
45974 case X86ISD::PEXTRB:
45975 return combineExtractVectorElt(N, DAG, DCI, Subtarget);
45976 case ISD::CONCAT_VECTORS:
45977 return combineConcatVectors(N, DAG, DCI, Subtarget);
45978 case ISD::INSERT_SUBVECTOR:
45979 return combineInsertSubvector(N, DAG, DCI, Subtarget);
45980 case ISD::EXTRACT_SUBVECTOR:
45981 return combineExtractSubvector(N, DAG, DCI, Subtarget);
45984 case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
45985 case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
45986 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
45987 case X86ISD::CMP: return combineCMP(N, DAG);
45988 case ISD::ADD: return combineAdd(N, DAG, DCI, Subtarget);
45989 case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget);
45991 case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI);
45992 case X86ISD::SBB: return combineSBB(N, DAG);
45993 case X86ISD::ADC: return combineADC(N, DAG, DCI);
45994 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
45995 case ISD::SHL: return combineShiftLeft(N, DAG);
45996 case ISD::SRA: return combineShiftRightArithmetic(N, DAG);
45997 case ISD::SRL: return combineShiftRightLogical(N, DAG, DCI);
45998 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
45999 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
46000 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
46001 case X86ISD::BEXTR: return combineBEXTR(N, DAG, DCI, Subtarget);
46002 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
46003 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
46004 case ISD::STORE: return combineStore(N, DAG, DCI, Subtarget);
46005 case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
46006 case ISD::SINT_TO_FP:
46007 case ISD::STRICT_SINT_TO_FP:
46008 return combineSIntToFP(N, DAG, DCI, Subtarget);
46009 case ISD::UINT_TO_FP:
46010 case ISD::STRICT_UINT_TO_FP:
46011 return combineUIntToFP(N, DAG, Subtarget);
46012 case ISD::FADD:
46013 case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
46014 case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
46015 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
46016 case X86ISD::VTRUNC: return combineVTRUNC(N, DAG);
46017 case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
46018 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
46019 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
46020 case X86ISD::FXOR:
46021 case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
46022 case X86ISD::FMIN:
46023 case X86ISD::FMAX: return combineFMinFMax(N, DAG);
46024 case ISD::FMINNUM:
46025 case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
46026 case X86ISD::CVTSI2P:
46027 case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI);
46028 case X86ISD::CVTP2SI:
46029 case X86ISD::CVTP2UI:
46030 case X86ISD::CVTTP2SI:
46031 case X86ISD::CVTTP2UI: return combineCVTP2I_CVTTP2I(N, DAG, DCI);
46032 case X86ISD::BT: return combineBT(N, DAG, DCI);
46033 case ISD::ANY_EXTEND:
46034 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
46035 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
46036 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
46037 case ISD::ANY_EXTEND_VECTOR_INREG:
46038 case ISD::SIGN_EXTEND_VECTOR_INREG:
46039 case ISD::ZERO_EXTEND_VECTOR_INREG: return combineExtInVec(N, DAG, DCI,
46040 Subtarget);
46041 case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
46042 case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
46043 case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
46044 case X86ISD::PACKSS:
46045 case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
46046 case X86ISD::VSHL:
46047 case X86ISD::VSRA:
46048 case X86ISD::VSRL:
46049 return combineVectorShiftVar(N, DAG, DCI, Subtarget);
46050 case X86ISD::VSHLI:
46051 case X86ISD::VSRAI:
46052 case X86ISD::VSRLI:
46053 return combineVectorShiftImm(N, DAG, DCI, Subtarget);
46054 case X86ISD::PINSRB:
46055 case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
46056 case X86ISD::SHUFP: // Handle all target specific shuffles
46057 case X86ISD::INSERTPS:
46058 case X86ISD::EXTRQI:
46059 case X86ISD::INSERTQI:
46060 case X86ISD::PALIGNR:
46061 case X86ISD::VSHLDQ:
46062 case X86ISD::VSRLDQ:
46063 case X86ISD::BLENDI:
46064 case X86ISD::UNPCKH:
46065 case X86ISD::UNPCKL:
46066 case X86ISD::MOVHLPS:
46067 case X86ISD::MOVLHPS:
46068 case X86ISD::PSHUFB:
46069 case X86ISD::PSHUFD:
46070 case X86ISD::PSHUFHW:
46071 case X86ISD::PSHUFLW:
46072 case X86ISD::MOVSHDUP:
46073 case X86ISD::MOVSLDUP:
46074 case X86ISD::MOVDDUP:
46075 case X86ISD::MOVSS:
46076 case X86ISD::MOVSD:
46077 case X86ISD::VBROADCAST:
46078 case X86ISD::VPPERM:
46079 case X86ISD::VPERMI:
46080 case X86ISD::VPERMV:
46081 case X86ISD::VPERMV3:
46082 case X86ISD::VPERMIL2:
46083 case X86ISD::VPERMILPI:
46084 case X86ISD::VPERMILPV:
46085 case X86ISD::VPERM2X128:
46086 case X86ISD::SHUF128:
46087 case X86ISD::VZEXT_MOVL:
46088 case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
46089 case X86ISD::FMADD_RND:
46090 case X86ISD::FMSUB:
46091 case X86ISD::FMSUB_RND:
46092 case X86ISD::FNMADD:
46093 case X86ISD::FNMADD_RND:
46094 case X86ISD::FNMSUB:
46095 case X86ISD::FNMSUB_RND:
46096 case ISD::FMA: return combineFMA(N, DAG, DCI, Subtarget);
46097 case X86ISD::FMADDSUB_RND:
46098 case X86ISD::FMSUBADD_RND:
46099 case X86ISD::FMADDSUB:
46100 case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, DCI);
46101 case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI, Subtarget);
46102 case X86ISD::MGATHER:
46103 case X86ISD::MSCATTER: return combineX86GatherScatter(N, DAG, DCI);
46104 case ISD::MGATHER:
46105 case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI);
46106 case X86ISD::PCMPEQ:
46107 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
46108 case X86ISD::PMULDQ:
46109 case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
46110 case X86ISD::KSHIFTL:
46111 case X86ISD::KSHIFTR: return combineKSHIFT(N, DAG, DCI);
46112 }
46114 return SDValue();
46115 }
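/// DAGCombine query: even when a type is legal we may prefer not to operate
/// on it directly (vXi8 shifts, 8-bit mul/shl, most i16 ALU ops), so the
/// combiner promotes such operations to i32 instead.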
46117 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
46118 if (!isTypeLegal(VT))
46119 return false;
46121 // There are no vXi8 shifts.
46122 if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
46123 return false;
46125 // TODO: Almost no 8-bit ops are desirable because they have no actual
46126 // size/speed advantages vs. 32-bit ops, but they do have a major
46127 // potential disadvantage by causing partial register stalls.
46129 // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
46130 // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
46131 // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
46132 // check for a constant operand to the multiply.
46133 if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
46134 return false;
46136 // i16 instruction encodings are longer and some i16 instructions are slow,
46137 // so those are not desirable.
46138 if (VT == MVT::i16) {
46139 switch (Opc) {
46140 default:
46141 break;
46142 case ISD::LOAD:
46143 case ISD::SIGN_EXTEND:
46144 case ISD::ZERO_EXTEND:
46145 case ISD::ANY_EXTEND:
46146 case ISD::SHL:
46147 case ISD::SRA:
46148 case ISD::SRL:
46149 case ISD::SUB:
46150 case ISD::ADD:
46151 case ISD::MUL:
46152 case ISD::AND:
46153 case ISD::OR:
46154 case ISD::XOR:
46155 return false;
46156 }
46157 }
46159 // Any legal type not explicitly accounted for above here is desirable.
46160 return true;
46161 }
46163 SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
46164 SDValue Value, SDValue Addr,
46165 SelectionDAG &DAG) const {
46166 const Module *M = DAG.getMachineFunction().getMMI().getModule();
46167 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
46168 if (IsCFProtectionSupported) {
46169 // In case control-flow branch protection is enabled, we need to add
46170 // notrack prefix to the indirect branch.
46171 // In order to do that we create NT_BRIND SDNode.
46172 // Upon ISEL, the pattern will convert it to jmp with NoTrack prefix.
46173 return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
46174 }
46176 return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
46177 }
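/// Decide whether DAGCombine should promote an i16 (or constant i8 multiply)
/// operation to i32. Promotion is rejected when it would break folding of a
/// load into the operation or a matching read-modify-write store.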
46179 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
46180 EVT VT = Op.getValueType();
46181 bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
46182 isa<ConstantSDNode>(Op.getOperand(1));
46184 // i16 is legal, but undesirable since i16 instruction encodings are longer
46185 // and some i16 instructions are slow.
46186 // 8-bit multiply-by-constant can usually be expanded to something cheaper
46187 // using LEA and/or other ALU ops.
46188 if (VT != MVT::i16 && !Is8BitMulByConstant)
46189 return false;
46191 auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
46192 if (!Op.hasOneUse())
46193 return false;
46194 SDNode *User = *Op->use_begin();
46195 if (!ISD::isNormalStore(User))
46196 return false;
46197 auto *Ld = cast<LoadSDNode>(Load);
46198 auto *St = cast<StoreSDNode>(User);
46199 return Ld->getBasePtr() == St->getBasePtr();
46200 };
46202 auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
46203 if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
46204 return false;
46205 if (!Op.hasOneUse())
46206 return false;
46207 SDNode *User = *Op->use_begin();
46208 if (User->getOpcode() != ISD::ATOMIC_STORE)
46209 return false;
46210 auto *Ld = cast<AtomicSDNode>(Load);
46211 auto *St = cast<AtomicSDNode>(User);
46212 return Ld->getBasePtr() == St->getBasePtr();
46213 };
46215 bool Commute = false;
46216 switch (Op.getOpcode()) {
46217 default: return false;
46218 case ISD::SIGN_EXTEND:
46219 case ISD::ZERO_EXTEND:
46220 case ISD::ANY_EXTEND:
46221 break;
46222 case ISD::SHL:
46223 case ISD::SRA:
46224 case ISD::SRL: {
46225 SDValue N0 = Op.getOperand(0);
46226 // Look out for (store (shl (load), x)).
46227 if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
46228 return false;
46229 break;
46230 }
46231 case ISD::ADD:
46232 case ISD::MUL:
46233 case ISD::AND:
46234 case ISD::OR:
46235 case ISD::XOR:
46236 Commute = true;
46237 LLVM_FALLTHROUGH;
46238 case ISD::SUB: {
46239 SDValue N0 = Op.getOperand(0);
46240 SDValue N1 = Op.getOperand(1);
46241 // Avoid disabling potential load folding opportunities.
46242 if (MayFoldLoad(N1) &&
46243 (!Commute || !isa<ConstantSDNode>(N0) ||
46244 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
46245 return false;
46246 if (MayFoldLoad(N0) &&
46247 ((Commute && !isa<ConstantSDNode>(N1)) ||
46248 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
46249 return false;
46250 if (IsFoldableAtomicRMW(N0, Op) ||
46251 (Commute && IsFoldableAtomicRMW(N1, Op)))
46252 return false;
46253 }
46254 }
46256 PVT = MVT::i32;
46257 return true;
46258 }
46260 bool X86TargetLowering::
46261 isDesirableToCombineBuildVectorToShuffleTruncate(
46262 ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
46264 assert(SrcVT.getVectorNumElements() == ShuffleMask.size() &&
46265 "Element count mismatch");
46266 assert(
46267 Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) &&
46268 "Shuffle Mask expected to be legal");
46270 // For 32-bit elements VPERMD is better than shuffle+truncate.
46271 // TODO: After we improve lowerBuildVector, add an exception for VPERMW.
46272 if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2())
46273 return false;
46275 if (is128BitLaneCrossingShuffleMask(SrcVT.getSimpleVT(), ShuffleMask))
46276 return false;
46278 return true;
46279 }
46281 //===----------------------------------------------------------------------===//
46282 // X86 Inline Assembly Support
46283 //===----------------------------------------------------------------------===//
46285 // Helper to match a string separated by whitespace.
46286 static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
46287 S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
46289 for (StringRef Piece : Pieces) {
46290 if (!S.startswith(Piece)) // Check if the piece matches.
46291 return false;
46293 S = S.substr(Piece.size());
46294 StringRef::size_type Pos = S.find_first_not_of(" \t");
46295 if (Pos == 0) // We matched a prefix.
46296 return false;
46298 S = S.substr(Pos);
46299 }
46301 return true;
46302 }
46304 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
46306 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
46307 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
46308 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
46309 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
46311 if (AsmPieces.size() == 3)
46312 return true;
46313 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
46314 return true;
46315 }
46316 }
46317 return false;
46318 }
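// ExpandInlineAsm recognizes well-known byte-swap asm idioms and rewrites the
// call to use llvm.bswap.* instead, e.g. (roughly, from C source):
//   asm("rorw $8, %w0" : "=r"(x) : "0"(x) : "cc");  // becomes llvm.bswap.i16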
46320 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
46321 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
46323 const std::string &AsmStr = IA->getAsmString();
46325 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
46326 if (!Ty || Ty->getBitWidth() % 16 != 0)
46327 return false;
46329 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
46330 SmallVector<StringRef, 4> AsmPieces;
46331 SplitString(AsmStr, AsmPieces, ";\n");
46333 switch (AsmPieces.size()) {
46334 default: return false;
46335 case 1:
46336 // FIXME: this should verify that we are targeting a 486 or better. If not,
46337 // we will turn this bswap into something that will be lowered to logical
46338 // ops instead of emitting the bswap asm. For now, we don't support 486 or
46339 // lower so don't worry about this.
46341 if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
46342 matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
46343 matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
46344 matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
46345 matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
46346 matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
46347 // No need to check constraints, nothing other than the equivalent of
46348 // "=r,0" would be valid here.
46349 return IntrinsicLowering::LowerToByteSwap(CI);
46350 }
46352 // rorw $$8, ${0:w} --> llvm.bswap.i16
46353 if (CI->getType()->isIntegerTy(16) &&
46354 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
46355 (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
46356 matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
46357 AsmPieces.clear();
46358 StringRef ConstraintsStr = IA->getConstraintString();
46359 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
46360 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
46361 if (clobbersFlagRegisters(AsmPieces))
46362 return IntrinsicLowering::LowerToByteSwap(CI);
46363 }
46364 break;
46365 case 3:
46366 if (CI->getType()->isIntegerTy(32) &&
46367 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
46368 matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
46369 matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
46370 matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
46371 AsmPieces.clear();
46372 StringRef ConstraintsStr = IA->getConstraintString();
46373 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
46374 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
46375 if (clobbersFlagRegisters(AsmPieces))
46376 return IntrinsicLowering::LowerToByteSwap(CI);
46377 }
46379 if (CI->getType()->isIntegerTy(64)) {
46380 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
46381 if (Constraints.size() >= 2 &&
46382 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
46383 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
46384 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
46385 if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
46386 matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
46387 matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
46388 return IntrinsicLowering::LowerToByteSwap(CI);
46389 }
46390 }
46391 break;
46392 }
46393 return false;
46394 }
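// Map the "{@cc<cond>}" inline-asm flag-output constraint strings onto X86
// condition codes, e.g. (illustrative C source):
//   bool ok;
//   asm("cmp %2, %1" : "=@ccae"(ok) : "r"(a), "r"(b));
// arrives here as "{@ccae}" and yields X86::COND_AE.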
46396 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
46397 X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
46398 .Case("{@cca}", X86::COND_A)
46399 .Case("{@ccae}", X86::COND_AE)
46400 .Case("{@ccb}", X86::COND_B)
46401 .Case("{@ccbe}", X86::COND_BE)
46402 .Case("{@ccc}", X86::COND_B)
46403 .Case("{@cce}", X86::COND_E)
46404 .Case("{@ccz}", X86::COND_E)
46405 .Case("{@ccg}", X86::COND_G)
46406 .Case("{@ccge}", X86::COND_GE)
46407 .Case("{@ccl}", X86::COND_L)
46408 .Case("{@ccle}", X86::COND_LE)
46409 .Case("{@ccna}", X86::COND_BE)
46410 .Case("{@ccnae}", X86::COND_B)
46411 .Case("{@ccnb}", X86::COND_AE)
46412 .Case("{@ccnbe}", X86::COND_A)
46413 .Case("{@ccnc}", X86::COND_AE)
46414 .Case("{@ccne}", X86::COND_NE)
46415 .Case("{@ccnz}", X86::COND_NE)
46416 .Case("{@ccng}", X86::COND_LE)
46417 .Case("{@ccnge}", X86::COND_L)
46418 .Case("{@ccnl}", X86::COND_GE)
46419 .Case("{@ccnle}", X86::COND_G)
46420 .Case("{@ccno}", X86::COND_NO)
46421 .Case("{@ccnp}", X86::COND_P)
46422 .Case("{@ccns}", X86::COND_NS)
46423 .Case("{@cco}", X86::COND_O)
46424 .Case("{@ccp}", X86::COND_P)
46425 .Case("{@ccs}", X86::COND_S)
46426 .Default(X86::COND_INVALID);
46427 return Cond;
46428 }
46430 /// Given a constraint letter, return the type of constraint for this target.
46431 X86TargetLowering::ConstraintType
46432 X86TargetLowering::getConstraintType(StringRef Constraint) const {
46433 if (Constraint.size() == 1) {
46434 switch (Constraint[0]) {
46446 case 'k': // AVX512 masking registers.
46447 return C_RegisterClass;
46463 return C_Immediate;
46472 else if (Constraint.size() == 2) {
46473 switch (Constraint[0]) {
46477 switch (Constraint[1]) {
46488 return C_RegisterClass;
46491 } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
46492 return C_Other;
46493 return TargetLowering::getConstraintType(Constraint);
46494 }
46496 /// Examine constraint type and operand type and determine a weight value.
46497 /// This object must already have been set up with the operand type
46498 /// and the current alternative constraint selected.
46499 TargetLowering::ConstraintWeight
46500 X86TargetLowering::getSingleConstraintMatchWeight(
46501 AsmOperandInfo &info, const char *constraint) const {
46502 ConstraintWeight weight = CW_Invalid;
46503 Value *CallOperandVal = info.CallOperandVal;
46504 // If we don't have a value, we can't do a match,
46505 // but allow it at the lowest weight.
46506 if (!CallOperandVal)
46507 return CW_Default;
46508 Type *type = CallOperandVal->getType();
46509 // Look at the constraint type.
46510 switch (*constraint) {
46511 default:
46512 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
46524 if (CallOperandVal->getType()->isIntegerTy())
46525 weight = CW_SpecificReg;
46530 if (type->isFloatingPointTy())
46531 weight = CW_SpecificReg;
46534 if (type->isX86_MMXTy() && Subtarget.hasMMX())
46535 weight = CW_SpecificReg;
46538 unsigned Size = StringRef(constraint).size();
46539 // Pick 'i' as the next char as 'Yi' and 'Y' are synonymous, when matching 'Y'
46540 char NextChar = Size == 2 ? constraint[1] : 'i';
46543 switch (NextChar) {
46549 if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1())
46550 return CW_SpecificReg;
46552 // Conditional OpMask regs (AVX512)
46554 if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
46555 return CW_Register;
46559 if (type->isX86_MMXTy() && Subtarget.hasMMX())
46562 // Any SSE reg when ISA >= SSE2, same as 'Y'
46566 if (!Subtarget.hasSSE2())
46570 // Fall through (handle "Y" constraint).
46574 if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
46575 weight = CW_Register;
46578 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
46579 ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
46580 weight = CW_Register;
46583 // Enable conditional vector operations using %k<#> registers.
46584 if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
46585 weight = CW_Register;
46588 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
46589 if (C->getZExtValue() <= 31)
46590 weight = CW_Constant;
46594 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46595 if (C->getZExtValue() <= 63)
46596 weight = CW_Constant;
46600 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46601 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
46602 weight = CW_Constant;
46606 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46607 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
46608 weight = CW_Constant;
46612 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46613 if (C->getZExtValue() <= 3)
46614 weight = CW_Constant;
46618 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46619 if (C->getZExtValue() <= 0xff)
46620 weight = CW_Constant;
46625 if (isa<ConstantFP>(CallOperandVal)) {
46626 weight = CW_Constant;
46630 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46631 if ((C->getSExtValue() >= -0x80000000LL) &&
46632 (C->getSExtValue() <= 0x7fffffffLL))
46633 weight = CW_Constant;
46637 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46638 if (C->getZExtValue() <= 0xffffffff)
46639 weight = CW_Constant;
46640 }
46641 break;
46642 }
46643 return weight;
46644 }
46646 /// Try to replace an X constraint, which matches anything, with another that
46647 /// has more specific requirements based on the type of the corresponding
46649 const char *X86TargetLowering::
46650 LowerXConstraint(EVT ConstraintVT) const {
46651 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
46652 // 'f' like normal targets.
46653 if (ConstraintVT.isFloatingPoint()) {
46654 if (Subtarget.hasSSE2())
46655 return "Y";
46656 if (Subtarget.hasSSE1())
46657 return "x";
46658 }
46660 return TargetLowering::LowerXConstraint(ConstraintVT);
46661 }
46663 // Lower @cc targets via setcc.
46664 SDValue X86TargetLowering::LowerAsmOutputForConstraint(
46665 SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
46666 SelectionDAG &DAG) const {
46667 X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
46668 if (Cond == X86::COND_INVALID)
46669 return SDValue();
46670 // Check that return type is valid.
46671 if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
46672 OpInfo.ConstraintVT.getSizeInBits() < 8)
46673 report_fatal_error("Flag output operand is of invalid type");
46675 // Get EFLAGS register. Only update chain when copyfrom is glued.
46676 if (Flag.getNode()) {
46677 Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
46678 Chain = Flag.getValue(1);
46679 } else
46680 Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
46681 // Extract CC code.
46682 SDValue CC = getSETCC(Cond, Flag, DL, DAG);
46683 // Extend to 32-bits
46684 SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
46686 return Result;
46687 }
46689 /// Lower the specified operand into the Ops vector.
46690 /// If it is invalid, don't add anything to Ops.
46691 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
46692 std::string &Constraint,
46693 std::vector<SDValue>&Ops,
46694 SelectionDAG &DAG) const {
46695 SDValue Result;
46697 // Only support length 1 constraints for now.
46698 if (Constraint.length() > 1) return;
46700 char ConstraintLetter = Constraint[0];
46701 switch (ConstraintLetter) {
46702 default: break;
46703 case 'I':
46704 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46705 if (C->getZExtValue() <= 31) {
46706 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46707 Op.getValueType());
46713 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46714 if (C->getZExtValue() <= 63) {
46715 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46716 Op.getValueType());
46722 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46723 if (isInt<8>(C->getSExtValue())) {
46724 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46725 Op.getValueType());
46731 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46732 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
46733 (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
46734 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
46735 Op.getValueType());
46741 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46742 if (C->getZExtValue() <= 3) {
46743 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46744 Op.getValueType());
46750 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46751 if (C->getZExtValue() <= 255) {
46752 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46753 Op.getValueType());
46759 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46760 if (C->getZExtValue() <= 127) {
46761 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46762 Op.getValueType());
46768 // 32-bit signed value
46769 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46770 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
46771 C->getSExtValue())) {
46772 // Widen to 64 bits here to get it sign extended.
46773 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
46776 // FIXME gcc accepts some relocatable values here too, but only in certain
46777 // memory models; it's complicated.
46782 // 32-bit unsigned value
46783 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46784 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
46785 C->getZExtValue())) {
46786 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46787 Op.getValueType());
46791 // FIXME gcc accepts some relocatable values here too, but only in certain
46792 // memory models; it's complicated.
46796 // Literal immediates are always ok.
46797 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
46798 bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
46799 BooleanContent BCont = getBooleanContents(MVT::i64);
46800 ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
46801 : ISD::SIGN_EXTEND;
46802 int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
46803 : CST->getSExtValue();
46804 Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
46808 // In any sort of PIC mode addresses need to be computed at runtime by
46809 // adding in a register or some sort of table lookup. These can't
46810 // be used as immediates.
46811 if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
46812 return;
46814 // If we are in non-pic codegen mode, we allow the address of a global (with
46815 // an optional displacement) to be used with 'i'.
46816 if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
46817 // If we require an extra load to get this address, as in PIC mode, we
46818 // can't accept it.
46819 if (isGlobalStubReference(
46820 Subtarget.classifyGlobalReference(GA->getGlobal())))
46821 return;
46822 break;
46823 }
46824 }
46826 if (Result.getNode()) {
46827 Ops.push_back(Result);
46828 return;
46829 }
46830 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
46831 }
46833 /// Check if \p RC is a general purpose register class.
46834 /// I.e., GR* or one of their variant.
46835 static bool isGRClass(const TargetRegisterClass &RC) {
46836 return RC.hasSuperClassEq(&X86::GR8RegClass) ||
46837 RC.hasSuperClassEq(&X86::GR16RegClass) ||
46838 RC.hasSuperClassEq(&X86::GR32RegClass) ||
46839 RC.hasSuperClassEq(&X86::GR64RegClass) ||
46840 RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
46841 }
46843 /// Check if \p RC is a vector register class.
46844 /// I.e., FR* / VR* or one of their variant.
46845 static bool isFRClass(const TargetRegisterClass &RC) {
46846 return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
46847 RC.hasSuperClassEq(&X86::FR64XRegClass) ||
46848 RC.hasSuperClassEq(&X86::VR128XRegClass) ||
46849 RC.hasSuperClassEq(&X86::VR256XRegClass) ||
46850 RC.hasSuperClassEq(&X86::VR512RegClass);
46851 }
46853 /// Check if \p RC is a mask register class.
46854 /// I.e., VK* or one of their variant.
46855 static bool isVKClass(const TargetRegisterClass &RC) {
46856 return RC.hasSuperClassEq(&X86::VK1RegClass) ||
46857 RC.hasSuperClassEq(&X86::VK2RegClass) ||
46858 RC.hasSuperClassEq(&X86::VK4RegClass) ||
46859 RC.hasSuperClassEq(&X86::VK8RegClass) ||
46860 RC.hasSuperClassEq(&X86::VK16RegClass) ||
46861 RC.hasSuperClassEq(&X86::VK32RegClass) ||
46862 RC.hasSuperClassEq(&X86::VK64RegClass);
46863 }
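// Resolve an inline-asm register constraint to a (physical register, register
// class) pair; e.g. the constraint "{ax}" with an i32 operand ends up mapped
// to EAX in GR32 by the sub/super-register fixup at the end of this function.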
46865 std::pair<unsigned, const TargetRegisterClass *>
46866 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
46867 StringRef Constraint,
46868 MVT VT) const {
46869 // First, see if this is a constraint that directly corresponds to an LLVM
46870 // register class.
46871 if (Constraint.size() == 1) {
46872 // GCC Constraint Letters
46873 switch (Constraint[0]) {
46874 default: break;
46875 // 'A' means [ER]AX + [ER]DX.
46876 case 'A':
46877 if (Subtarget.is64Bit())
46878 return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
46879 assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
46880 "Expecting 64, 32 or 16 bit subtarget");
46881 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
46882 case 'k':
46883 // TODO: Slight differences here in allocation order and leaving
46884 // RIP in the class. Do they matter any more here than they do
46885 // in the normal allocation?
46887 if (Subtarget.hasAVX512()) {
46889 return std::make_pair(0U, &X86::VK1RegClass);
46891 return std::make_pair(0U, &X86::VK8RegClass);
46892 if (VT == MVT::i16)
46893 return std::make_pair(0U, &X86::VK16RegClass);
46895 if (Subtarget.hasBWI()) {
46896 if (VT == MVT::i32)
46897 return std::make_pair(0U, &X86::VK32RegClass);
46898 if (VT == MVT::i64)
46899 return std::make_pair(0U, &X86::VK64RegClass);
46900 }
46901 break;
46902 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
46903 if (Subtarget.is64Bit()) {
46904 if (VT == MVT::i32 || VT == MVT::f32)
46905 return std::make_pair(0U, &X86::GR32RegClass);
46906 if (VT == MVT::i16)
46907 return std::make_pair(0U, &X86::GR16RegClass);
46908 if (VT == MVT::i8 || VT == MVT::i1)
46909 return std::make_pair(0U, &X86::GR8RegClass);
46910 if (VT == MVT::i64 || VT == MVT::f64)
46911 return std::make_pair(0U, &X86::GR64RegClass);
46915 // 32-bit fallthrough
46916 case 'Q': // Q_REGS
46917 if (VT == MVT::i32 || VT == MVT::f32)
46918 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
46919 if (VT == MVT::i16)
46920 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
46921 if (VT == MVT::i8 || VT == MVT::i1)
46922 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
46923 if (VT == MVT::i64)
46924 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
46926 case 'r': // GENERAL_REGS
46927 case 'l': // INDEX_REGS
46928 if (VT == MVT::i8 || VT == MVT::i1)
46929 return std::make_pair(0U, &X86::GR8RegClass);
46930 if (VT == MVT::i16)
46931 return std::make_pair(0U, &X86::GR16RegClass);
46932 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
46933 return std::make_pair(0U, &X86::GR32RegClass);
46934 return std::make_pair(0U, &X86::GR64RegClass);
46935 case 'R': // LEGACY_REGS
46936 if (VT == MVT::i8 || VT == MVT::i1)
46937 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
46938 if (VT == MVT::i16)
46939 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
46940 if (VT == MVT::i32 || !Subtarget.is64Bit())
46941 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
46942 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
46943 case 'f': // FP Stack registers.
46944 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
46945 // value to the correct fpstack register class.
46946 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
46947 return std::make_pair(0U, &X86::RFP32RegClass);
46948 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
46949 return std::make_pair(0U, &X86::RFP64RegClass);
46950 return std::make_pair(0U, &X86::RFP80RegClass);
46951 case 'y': // MMX_REGS if MMX allowed.
46952 if (!Subtarget.hasMMX()) break;
46953 return std::make_pair(0U, &X86::VR64RegClass);
46954 case 'Y': // SSE_REGS if SSE2 allowed
46955 if (!Subtarget.hasSSE2()) break;
46956 LLVM_FALLTHROUGH;
46957 case 'v':
46958 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
46959 if (!Subtarget.hasSSE1()) break;
46960 bool VConstraint = (Constraint[0] == 'v');
46962 switch (VT.SimpleTy) {
46963 default: break;
46964 // Scalar SSE types.
46967 if (VConstraint && Subtarget.hasVLX())
46968 return std::make_pair(0U, &X86::FR32XRegClass);
46969 return std::make_pair(0U, &X86::FR32RegClass);
46972 if (VConstraint && Subtarget.hasVLX())
46973 return std::make_pair(0U, &X86::FR64XRegClass);
46974 return std::make_pair(0U, &X86::FR64RegClass);
46975 // TODO: Handle i128 in FR128RegClass after it is tested well.
46976 // Vector types and fp128.
46984 if (VConstraint && Subtarget.hasVLX())
46985 return std::make_pair(0U, &X86::VR128XRegClass);
46986 return std::make_pair(0U, &X86::VR128RegClass);
46994 if (VConstraint && Subtarget.hasVLX())
46995 return std::make_pair(0U, &X86::VR256XRegClass);
46996 if (Subtarget.hasAVX())
46997 return std::make_pair(0U, &X86::VR256RegClass);
47003 if (!Subtarget.hasAVX512()) break;
47005 return std::make_pair(0U, &X86::VR512RegClass);
47006 return std::make_pair(0U, &X86::VR512_0_15RegClass);
47010 } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
47011 switch (Constraint[1]) {
47017 return getRegForInlineAsmConstraint(TRI, "Y", VT);
47019 if (!Subtarget.hasMMX()) break;
47020 return std::make_pair(0U, &X86::VR64RegClass);
47023 if (!Subtarget.hasSSE1()) break;
47024 return std::make_pair(X86::XMM0, &X86::VR128RegClass);
47026 // This register class doesn't allocate k0 for masked vector operation.
47027 if (Subtarget.hasAVX512()) {
47029 return std::make_pair(0U, &X86::VK1WMRegClass);
47031 return std::make_pair(0U, &X86::VK8WMRegClass);
47032 if (VT == MVT::i16)
47033 return std::make_pair(0U, &X86::VK16WMRegClass);
47035 if (Subtarget.hasBWI()) {
47036 if (VT == MVT::i32)
47037 return std::make_pair(0U, &X86::VK32WMRegClass);
47038 if (VT == MVT::i64)
47039 return std::make_pair(0U, &X86::VK64WMRegClass);
47045 if (parseConstraintCode(Constraint) != X86::COND_INVALID)
47046 return std::make_pair(0U, &X86::GR32RegClass);
47048 // Use the default implementation in TargetLowering to convert the register
47049 // constraint into a member of a register class.
47050 std::pair<unsigned, const TargetRegisterClass*> Res;
47051 Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
47053 // Not found as a standard register?
47054 if (!Res.second) {
47055 // Map st(0) -> st(7) -> ST0
47056 if (Constraint.size() == 7 && Constraint[0] == '{' &&
47057 tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
47058 Constraint[3] == '(' &&
47059 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
47060 Constraint[5] == ')' && Constraint[6] == '}') {
47061 // st(7) is not allocatable and thus not a member of RFP80. Return
47062 // singleton class in cases where we have a reference to it.
47063 if (Constraint[4] == '7')
47064 return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
47065 return std::make_pair(X86::FP0 + Constraint[4] - '0',
47066 &X86::RFP80RegClass);
47069 // GCC allows "st(0)" to be called just plain "st".
47070 if (StringRef("{st}").equals_lower(Constraint))
47071 return std::make_pair(X86::FP0, &X86::RFP80RegClass);
47074 if (StringRef("{flags}").equals_lower(Constraint))
47075 return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
47078 if (StringRef("{dirflag}").equals_lower(Constraint))
47079 return std::make_pair(X86::DF, &X86::DFCCRRegClass);
47082 if (StringRef("{fpsr}").equals_lower(Constraint))
47083 return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
47085 return Res;
47086 }
47088 // Make sure it isn't a register that requires 64-bit mode.
47089 if (!Subtarget.is64Bit() &&
47090 (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
47091 TRI->getEncodingValue(Res.first) >= 8) {
47092 // Register requires REX prefix, but we're in 32-bit mode.
47093 return std::make_pair(0, nullptr);
47096 // Make sure it isn't a register that requires AVX512.
47097 if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
47098 TRI->getEncodingValue(Res.first) & 0x10) {
47099 // Register requires EVEX prefix.
47100 return std::make_pair(0, nullptr);
47103 // Otherwise, check to see if this is a register class of the wrong value
47104 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
47105 // turn into {ax},{dx}.
47106 // MVT::Other is used to specify clobber names.
47107 if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
47108 return Res; // Correct type already, nothing to do.
47110 // Get a matching integer of the correct size. i.e. "ax" with MVT::32 should
47111 // return "eax". This should even work for things like getting 64bit integer
47112 // registers when given an f64 type.
47113 const TargetRegisterClass *Class = Res.second;
47114 // The generic code will match the first register class that contains the
47115 // given register. Thus, based on the ordering of the tablegened file,
47116 // the "plain" GR classes might not come first.
47117 // Therefore, use a helper method.
47118 if (isGRClass(*Class)) {
47119 unsigned Size = VT.getSizeInBits();
47120 if (Size == 1) Size = 8;
47121 unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
47123 bool is64Bit = Subtarget.is64Bit();
47124 const TargetRegisterClass *RC =
47125 Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
47126 : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
47127 : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
47128 : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
47130 if (Size == 64 && !is64Bit) {
47131 // Model GCC's behavior here and select a fixed pair of 32-bit
47135 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
47137 return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
47139 return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
47141 return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
47143 return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
47145 return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
47147 return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
47149 return std::make_pair(0, nullptr);
47152 if (RC && RC->contains(DestReg))
47153 return std::make_pair(DestReg, RC);
47156 // No register found/type mismatch.
47157 return std::make_pair(0, nullptr);
47158 } else if (isFRClass(*Class)) {
47159 // Handle references to XMM physical registers that got mapped into the
47160 // wrong class. This can happen with constraints like {xmm0} where the
47161 // target independent register mapper will just pick the first match it can
47162 // find, ignoring the required type.
47164 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
47165 if (VT == MVT::f32 || VT == MVT::i32)
47166 Res.second = &X86::FR32XRegClass;
47167 else if (VT == MVT::f64 || VT == MVT::i64)
47168 Res.second = &X86::FR64XRegClass;
47169 else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
47170 Res.second = &X86::VR128XRegClass;
47171 else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
47172 Res.second = &X86::VR256XRegClass;
47173 else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
47174 Res.second = &X86::VR512RegClass;
47176 // Type mismatch and not a clobber: Return an error;
47178 Res.second = nullptr;
47180 } else if (isVKClass(*Class)) {
47182 Res.second = &X86::VK1RegClass;
47183 else if (VT == MVT::i8)
47184 Res.second = &X86::VK8RegClass;
47185 else if (VT == MVT::i16)
47186 Res.second = &X86::VK16RegClass;
47187 else if (VT == MVT::i32)
47188 Res.second = &X86::VK32RegClass;
47189 else if (VT == MVT::i64)
47190 Res.second = &X86::VK64RegClass;
47192 // Type mismatch and not a clobber: Return an error;
47194 Res.second = nullptr;
47201 int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
47202 const AddrMode &AM, Type *Ty,
47203 unsigned AS) const {
47204 // Scaling factors are not free at all.
47205 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
47206 // will take 2 allocations in the out of order engine instead of 1
47207 // for plain addressing mode, i.e. inst (reg1).
47209 // vaddps (%rsi,%rdx), %ymm0, %ymm1
47210 // Requires two allocations (one for the load, one for the computation)
47212 // vaddps (%rsi), %ymm0, %ymm1
47213 // Requires just 1 allocation, i.e., freeing allocations for other operations
47214 // and having less micro operations to execute.
47216 // For some X86 architectures, this is even worse because for instance for
47217 // stores, the complex addressing mode forces the instruction to use the
47218 // "load" ports instead of the dedicated "store" port.
47219 // E.g., on Haswell:
47220 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
47221 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
47222 if (isLegalAddressingMode(DL, AM, Ty, AS))
47223 // Scale represents reg2 * scale, thus account for 1
47224 // as soon as we use a second register.
47225 return AM.Scale != 0;
47226 return -1;
47227 }
47229 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
47230 // Integer division on x86 is expensive. However, when aggressively optimizing
47231 // for code size, we prefer to use a div instruction, as it is usually smaller
47232 // than the alternative sequence.
47233 // The exception to this is vector division. Since x86 doesn't have vector
47234 // integer division, leaving the division as-is is a loss even in terms of
47235 // size, because it will have to be scalarized, while the alternative code
47236 // sequence can be performed in vector form.
47237 bool OptSize =
47238 Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
47239 return OptSize && !VT.isVector();
47240 }
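// Split-CSR support (used by the CXX_FAST_TLS calling convention): instead of
// the usual prologue/epilogue spills, callee-saved registers are copied into
// virtual registers in the entry block and copied back before each return.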
47242 void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
47243 if (!Subtarget.is64Bit())
47244 return;
47246 // Update IsSplitCSR in X86MachineFunctionInfo.
47247 X86MachineFunctionInfo *AFI =
47248 Entry->getParent()->getInfo<X86MachineFunctionInfo>();
47249 AFI->setIsSplitCSR(true);
47250 }
47252 void X86TargetLowering::insertCopiesSplitCSR(
47253 MachineBasicBlock *Entry,
47254 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
47255 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
47256 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
47257 if (!IStart)
47258 return;
47260 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
47261 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
47262 MachineBasicBlock::iterator MBBI = Entry->begin();
47263 for (const MCPhysReg *I = IStart; *I; ++I) {
47264 const TargetRegisterClass *RC = nullptr;
47265 if (X86::GR64RegClass.contains(*I))
47266 RC = &X86::GR64RegClass;
47267 else
47268 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
47270 Register NewVR = MRI->createVirtualRegister(RC);
47271 // Create copy from CSR to a virtual register.
47272 // FIXME: this currently does not emit CFI pseudo-instructions, it works
47273 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
47274 // nounwind. If we want to generalize this later, we may need to emit
47275 // CFI pseudo-instructions.
47276 assert(
47277 Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
47278 "Function should be nounwind in insertCopiesSplitCSR!");
47279 Entry->addLiveIn(*I);
47280 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
47281 .addReg(*I);
47283 // Insert the copy-back instructions right before the terminator.
47284 for (auto *Exit : Exits)
47285 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
47286 TII->get(TargetOpcode::COPY), *I)
47287 .addReg(NewVR);
47288 }
47289 }
47291 bool X86TargetLowering::supportSwiftError() const {
47292 return Subtarget.is64Bit();
47293 }
47295 /// Returns the name of the symbol used to emit stack probes or the empty
47296 /// string if not applicable.
47297 StringRef
47298 X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
47299 // If the function specifically requests stack probes, emit them.
47300 if (MF.getFunction().hasFnAttribute("probe-stack"))
47301 return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
47303 // Generally, if we aren't on Windows, the platform ABI does not include
47304 // support for stack probes, so don't emit them.
47305 if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
47306 MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
47309 // We need a stack probe to conform to the Windows ABI. Choose the right
47310 // symbol.
47311 if (Subtarget.is64Bit())
47312 return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
47313 return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
47314 }
47316 unsigned
47317 X86TargetLowering::getStackProbeSize(MachineFunction &MF) const {
47318 // The default stack probe size is 4096 if the function has no
47319 // "stack-probe-size" attribute.
47320 unsigned StackProbeSize = 4096;
47321 const Function &Fn = MF.getFunction();
47322 if (Fn.hasFnAttribute("stack-probe-size"))
47323 Fn.getFnAttribute("stack-probe-size")
47324 .getValueAsString()
47325 .getAsInteger(0, StackProbeSize);
47326 return StackProbeSize;
47327 }