1 //===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file defines a DAG pattern matching instruction selector for X86,
10 // converting from a legalized dag to a X86 dag.
12 //===----------------------------------------------------------------------===//
15 #include "X86MachineFunctionInfo.h"
16 #include "X86RegisterInfo.h"
17 #include "X86Subtarget.h"
18 #include "X86TargetMachine.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/CodeGen/MachineModuleInfo.h"
21 #include "llvm/CodeGen/SelectionDAGISel.h"
22 #include "llvm/Config/llvm-config.h"
23 #include "llvm/IR/ConstantRange.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/IR/Instructions.h"
26 #include "llvm/IR/Intrinsics.h"
27 #include "llvm/IR/IntrinsicsX86.h"
28 #include "llvm/IR/Type.h"
29 #include "llvm/Support/Debug.h"
30 #include "llvm/Support/ErrorHandling.h"
31 #include "llvm/Support/KnownBits.h"
32 #include "llvm/Support/MathExtras.h"
37 #define DEBUG_TYPE "x86-isel"
38 #define PASS_NAME "X86 DAG->DAG Instruction Selection"
40 STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
42 static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
43 cl::desc("Enable setting constant bits to reduce size of mask immediates"),
46 static cl::opt<bool> EnablePromoteAnyextLoad(
47 "x86-promote-anyext-load", cl::init(true),
48 cl::desc("Enable promoting aligned anyext load to wider load"), cl::Hidden);
50 extern cl::opt<bool> IndirectBranchTracking;
52 //===----------------------------------------------------------------------===//
53 // Pattern Matcher Implementation
54 //===----------------------------------------------------------------------===//
57 /// This corresponds to X86AddressMode, but uses SDValue's instead of register
58 /// numbers for the leaves of the matched tree.
59 struct X86ISelAddressMode {
65 // This is really a union, discriminated by BaseType!
67 int Base_FrameIndex = 0;
73 const GlobalValue *GV = nullptr;
74 const Constant *CP = nullptr;
75 const BlockAddress *BlockAddr = nullptr;
76 const char *ES = nullptr;
77 MCSymbol *MCSym = nullptr;
79 Align Alignment; // CP alignment.
80 unsigned char SymbolFlags = X86II::MO_NO_FLAG; // X86II::MO_*
81 bool NegateIndex = false;
83 X86ISelAddressMode() = default;
85 bool hasSymbolicDisplacement() const {
86 return GV != nullptr || CP != nullptr || ES != nullptr ||
87 MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
90 bool hasBaseOrIndexReg() const {
91 return BaseType == FrameIndexBase ||
92 IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
95 /// Return true if this addressing mode is already RIP-relative.
96 bool isRIPRelative() const {
97 if (BaseType != RegBase) return false;
98 if (RegisterSDNode *RegNode =
99 dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
100 return RegNode->getReg() == X86::RIP;
104 void setBaseReg(SDValue Reg) {
109 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
110 void dump(SelectionDAG *DAG = nullptr) {
111 dbgs() << "X86ISelAddressMode " << this << '\n';
112 dbgs() << "Base_Reg ";
113 if (Base_Reg.getNode())
114 Base_Reg.getNode()->dump(DAG);
117 if (BaseType == FrameIndexBase)
118 dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
119 dbgs() << " Scale " << Scale << '\n'
123 if (IndexReg.getNode())
124 IndexReg.getNode()->dump(DAG);
127 dbgs() << " Disp " << Disp << '\n'
149 dbgs() << " JT" << JT << " Align" << Alignment.value() << '\n';
156 //===--------------------------------------------------------------------===//
157 /// ISel - X86-specific code to select X86 machine instructions for
158 /// SelectionDAG operations.
160 class X86DAGToDAGISel final : public SelectionDAGISel {
161 /// Keep a pointer to the X86Subtarget around so that we can
162 /// make the right decision when generating code for different targets.
163 const X86Subtarget *Subtarget;
165 /// If true, selector should try to optimize for minimum code size.
168 /// Disable direct TLS access through segment registers.
169 bool IndirectTlsSegRefs;
174 X86DAGToDAGISel() = delete;
176 explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
177 : SelectionDAGISel(ID, tm, OptLevel), Subtarget(nullptr),
178 OptForMinSize(false), IndirectTlsSegRefs(false) {}
180 bool runOnMachineFunction(MachineFunction &MF) override {
181 // Reset the subtarget each time through.
182 Subtarget = &MF.getSubtarget<X86Subtarget>();
183 IndirectTlsSegRefs = MF.getFunction().hasFnAttribute(
184 "indirect-tls-seg-refs");
186 // OptFor[Min]Size are used in pattern predicates that isel is matching.
187 OptForMinSize = MF.getFunction().hasMinSize();
188 assert((!OptForMinSize || MF.getFunction().hasOptSize()) &&
189 "OptForMinSize implies OptForSize");
191 SelectionDAGISel::runOnMachineFunction(MF);
195 void emitFunctionEntryCode() override;
197 bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
199 void PreprocessISelDAG() override;
200 void PostprocessISelDAG() override;
202 // Include the pieces autogenerated from the target description.
203 #include "X86GenDAGISel.inc"
206 void Select(SDNode *N) override;
208 bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
209 bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
210 bool AllowSegmentRegForX32 = false);
211 bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
212 bool matchAddress(SDValue N, X86ISelAddressMode &AM);
213 bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
214 bool matchAdd(SDValue &N, X86ISelAddressMode &AM, unsigned Depth);
215 bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
217 bool matchVectorAddressRecursively(SDValue N, X86ISelAddressMode &AM,
219 bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
220 bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
221 SDValue &Scale, SDValue &Index, SDValue &Disp,
223 bool selectVectorAddr(MemSDNode *Parent, SDValue BasePtr, SDValue IndexOp,
224 SDValue ScaleOp, SDValue &Base, SDValue &Scale,
225 SDValue &Index, SDValue &Disp, SDValue &Segment);
226 bool selectMOV64Imm32(SDValue N, SDValue &Imm);
227 bool selectLEAAddr(SDValue N, SDValue &Base,
228 SDValue &Scale, SDValue &Index, SDValue &Disp,
230 bool selectLEA64_32Addr(SDValue N, SDValue &Base,
231 SDValue &Scale, SDValue &Index, SDValue &Disp,
233 bool selectTLSADDRAddr(SDValue N, SDValue &Base,
234 SDValue &Scale, SDValue &Index, SDValue &Disp,
236 bool selectRelocImm(SDValue N, SDValue &Op);
238 bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
239 SDValue &Base, SDValue &Scale,
240 SDValue &Index, SDValue &Disp,
243 // Convenience method where P is also root.
244 bool tryFoldLoad(SDNode *P, SDValue N,
245 SDValue &Base, SDValue &Scale,
246 SDValue &Index, SDValue &Disp,
248 return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
251 bool tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
252 SDValue &Base, SDValue &Scale,
253 SDValue &Index, SDValue &Disp,
256 bool isProfitableToFormMaskedOp(SDNode *N) const;
258 /// Implement addressing mode selection for inline asm expressions.
259 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
260 unsigned ConstraintID,
261 std::vector<SDValue> &OutOps) override;
263 void emitSpecialCodeForMain();
265 inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
266 MVT VT, SDValue &Base, SDValue &Scale,
267 SDValue &Index, SDValue &Disp,
269 if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
270 Base = CurDAG->getTargetFrameIndex(
271 AM.Base_FrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
272 else if (AM.Base_Reg.getNode())
275 Base = CurDAG->getRegister(0, VT);
277 Scale = getI8Imm(AM.Scale, DL);
279 // Negate the index if needed.
280 if (AM.NegateIndex) {
281 unsigned NegOpc = VT == MVT::i64 ? X86::NEG64r : X86::NEG32r;
282 SDValue Neg = SDValue(CurDAG->getMachineNode(NegOpc, DL, VT, MVT::i32,
287 if (AM.IndexReg.getNode())
290 Index = CurDAG->getRegister(0, VT);
292 // These are 32-bit even in 64-bit mode since RIP-relative offset
295 Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
299 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Alignment,
300 AM.Disp, AM.SymbolFlags);
302 assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
303 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
304 } else if (AM.MCSym) {
305 assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
306 assert(AM.SymbolFlags == 0 && "oo");
307 Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
308 } else if (AM.JT != -1) {
309 assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
310 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
311 } else if (AM.BlockAddr)
312 Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
315 Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);
317 if (AM.Segment.getNode())
318 Segment = AM.Segment;
320 Segment = CurDAG->getRegister(0, MVT::i16);
323 // Utility function to determine whether we should avoid selecting
324 // immediate forms of instructions for better code size or not.
325 // At a high level, we'd like to avoid such instructions when
326 // we have similar constants used within the same basic block
327 // that can be kept in a register.
329 bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
330 uint32_t UseCount = 0;
332 // Do not want to hoist if we're not optimizing for size.
333 // TODO: We'd like to remove this restriction.
334 // See the comment in X86InstrInfo.td for more info.
335 if (!CurDAG->shouldOptForSize())
338 // Walk all the users of the immediate.
339 for (const SDNode *User : N->uses()) {
343 // This user is already selected. Count it as a legitimate use and
345 if (User->isMachineOpcode()) {
350 // We want to count stores of immediates as real uses.
351 if (User->getOpcode() == ISD::STORE &&
352 User->getOperand(1).getNode() == N) {
357 // We don't currently match users that have > 2 operands (except
358 // for stores, which are handled above)
359 // Those instruction won't match in ISEL, for now, and would
360 // be counted incorrectly.
361 // This may change in the future as we add additional instruction
363 if (User->getNumOperands() != 2)
366 // If this is a sign-extended 8-bit integer immediate used in an ALU
367 // instruction, there is probably an opcode encoding to save space.
368 auto *C = dyn_cast<ConstantSDNode>(N);
369 if (C && isInt<8>(C->getSExtValue()))
372 // Immediates that are used for offsets as part of stack
373 // manipulation should be left alone. These are typically
374 // used to indicate SP offsets for argument passing and
375 // will get pulled into stores/pushes (implicitly).
376 if (User->getOpcode() == X86ISD::ADD ||
377 User->getOpcode() == ISD::ADD ||
378 User->getOpcode() == X86ISD::SUB ||
379 User->getOpcode() == ISD::SUB) {
381 // Find the other operand of the add/sub.
382 SDValue OtherOp = User->getOperand(0);
383 if (OtherOp.getNode() == N)
384 OtherOp = User->getOperand(1);
386 // Don't count if the other operand is SP.
387 RegisterSDNode *RegNode;
388 if (OtherOp->getOpcode() == ISD::CopyFromReg &&
389 (RegNode = dyn_cast_or_null<RegisterSDNode>(
390 OtherOp->getOperand(1).getNode())))
391 if ((RegNode->getReg() == X86::ESP) ||
392 (RegNode->getReg() == X86::RSP))
396 // ... otherwise, count this and move on.
400 // If we have more than 1 use, then recommend for hoisting.
401 return (UseCount > 1);
404 /// Return a target constant with the specified value of type i8.
405 inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
406 return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
409 /// Return a target constant with the specified value, of type i32.
410 inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
411 return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
414 /// Return a target constant with the specified value, of type i64.
415 inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
416 return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
419 SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
421 assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
422 uint64_t Index = N->getConstantOperandVal(1);
423 MVT VecVT = N->getOperand(0).getSimpleValueType();
424 return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
427 SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
429 assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
430 uint64_t Index = N->getConstantOperandVal(2);
431 MVT VecVT = N->getSimpleValueType(0);
432 return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
435 SDValue getPermuteVINSERTCommutedImmediate(SDNode *N, unsigned VecWidth,
437 assert(VecWidth == 128 && "Unexpected vector width");
438 uint64_t Index = N->getConstantOperandVal(2);
439 MVT VecVT = N->getSimpleValueType(0);
440 uint64_t InsertIdx = (Index * VecVT.getScalarSizeInBits()) / VecWidth;
441 assert((InsertIdx == 0 || InsertIdx == 1) && "Bad insertf128 index");
442 // vinsert(0,sub,vec) -> [sub0][vec1] -> vperm2x128(0x30,vec,sub)
443 // vinsert(1,sub,vec) -> [vec0][sub0] -> vperm2x128(0x02,vec,sub)
444 return getI8Imm(InsertIdx ? 0x02 : 0x30, DL);
447 SDValue getSBBZero(SDNode *N) {
449 MVT VT = N->getSimpleValueType(0);
452 SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i32);
453 SDValue Zero = SDValue(
454 CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, std::nullopt), 0);
455 if (VT == MVT::i64) {
457 CurDAG->getMachineNode(
458 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
459 CurDAG->getTargetConstant(0, dl, MVT::i64), Zero,
460 CurDAG->getTargetConstant(X86::sub_32bit, dl, MVT::i32)),
464 // Copy flags to the EFLAGS register and glue it to next node.
465 unsigned Opcode = N->getOpcode();
466 assert((Opcode == X86ISD::SBB || Opcode == X86ISD::SETCC_CARRY) &&
467 "Unexpected opcode for SBB materialization");
468 unsigned FlagOpIndex = Opcode == X86ISD::SBB ? 2 : 1;
470 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EFLAGS,
471 N->getOperand(FlagOpIndex), SDValue());
473 // Create a 64-bit instruction if the result is 64-bits otherwise use the
475 unsigned Opc = VT == MVT::i64 ? X86::SBB64rr : X86::SBB32rr;
476 MVT SBBVT = VT == MVT::i64 ? MVT::i64 : MVT::i32;
477 VTs = CurDAG->getVTList(SBBVT, MVT::i32);
479 CurDAG->getMachineNode(Opc, dl, VTs,
480 {Zero, Zero, EFLAGS, EFLAGS.getValue(1)}),
484 // Helper to detect unneeded and instructions on shift amounts. Called
485 // from PatFrags in tablegen.
486 bool isUnneededShiftMask(SDNode *N, unsigned Width) const {
487 assert(N->getOpcode() == ISD::AND && "Unexpected opcode");
488 const APInt &Val = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
490 if (Val.countr_one() >= Width)
493 APInt Mask = Val | CurDAG->computeKnownBits(N->getOperand(0)).Zero;
494 return Mask.countr_one() >= Width;
497 /// Return an SDNode that returns the value of the global base register.
498 /// Output instructions required to initialize the global base register,
500 SDNode *getGlobalBaseReg();
502 /// Return a reference to the TargetMachine, casted to the target-specific
504 const X86TargetMachine &getTargetMachine() const {
505 return static_cast<const X86TargetMachine &>(TM);
508 /// Return a reference to the TargetInstrInfo, casted to the target-specific
510 const X86InstrInfo *getInstrInfo() const {
511 return Subtarget->getInstrInfo();
514 /// Return a condition code of the given SDNode
515 X86::CondCode getCondFromNode(SDNode *N) const;
517 /// Address-mode matching performs shift-of-and to and-of-shift
518 /// reassociation in order to expose more scaled addressing
520 bool ComplexPatternFuncMutatesDAG() const override {
524 bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;
526 // Indicates we should prefer to use a non-temporal load for this load.
527 bool useNonTemporalLoad(LoadSDNode *N) const {
528 if (!N->isNonTemporal())
531 unsigned StoreSize = N->getMemoryVT().getStoreSize();
533 if (N->getAlign().value() < StoreSize)
537 default: llvm_unreachable("Unsupported store size");
542 return Subtarget->hasSSE41();
544 return Subtarget->hasAVX2();
546 return Subtarget->hasAVX512();
550 bool foldLoadStoreIntoMemOperand(SDNode *Node);
551 MachineSDNode *matchBEXTRFromAndImm(SDNode *Node);
552 bool matchBitExtract(SDNode *Node);
553 bool shrinkAndImmediate(SDNode *N);
554 bool isMaskZeroExtended(SDNode *N) const;
555 bool tryShiftAmountMod(SDNode *N);
556 bool tryShrinkShlLogicImm(SDNode *N);
557 bool tryVPTERNLOG(SDNode *N);
558 bool matchVPTERNLOG(SDNode *Root, SDNode *ParentA, SDNode *ParentB,
559 SDNode *ParentC, SDValue A, SDValue B, SDValue C,
561 bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
562 bool tryMatchBitSelect(SDNode *N);
564 MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
565 const SDLoc &dl, MVT VT, SDNode *Node);
566 MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
567 const SDLoc &dl, MVT VT, SDNode *Node,
570 bool tryOptimizeRem8Extend(SDNode *N);
572 bool onlyUsesZeroFlag(SDValue Flags) const;
573 bool hasNoSignFlagUses(SDValue Flags) const;
574 bool hasNoCarryFlagUses(SDValue Flags) const;
578 char X86DAGToDAGISel::ID = 0;
580 INITIALIZE_PASS(X86DAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)
582 // Returns true if this masked compare can be implemented legally with this
584 static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
585 unsigned Opcode = N->getOpcode();
586 if (Opcode == X86ISD::CMPM || Opcode == X86ISD::CMPMM ||
587 Opcode == X86ISD::STRICT_CMPM || Opcode == ISD::SETCC ||
588 Opcode == X86ISD::CMPMM_SAE || Opcode == X86ISD::VFPCLASS) {
589 // We can get 256-bit 8 element types here without VLX being enabled. When
590 // this happens we will use 512-bit operations and the mask will not be
592 EVT OpVT = N->getOperand(0).getValueType();
593 // The first operand of X86ISD::STRICT_CMPM is chain, so we need to get the
595 if (Opcode == X86ISD::STRICT_CMPM)
596 OpVT = N->getOperand(1).getValueType();
597 if (OpVT.is256BitVector() || OpVT.is128BitVector())
598 return Subtarget->hasVLX();
602 // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
603 if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
604 Opcode == X86ISD::FSETCCM_SAE)
610 // Returns true if we can assume the writer of the mask has zero extended it
612 bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
613 // If this is an AND, check if we have a compare on either side. As long as
614 // one side guarantees the mask is zero extended, the AND will preserve those
616 if (N->getOpcode() == ISD::AND)
617 return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
618 isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);
620 return isLegalMaskCompare(N, Subtarget);
624 X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
625 if (OptLevel == CodeGenOpt::None) return false;
630 if (N.getOpcode() != ISD::LOAD)
633 // Don't fold non-temporal loads if we have an instruction for them.
634 if (useNonTemporalLoad(cast<LoadSDNode>(N)))
637 // If N is a load, do additional profitability checks.
639 switch (U->getOpcode()) {
649 case ISD::UADDO_CARRY:
653 SDValue Op1 = U->getOperand(1);
655 // If the other operand is a 8-bit immediate we should fold the immediate
656 // instead. This reduces code size.
658 // movl 4(%esp), %eax
662 // addl 4(%esp), %eax
663 // The former is 2 bytes shorter. In case where the increment is 1, then
664 // the saving can be 4 bytes (by using incl %eax).
665 if (auto *Imm = dyn_cast<ConstantSDNode>(Op1)) {
666 if (Imm->getAPIntValue().isSignedIntN(8))
669 // If this is a 64-bit AND with an immediate that fits in 32-bits,
670 // prefer using the smaller and over folding the load. This is needed to
671 // make sure immediates created by shrinkAndImmediate are always folded.
672 // Ideally we would narrow the load during DAG combine and get the
673 // best of both worlds.
674 if (U->getOpcode() == ISD::AND &&
675 Imm->getAPIntValue().getBitWidth() == 64 &&
676 Imm->getAPIntValue().isIntN(32))
679 // If this really a zext_inreg that can be represented with a movzx
680 // instruction, prefer that.
681 // TODO: We could shrink the load and fold if it is non-volatile.
682 if (U->getOpcode() == ISD::AND &&
683 (Imm->getAPIntValue() == UINT8_MAX ||
684 Imm->getAPIntValue() == UINT16_MAX ||
685 Imm->getAPIntValue() == UINT32_MAX))
688 // ADD/SUB with can negate the immediate and use the opposite operation
689 // to fit 128 into a sign extended 8 bit immediate.
690 if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB) &&
691 (-Imm->getAPIntValue()).isSignedIntN(8))
694 if ((U->getOpcode() == X86ISD::ADD || U->getOpcode() == X86ISD::SUB) &&
695 (-Imm->getAPIntValue()).isSignedIntN(8) &&
696 hasNoCarryFlagUses(SDValue(U, 1)))
700 // If the other operand is a TLS address, we should fold it instead.
703 // leal i@NTPOFF(%eax), %eax
705 // movl $i@NTPOFF, %eax
707 // if the block also has an access to a second TLS address this will save
709 // FIXME: This is probably also true for non-TLS addresses.
710 if (Op1.getOpcode() == X86ISD::Wrapper) {
711 SDValue Val = Op1.getOperand(0);
712 if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
716 // Don't fold load if this matches the BTS/BTR/BTC patterns.
717 // BTS: (or X, (shl 1, n))
718 // BTR: (and X, (rotl -2, n))
719 // BTC: (xor X, (shl 1, n))
720 if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
721 if (U->getOperand(0).getOpcode() == ISD::SHL &&
722 isOneConstant(U->getOperand(0).getOperand(0)))
725 if (U->getOperand(1).getOpcode() == ISD::SHL &&
726 isOneConstant(U->getOperand(1).getOperand(0)))
729 if (U->getOpcode() == ISD::AND) {
730 SDValue U0 = U->getOperand(0);
731 SDValue U1 = U->getOperand(1);
732 if (U0.getOpcode() == ISD::ROTL) {
733 auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
734 if (C && C->getSExtValue() == -2)
738 if (U1.getOpcode() == ISD::ROTL) {
739 auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
740 if (C && C->getSExtValue() == -2)
750 // Don't fold a load into a shift by immediate. The BMI2 instructions
751 // support folding a load, but not an immediate. The legacy instructions
752 // support folding an immediate, but can't fold a load. Folding an
753 // immediate is preferable to folding a load.
754 if (isa<ConstantSDNode>(U->getOperand(1)))
761 // Prevent folding a load if this can implemented with an insert_subreg or
762 // a move that implicitly zeroes.
763 if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
764 isNullConstant(Root->getOperand(2)) &&
765 (Root->getOperand(0).isUndef() ||
766 ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
772 // Indicates it is profitable to form an AVX512 masked operation. Returning
773 // false will favor a masked register-register masked move or vblendm and the
774 // operation will be selected separately.
775 bool X86DAGToDAGISel::isProfitableToFormMaskedOp(SDNode *N) const {
777 (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::SELECTS) &&
778 "Unexpected opcode!");
780 // If the operation has additional users, the operation will be duplicated.
781 // Check the use count to prevent that.
782 // FIXME: Are there cheap opcodes we might want to duplicate?
783 return N->getOperand(1).hasOneUse();
786 /// Replace the original chain operand of the call with
787 /// load's chain operand and move load below the call's chain operand.
788 static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
789 SDValue Call, SDValue OrigChain) {
790 SmallVector<SDValue, 8> Ops;
791 SDValue Chain = OrigChain.getOperand(0);
792 if (Chain.getNode() == Load.getNode())
793 Ops.push_back(Load.getOperand(0));
795 assert(Chain.getOpcode() == ISD::TokenFactor &&
796 "Unexpected chain operand");
797 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
798 if (Chain.getOperand(i).getNode() == Load.getNode())
799 Ops.push_back(Load.getOperand(0));
801 Ops.push_back(Chain.getOperand(i));
803 CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
805 Ops.push_back(NewChain);
807 Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
808 CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
809 CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
810 Load.getOperand(1), Load.getOperand(2));
813 Ops.push_back(SDValue(Load.getNode(), 1));
814 Ops.append(Call->op_begin() + 1, Call->op_end());
815 CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
818 /// Return true if call address is a load and it can be
819 /// moved below CALLSEQ_START and the chains leading up to the call.
820 /// Return the CALLSEQ_START by reference as a second output.
821 /// In the case of a tail call, there isn't a callseq node between the call
822 /// chain and the load.
823 static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
824 // The transformation is somewhat dangerous if the call's chain was glued to
825 // the call. After MoveBelowOrigChain the load is moved between the call and
826 // the chain, this can create a cycle if the load is not folded. So it is
827 // *really* important that we are sure the load will be folded.
828 if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
830 auto *LD = dyn_cast<LoadSDNode>(Callee.getNode());
833 LD->getAddressingMode() != ISD::UNINDEXED ||
834 LD->getExtensionType() != ISD::NON_EXTLOAD)
837 // Now let's find the callseq_start.
838 while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
839 if (!Chain.hasOneUse())
841 Chain = Chain.getOperand(0);
844 if (!Chain.getNumOperands())
846 // Since we are not checking for AA here, conservatively abort if the chain
847 // writes to memory. It's not safe to move the callee (a load) across a store.
848 if (isa<MemSDNode>(Chain.getNode()) &&
849 cast<MemSDNode>(Chain.getNode())->writeMem())
851 if (Chain.getOperand(0).getNode() == Callee.getNode())
853 if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
854 Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
855 Callee.getValue(1).hasOneUse())
860 static bool isEndbrImm64(uint64_t Imm) {
861 // There may be some other prefix bytes between 0xF3 and 0x0F1EFA.
862 // i.g: 0xF3660F1EFA, 0xF3670F1EFA
863 if ((Imm & 0x00FFFFFF) != 0x0F1EFA)
866 uint8_t OptionalPrefixBytes [] = {0x26, 0x2e, 0x36, 0x3e, 0x64,
867 0x65, 0x66, 0x67, 0xf0, 0xf2};
868 int i = 24; // 24bit 0x0F1EFA has matched
870 uint8_t Byte = (Imm >> i) & 0xFF;
873 if (!llvm::is_contained(OptionalPrefixBytes, Byte))
881 void X86DAGToDAGISel::PreprocessISelDAG() {
882 bool MadeChange = false;
883 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
884 E = CurDAG->allnodes_end(); I != E; ) {
885 SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
887 // This is for CET enhancement.
889 // ENDBR32 and ENDBR64 have specific opcodes:
890 // ENDBR32: F3 0F 1E FB
891 // ENDBR64: F3 0F 1E FA
892 // And we want that attackers won’t find unintended ENDBR32/64
893 // opcode matches in the binary
894 // Here’s an example:
895 // If the compiler had to generate asm for the following code:
897 // it could, for example, generate:
898 // mov 0xF30F1EFA, dword ptr[a]
899 // In such a case, the binary would include a gadget that starts
900 // with a fake ENDBR64 opcode. Therefore, we split such generation
901 // into multiple operations, let it not shows in the binary
902 if (N->getOpcode() == ISD::Constant) {
903 MVT VT = N->getSimpleValueType(0);
904 int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
905 int32_t EndbrImm = Subtarget->is64Bit() ? 0xF30F1EFA : 0xF30F1EFB;
906 if (Imm == EndbrImm || isEndbrImm64(Imm)) {
907 // Check that the cf-protection-branch is enabled.
908 Metadata *CFProtectionBranch =
909 MF->getMMI().getModule()->getModuleFlag("cf-protection-branch");
910 if (CFProtectionBranch || IndirectBranchTracking) {
912 SDValue Complement = CurDAG->getConstant(~Imm, dl, VT, false, true);
913 Complement = CurDAG->getNOT(dl, Complement, VT);
915 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Complement);
923 // If this is a target specific AND node with no flag usages, turn it back
924 // into ISD::AND to enable test instruction matching.
925 if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
926 SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
927 N->getOperand(0), N->getOperand(1));
929 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
935 // Convert vector increment or decrement to sub/add with an all-ones
937 // add X, <1, 1...> --> sub X, <-1, -1...>
938 // sub X, <1, 1...> --> add X, <-1, -1...>
939 // The all-ones vector constant can be materialized using a pcmpeq
940 // instruction that is commonly recognized as an idiom (has no register
941 // dependency), so that's better/smaller than loading a splat 1 constant.
943 // But don't do this if it would inhibit a potentially profitable load
944 // folding opportunity for the other operand. That only occurs with the
946 // (1) The other operand (op0) is load foldable.
947 // (2) The op is an add (otherwise, we are *creating* an add and can still
948 // load fold the other op).
949 // (3) The target has AVX (otherwise, we have a destructive add and can't
950 // load fold the other op without killing the constant op).
951 // (4) The constant 1 vector has multiple uses (so it is profitable to load
952 // into a register anyway).
953 auto mayPreventLoadFold = [&]() {
954 return X86::mayFoldLoad(N->getOperand(0), *Subtarget) &&
955 N->getOpcode() == ISD::ADD && Subtarget->hasAVX() &&
956 !N->getOperand(1).hasOneUse();
958 if ((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
959 N->getSimpleValueType(0).isVector() && !mayPreventLoadFold()) {
961 if (X86::isConstantSplat(N->getOperand(1), SplatVal) &&
965 MVT VT = N->getSimpleValueType(0);
966 unsigned NumElts = VT.getSizeInBits() / 32;
968 CurDAG->getAllOnesConstant(DL, MVT::getVectorVT(MVT::i32, NumElts));
969 AllOnes = CurDAG->getBitcast(VT, AllOnes);
971 unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
973 CurDAG->getNode(NewOpcode, DL, VT, N->getOperand(0), AllOnes);
975 CurDAG->ReplaceAllUsesWith(N, Res.getNode());
982 switch (N->getOpcode()) {
983 case X86ISD::VBROADCAST: {
984 MVT VT = N->getSimpleValueType(0);
985 // Emulate v32i16/v64i8 broadcast without BWI.
986 if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
987 MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
989 SDValue NarrowBCast =
990 CurDAG->getNode(X86ISD::VBROADCAST, dl, NarrowVT, N->getOperand(0));
992 CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
993 NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
994 unsigned Index = VT == MVT::v32i16 ? 16 : 32;
995 Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
996 CurDAG->getIntPtrConstant(Index, dl));
999 CurDAG->ReplaceAllUsesWith(N, Res.getNode());
1007 case X86ISD::VBROADCAST_LOAD: {
1008 MVT VT = N->getSimpleValueType(0);
1009 // Emulate v32i16/v64i8 broadcast without BWI.
1010 if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
1011 MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
1012 auto *MemNode = cast<MemSDNode>(N);
1014 SDVTList VTs = CurDAG->getVTList(NarrowVT, MVT::Other);
1015 SDValue Ops[] = {MemNode->getChain(), MemNode->getBasePtr()};
1016 SDValue NarrowBCast = CurDAG->getMemIntrinsicNode(
1017 X86ISD::VBROADCAST_LOAD, dl, VTs, Ops, MemNode->getMemoryVT(),
1018 MemNode->getMemOperand());
1020 CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
1021 NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
1022 unsigned Index = VT == MVT::v32i16 ? 16 : 32;
1023 Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
1024 CurDAG->getIntPtrConstant(Index, dl));
1027 SDValue To[] = {Res, NarrowBCast.getValue(1)};
1028 CurDAG->ReplaceAllUsesWith(N, To);
1036 case ISD::VSELECT: {
1037 // Replace VSELECT with non-mask conditions with with BLENDV/VPTERNLOG.
1038 EVT EleVT = N->getOperand(0).getValueType().getVectorElementType();
1039 if (EleVT == MVT::i1)
1042 assert(Subtarget->hasSSE41() && "Expected SSE4.1 support!");
1043 assert(N->getValueType(0).getVectorElementType() != MVT::i16 &&
1044 "We can't replace VSELECT with BLENDV in vXi16!");
1046 if (Subtarget->hasVLX() && CurDAG->ComputeNumSignBits(N->getOperand(0)) ==
1047 EleVT.getSizeInBits()) {
1048 R = CurDAG->getNode(X86ISD::VPTERNLOG, SDLoc(N), N->getValueType(0),
1049 N->getOperand(0), N->getOperand(1), N->getOperand(2),
1050 CurDAG->getTargetConstant(0xCA, SDLoc(N), MVT::i8));
1052 R = CurDAG->getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0),
1053 N->getOperand(0), N->getOperand(1),
1057 CurDAG->ReplaceAllUsesWith(N, R.getNode());
1063 case ISD::STRICT_FP_ROUND:
1064 case ISD::FP_TO_SINT:
1065 case ISD::FP_TO_UINT:
1066 case ISD::STRICT_FP_TO_SINT:
1067 case ISD::STRICT_FP_TO_UINT: {
1068 // Replace vector fp_to_s/uint with their X86 specific equivalent so we
1069 // don't need 2 sets of patterns.
1070 if (!N->getSimpleValueType(0).isVector())
1074 switch (N->getOpcode()) {
1075 default: llvm_unreachable("Unexpected opcode!");
1076 case ISD::FP_ROUND: NewOpc = X86ISD::VFPROUND; break;
1077 case ISD::STRICT_FP_ROUND: NewOpc = X86ISD::STRICT_VFPROUND; break;
1078 case ISD::STRICT_FP_TO_SINT: NewOpc = X86ISD::STRICT_CVTTP2SI; break;
1079 case ISD::FP_TO_SINT: NewOpc = X86ISD::CVTTP2SI; break;
1080 case ISD::STRICT_FP_TO_UINT: NewOpc = X86ISD::STRICT_CVTTP2UI; break;
1081 case ISD::FP_TO_UINT: NewOpc = X86ISD::CVTTP2UI; break;
1084 if (N->isStrictFPOpcode())
1086 CurDAG->getNode(NewOpc, SDLoc(N), {N->getValueType(0), MVT::Other},
1087 {N->getOperand(0), N->getOperand(1)});
1090 CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
1093 CurDAG->ReplaceAllUsesWith(N, Res.getNode());
1101 // Replace vector shifts with their X86 specific equivalent so we don't
1102 // need 2 sets of patterns.
1103 if (!N->getValueType(0).isVector())
1107 switch (N->getOpcode()) {
1108 default: llvm_unreachable("Unexpected opcode!");
1109 case ISD::SHL: NewOpc = X86ISD::VSHLV; break;
1110 case ISD::SRA: NewOpc = X86ISD::VSRAV; break;
1111 case ISD::SRL: NewOpc = X86ISD::VSRLV; break;
1113 SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
1114 N->getOperand(0), N->getOperand(1));
1116 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
1121 case ISD::ANY_EXTEND:
1122 case ISD::ANY_EXTEND_VECTOR_INREG: {
1123 // Replace vector any extend with the zero extend equivalents so we don't
1124 // need 2 sets of patterns. Ignore vXi1 extensions.
1125 if (!N->getValueType(0).isVector())
1129 if (N->getOperand(0).getScalarValueSizeInBits() == 1) {
1130 assert(N->getOpcode() == ISD::ANY_EXTEND &&
1131 "Unexpected opcode for mask vector!");
1132 NewOpc = ISD::SIGN_EXTEND;
1134 NewOpc = N->getOpcode() == ISD::ANY_EXTEND
1136 : ISD::ZERO_EXTEND_VECTOR_INREG;
1139 SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
1142 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
1148 case ISD::STRICT_FCEIL:
1150 case ISD::STRICT_FFLOOR:
1152 case ISD::STRICT_FTRUNC:
1153 case ISD::FROUNDEVEN:
1154 case ISD::STRICT_FROUNDEVEN:
1155 case ISD::FNEARBYINT:
1156 case ISD::STRICT_FNEARBYINT:
1158 case ISD::STRICT_FRINT: {
1159 // Replace fp rounding with their X86 specific equivalent so we don't
1160 // need 2 sets of patterns.
1162 switch (N->getOpcode()) {
1163 default: llvm_unreachable("Unexpected opcode!");
1164 case ISD::STRICT_FCEIL:
1165 case ISD::FCEIL: Imm = 0xA; break;
1166 case ISD::STRICT_FFLOOR:
1167 case ISD::FFLOOR: Imm = 0x9; break;
1168 case ISD::STRICT_FTRUNC:
1169 case ISD::FTRUNC: Imm = 0xB; break;
1170 case ISD::STRICT_FROUNDEVEN:
1171 case ISD::FROUNDEVEN: Imm = 0x8; break;
1172 case ISD::STRICT_FNEARBYINT:
1173 case ISD::FNEARBYINT: Imm = 0xC; break;
1174 case ISD::STRICT_FRINT:
1175 case ISD::FRINT: Imm = 0x4; break;
1178 bool IsStrict = N->isStrictFPOpcode();
1181 Res = CurDAG->getNode(X86ISD::STRICT_VRNDSCALE, dl,
1182 {N->getValueType(0), MVT::Other},
1183 {N->getOperand(0), N->getOperand(1),
1184 CurDAG->getTargetConstant(Imm, dl, MVT::i32)});
1186 Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl, N->getValueType(0),
1188 CurDAG->getTargetConstant(Imm, dl, MVT::i32));
1190 CurDAG->ReplaceAllUsesWith(N, Res.getNode());
1198 case X86ISD::FXOR: {
1199 // Widen scalar fp logic ops to vector to reduce isel patterns.
1200 // FIXME: Can we do this during lowering/combine.
1201 MVT VT = N->getSimpleValueType(0);
1202 if (VT.isVector() || VT == MVT::f128)
1205 MVT VecVT = VT == MVT::f64 ? MVT::v2f64
1206 : VT == MVT::f32 ? MVT::v4f32
1210 SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
1212 SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
1216 if (Subtarget->hasSSE2()) {
1217 EVT IntVT = EVT(VecVT).changeVectorElementTypeToInteger();
1218 Op0 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op0);
1219 Op1 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op1);
1221 switch (N->getOpcode()) {
1222 default: llvm_unreachable("Unexpected opcode!");
1223 case X86ISD::FANDN: Opc = X86ISD::ANDNP; break;
1224 case X86ISD::FAND: Opc = ISD::AND; break;
1225 case X86ISD::FOR: Opc = ISD::OR; break;
1226 case X86ISD::FXOR: Opc = ISD::XOR; break;
1228 Res = CurDAG->getNode(Opc, dl, IntVT, Op0, Op1);
1229 Res = CurDAG->getNode(ISD::BITCAST, dl, VecVT, Res);
1231 Res = CurDAG->getNode(N->getOpcode(), dl, VecVT, Op0, Op1);
1233 Res = CurDAG->getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res,
1234 CurDAG->getIntPtrConstant(0, dl));
1236 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
1243 if (OptLevel != CodeGenOpt::None &&
1244 // Only do this when the target can fold the load into the call or
1246 !Subtarget->useIndirectThunkCalls() &&
1247 ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
1248 (N->getOpcode() == X86ISD::TC_RETURN &&
1249 (Subtarget->is64Bit() ||
1250 !getTargetMachine().isPositionIndependent())))) {
1251 /// Also try moving call address load from outside callseq_start to just
1252 /// before the call to allow it to be folded.
1262 ///[CALLSEQ_START] |
1270 bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
1271 SDValue Chain = N->getOperand(0);
1272 SDValue Load = N->getOperand(1);
1273 if (!isCalleeLoad(Load, Chain, HasCallSeq))
1275 moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
1281 // Lower fpround and fpextend nodes that target the FP stack to be store and
1282 // load to the stack. This is a gross hack. We would like to simply mark
1283 // these as being illegal, but when we do that, legalize produces these when
1284 // it expands calls, then expands these in the same legalize pass. We would
1285 // like dag combine to be able to hack on these between the call expansion
1286 // and the node legalization. As such this pass basically does "really
1287 // late" legalization of these inline with the X86 isel pass.
1288 // FIXME: This should only happen when not compiled with -O0.
1289 switch (N->getOpcode()) {
1292 case ISD::FP_EXTEND:
1294 MVT SrcVT = N->getOperand(0).getSimpleValueType();
1295 MVT DstVT = N->getSimpleValueType(0);
1297 // If any of the sources are vectors, no fp stack involved.
1298 if (SrcVT.isVector() || DstVT.isVector())
1301 // If the source and destination are SSE registers, then this is a legal
1302 // conversion that should not be lowered.
1303 const X86TargetLowering *X86Lowering =
1304 static_cast<const X86TargetLowering *>(TLI);
1305 bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
1306 bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
1307 if (SrcIsSSE && DstIsSSE)
1310 if (!SrcIsSSE && !DstIsSSE) {
1311 // If this is an FPStack extension, it is a noop.
1312 if (N->getOpcode() == ISD::FP_EXTEND)
1314 // If this is a value-preserving FPStack truncation, it is a noop.
1315 if (N->getConstantOperandVal(1))
1319 // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
1320 // FPStack has extload and truncstore. SSE can fold direct loads into other
1321 // operations. Based on this, decide what we want to do.
1322 MVT MemVT = (N->getOpcode() == ISD::FP_ROUND) ? DstVT : SrcVT;
1323 SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
1324 int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
1325 MachinePointerInfo MPI =
1326 MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
1329 // FIXME: optimize the case where the src/dest is a load or store?
1331 SDValue Store = CurDAG->getTruncStore(
1332 CurDAG->getEntryNode(), dl, N->getOperand(0), MemTmp, MPI, MemVT);
1333 SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store,
1334 MemTmp, MPI, MemVT);
1336 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
1337 // extload we created. This will cause general havok on the dag because
1338 // anything below the conversion could be folded into other existing nodes.
1339 // To avoid invalidating 'I', back it up to the convert node.
1341 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
1345 //The sequence of events for lowering STRICT_FP versions of these nodes requires
1346 //dealing with the chain differently, as there is already a preexisting chain.
1347 case ISD::STRICT_FP_ROUND:
1348 case ISD::STRICT_FP_EXTEND:
1350 MVT SrcVT = N->getOperand(1).getSimpleValueType();
1351 MVT DstVT = N->getSimpleValueType(0);
1353 // If any of the sources are vectors, no fp stack involved.
1354 if (SrcVT.isVector() || DstVT.isVector())
1357 // If the source and destination are SSE registers, then this is a legal
1358 // conversion that should not be lowered.
1359 const X86TargetLowering *X86Lowering =
1360 static_cast<const X86TargetLowering *>(TLI);
1361 bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
1362 bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
1363 if (SrcIsSSE && DstIsSSE)
1366 if (!SrcIsSSE && !DstIsSSE) {
1367 // If this is an FPStack extension, it is a noop.
1368 if (N->getOpcode() == ISD::STRICT_FP_EXTEND)
1370 // If this is a value-preserving FPStack truncation, it is a noop.
1371 if (N->getConstantOperandVal(2))
1375 // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
1376 // FPStack has extload and truncstore. SSE can fold direct loads into other
1377 // operations. Based on this, decide what we want to do.
1378 MVT MemVT = (N->getOpcode() == ISD::STRICT_FP_ROUND) ? DstVT : SrcVT;
1379 SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
1380 int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
1381 MachinePointerInfo MPI =
1382 MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
1385 // FIXME: optimize the case where the src/dest is a load or store?
1387 //Since the operation is StrictFP, use the preexisting chain.
1388 SDValue Store, Result;
1390 SDVTList VTs = CurDAG->getVTList(MVT::Other);
1391 SDValue Ops[] = {N->getOperand(0), N->getOperand(1), MemTmp};
1392 Store = CurDAG->getMemIntrinsicNode(X86ISD::FST, dl, VTs, Ops, MemVT,
1393 MPI, /*Align*/ std::nullopt,
1394 MachineMemOperand::MOStore);
1395 if (N->getFlags().hasNoFPExcept()) {
1396 SDNodeFlags Flags = Store->getFlags();
1397 Flags.setNoFPExcept(true);
1398 Store->setFlags(Flags);
1401 assert(SrcVT == MemVT && "Unexpected VT!");
1402 Store = CurDAG->getStore(N->getOperand(0), dl, N->getOperand(1), MemTmp,
1407 SDVTList VTs = CurDAG->getVTList(DstVT, MVT::Other);
1408 SDValue Ops[] = {Store, MemTmp};
1409 Result = CurDAG->getMemIntrinsicNode(
1410 X86ISD::FLD, dl, VTs, Ops, MemVT, MPI,
1411 /*Align*/ std::nullopt, MachineMemOperand::MOLoad);
1412 if (N->getFlags().hasNoFPExcept()) {
1413 SDNodeFlags Flags = Result->getFlags();
1414 Flags.setNoFPExcept(true);
1415 Result->setFlags(Flags);
1418 assert(DstVT == MemVT && "Unexpected VT!");
1419 Result = CurDAG->getLoad(DstVT, dl, Store, MemTmp, MPI);
1422 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
1423 // extload we created. This will cause general havok on the dag because
1424 // anything below the conversion could be folded into other existing nodes.
1425 // To avoid invalidating 'I', back it up to the convert node.
1427 CurDAG->ReplaceAllUsesWith(N, Result.getNode());
1433 // Now that we did that, the node is dead. Increment the iterator to the
1434 // next node to process, then delete N.
1439 // Remove any dead nodes that may have been left behind.
1441 CurDAG->RemoveDeadNodes();
1444 // Look for a redundant movzx/movsx that can occur after an 8-bit divrem.
1445 bool X86DAGToDAGISel::tryOptimizeRem8Extend(SDNode *N) {
1446 unsigned Opc = N->getMachineOpcode();
1447 if (Opc != X86::MOVZX32rr8 && Opc != X86::MOVSX32rr8 &&
1448 Opc != X86::MOVSX64rr8)
1451 SDValue N0 = N->getOperand(0);
1453 // We need to be extracting the lower bit of an extend.
1454 if (!N0.isMachineOpcode() ||
1455 N0.getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG ||
1456 N0.getConstantOperandVal(1) != X86::sub_8bit)
1459 // We're looking for either a movsx or movzx to match the original opcode.
1460 unsigned ExpectedOpc = Opc == X86::MOVZX32rr8 ? X86::MOVZX32rr8_NOREX
1461 : X86::MOVSX32rr8_NOREX;
1462 SDValue N00 = N0.getOperand(0);
1463 if (!N00.isMachineOpcode() || N00.getMachineOpcode() != ExpectedOpc)
1466 if (Opc == X86::MOVSX64rr8) {
1467 // If we had a sign extend from 8 to 64 bits. We still need to go from 32
1469 MachineSDNode *Extend = CurDAG->getMachineNode(X86::MOVSX64rr32, SDLoc(N),
1471 ReplaceUses(N, Extend);
1473 // Ok we can drop this extend and just use the original extend.
1474 ReplaceUses(N, N00.getNode());
1480 void X86DAGToDAGISel::PostprocessISelDAG() {
1481 // Skip peepholes at -O0.
1482 if (TM.getOptLevel() == CodeGenOpt::None)
1485 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
1487 bool MadeChange = false;
1488 while (Position != CurDAG->allnodes_begin()) {
1489 SDNode *N = &*--Position;
1490 // Skip dead nodes and any non-machine opcodes.
1491 if (N->use_empty() || !N->isMachineOpcode())
1494 if (tryOptimizeRem8Extend(N)) {
1499 // Look for a TESTrr+ANDrr pattern where both operands of the test are
1500 // the same. Rewrite to remove the AND.
1501 unsigned Opc = N->getMachineOpcode();
1502 if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
1503 Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
1504 N->getOperand(0) == N->getOperand(1) &&
1505 N->getOperand(0)->hasNUsesOfValue(2, N->getOperand(0).getResNo()) &&
1506 N->getOperand(0).isMachineOpcode()) {
1507 SDValue And = N->getOperand(0);
1508 unsigned N0Opc = And.getMachineOpcode();
1509 if ((N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
1510 N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) &&
1511 !And->hasAnyUseOfValue(1)) {
1512 MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
1516 ReplaceUses(N, Test);
1520 if ((N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
1521 N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) &&
1522 !And->hasAnyUseOfValue(1)) {
1525 case X86::AND8rm: NewOpc = X86::TEST8mr; break;
1526 case X86::AND16rm: NewOpc = X86::TEST16mr; break;
1527 case X86::AND32rm: NewOpc = X86::TEST32mr; break;
1528 case X86::AND64rm: NewOpc = X86::TEST64mr; break;
1531 // Need to swap the memory and register operand.
1532 SDValue Ops[] = { And.getOperand(1),
1538 And.getOperand(6) /* Chain */ };
1539 MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
1540 MVT::i32, MVT::Other, Ops);
1541 CurDAG->setNodeMemRefs(
1542 Test, cast<MachineSDNode>(And.getNode())->memoperands());
1543 ReplaceUses(And.getValue(2), SDValue(Test, 1));
1544 ReplaceUses(SDValue(N, 0), SDValue(Test, 0));
1550 // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
1551 // used. We're doing this late so we can prefer to fold the AND into masked
1552 // comparisons. Doing that can be better for the live range of the mask
1554 if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
1555 Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
1556 N->getOperand(0) == N->getOperand(1) &&
1557 N->isOnlyUserOf(N->getOperand(0).getNode()) &&
1558 N->getOperand(0).isMachineOpcode() &&
1559 onlyUsesZeroFlag(SDValue(N, 0))) {
1560 SDValue And = N->getOperand(0);
1561 unsigned N0Opc = And.getMachineOpcode();
1562 // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
1563 // KAND instructions and KTEST use the same ISA feature.
1564 if (N0Opc == X86::KANDBrr ||
1565 (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
1566 N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
1569 default: llvm_unreachable("Unexpected opcode!");
1570 case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
1571 case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
1572 case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
1573 case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
1575 MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
1579 ReplaceUses(N, KTest);
1585 // Attempt to remove vectors moves that were inserted to zero upper bits.
1586 if (Opc != TargetOpcode::SUBREG_TO_REG)
1589 unsigned SubRegIdx = N->getConstantOperandVal(2);
1590 if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
1593 SDValue Move = N->getOperand(1);
1594 if (!Move.isMachineOpcode())
1597 // Make sure its one of the move opcodes we recognize.
1598 switch (Move.getMachineOpcode()) {
1601 case X86::VMOVAPDrr: case X86::VMOVUPDrr:
1602 case X86::VMOVAPSrr: case X86::VMOVUPSrr:
1603 case X86::VMOVDQArr: case X86::VMOVDQUrr:
1604 case X86::VMOVAPDYrr: case X86::VMOVUPDYrr:
1605 case X86::VMOVAPSYrr: case X86::VMOVUPSYrr:
1606 case X86::VMOVDQAYrr: case X86::VMOVDQUYrr:
1607 case X86::VMOVAPDZ128rr: case X86::VMOVUPDZ128rr:
1608 case X86::VMOVAPSZ128rr: case X86::VMOVUPSZ128rr:
1609 case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
1610 case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
1611 case X86::VMOVAPDZ256rr: case X86::VMOVUPDZ256rr:
1612 case X86::VMOVAPSZ256rr: case X86::VMOVUPSZ256rr:
1613 case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
1614 case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
1618 SDValue In = Move.getOperand(0);
1619 if (!In.isMachineOpcode() ||
1620 In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
1623 // Make sure the instruction has a VEX, XOP, or EVEX prefix. This covers
1624 // the SHA instructions which use a legacy encoding.
1625 uint64_t TSFlags = getInstrInfo()->get(In.getMachineOpcode()).TSFlags;
1626 if ((TSFlags & X86II::EncodingMask) != X86II::VEX &&
1627 (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
1628 (TSFlags & X86II::EncodingMask) != X86II::XOP)
1631 // Producing instruction is another vector instruction. We can drop the
1633 CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
1638 CurDAG->RemoveDeadNodes();
1642 /// Emit any code that needs to be executed only in the main function.
1643 void X86DAGToDAGISel::emitSpecialCodeForMain() {
1644 if (Subtarget->isTargetCygMing()) {
1645 TargetLowering::ArgListTy Args;
1646 auto &DL = CurDAG->getDataLayout();
1648 TargetLowering::CallLoweringInfo CLI(*CurDAG);
1649 CLI.setChain(CurDAG->getRoot())
1650 .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
1651 CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
1653 const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
1654 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
1655 CurDAG->setRoot(Result.second);
1659 void X86DAGToDAGISel::emitFunctionEntryCode() {
1660 // If this is main, emit special code for main.
1661 const Function &F = MF->getFunction();
1662 if (F.hasExternalLinkage() && F.getName() == "main")
1663 emitSpecialCodeForMain();
1666 static bool isDispSafeForFrameIndex(int64_t Val) {
1667 // On 64-bit platforms, we can run into an issue where a frame index
1668 // includes a displacement that, when added to the explicit displacement,
1669 // will overflow the displacement field. Assuming that the frame index
1670 // displacement fits into a 31-bit integer (which is only slightly more
1671 // aggressive than the current fundamental assumption that it fits into
1672 // a 32-bit integer), a 31-bit disp should always be safe.
1673 return isInt<31>(Val);
1676 bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
1677 X86ISelAddressMode &AM) {
1678 // We may have already matched a displacement and the caller just added the
1679 // symbolic displacement. So we still need to do the checks even if Offset
1682 int64_t Val = AM.Disp + Offset;
1684 // Cannot combine ExternalSymbol displacements with integer offsets.
1685 if (Val != 0 && (AM.ES || AM.MCSym))
1688 CodeModel::Model M = TM.getCodeModel();
1689 if (Subtarget->is64Bit()) {
1691 !X86::isOffsetSuitableForCodeModel(Val, M,
1692 AM.hasSymbolicDisplacement()))
1694 // In addition to the checks required for a register base, check that
1695 // we do not try to use an unsafe Disp with a frame index.
1696 if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
1697 !isDispSafeForFrameIndex(Val))
1705 bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
1706 bool AllowSegmentRegForX32) {
1707 SDValue Address = N->getOperand(1);
1709 // load gs:0 -> GS segment register.
1710 // load fs:0 -> FS segment register.
1712 // This optimization is generally valid because the GNU TLS model defines that
1713 // gs:0 (or fs:0 on X86-64) contains its own address. However, for X86-64 mode
1714 // with 32-bit registers, as we get in ILP32 mode, those registers are first
1715 // zero-extended to 64 bits and then added it to the base address, which gives
1716 // unwanted results when the register holds a negative value.
1717 // For more information see http://people.redhat.com/drepper/tls.pdf
1718 if (isNullConstant(Address) && AM.Segment.getNode() == nullptr &&
1719 !IndirectTlsSegRefs &&
1720 (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
1721 Subtarget->isTargetFuchsia())) {
1722 if (Subtarget->isTarget64BitILP32() && !AllowSegmentRegForX32)
1724 switch (N->getPointerInfo().getAddrSpace()) {
1726 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1729 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1731 // Address space X86AS::SS is not handled here, because it is not used to
1732 // address TLS areas.
1739 /// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
1740 /// mode. These wrap things that will resolve down into a symbol reference.
1741 /// If no match is possible, this returns true, otherwise it returns false.
1742 bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
1743 // If the addressing mode already has a symbol as the displacement, we can
1744 // never match another symbol.
1745 if (AM.hasSymbolicDisplacement())
1748 bool IsRIPRelTLS = false;
1749 bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
1751 SDValue Val = N.getOperand(0);
1752 if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
1756 // We can't use an addressing mode in the 64-bit large code model.
1757 // Global TLS addressing is an exception. In the medium code model,
1758 // we use can use a mode when RIP wrappers are present.
1759 // That signifies access to globals that are known to be "near",
1760 // such as the GOT itself.
1761 CodeModel::Model M = TM.getCodeModel();
1762 if (Subtarget->is64Bit() &&
1763 ((M == CodeModel::Large && !IsRIPRelTLS) ||
1764 (M == CodeModel::Medium && !IsRIPRel)))
1767 // Base and index reg must be 0 in order to use %rip as base.
1768 if (IsRIPRel && AM.hasBaseOrIndexReg())
1771 // Make a local copy in case we can't do this fold.
1772 X86ISelAddressMode Backup = AM;
1775 SDValue N0 = N.getOperand(0);
1776 if (auto *G = dyn_cast<GlobalAddressSDNode>(N0)) {
1777 AM.GV = G->getGlobal();
1778 AM.SymbolFlags = G->getTargetFlags();
1779 Offset = G->getOffset();
1780 } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
1781 AM.CP = CP->getConstVal();
1782 AM.Alignment = CP->getAlign();
1783 AM.SymbolFlags = CP->getTargetFlags();
1784 Offset = CP->getOffset();
1785 } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
1786 AM.ES = S->getSymbol();
1787 AM.SymbolFlags = S->getTargetFlags();
1788 } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
1789 AM.MCSym = S->getMCSymbol();
1790 } else if (auto *J = dyn_cast<JumpTableSDNode>(N0)) {
1791 AM.JT = J->getIndex();
1792 AM.SymbolFlags = J->getTargetFlags();
1793 } else if (auto *BA = dyn_cast<BlockAddressSDNode>(N0)) {
1794 AM.BlockAddr = BA->getBlockAddress();
1795 AM.SymbolFlags = BA->getTargetFlags();
1796 Offset = BA->getOffset();
1798 llvm_unreachable("Unhandled symbol reference node.");
1800 if (foldOffsetIntoAddress(Offset, AM)) {
1806 AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
1808 // Commit the changes now that we know this fold is safe.
1812 /// Add the specified node to the specified addressing mode, returning true if
1813 /// it cannot be done. This just pattern matches for the addressing mode.
1814 bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
1815 if (matchAddressRecursively(N, AM, 0))
1818 // Post-processing: Make a second attempt to fold a load, if we now know
1819 // that there will not be any other register. This is only performed for
1820 // 64-bit ILP32 mode since 32-bit mode and 64-bit LP64 mode will have folded
1821 // any foldable load the first time.
1822 if (Subtarget->isTarget64BitILP32() &&
1823 AM.BaseType == X86ISelAddressMode::RegBase &&
1824 AM.Base_Reg.getNode() != nullptr && AM.IndexReg.getNode() == nullptr) {
1825 SDValue Save_Base_Reg = AM.Base_Reg;
1826 if (auto *LoadN = dyn_cast<LoadSDNode>(Save_Base_Reg)) {
1827 AM.Base_Reg = SDValue();
1828 if (matchLoadInAddress(LoadN, AM, /*AllowSegmentRegForX32=*/true))
1829 AM.Base_Reg = Save_Base_Reg;
1833 // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
1834 // a smaller encoding and avoids a scaled-index.
1835 if (AM.Scale == 2 &&
1836 AM.BaseType == X86ISelAddressMode::RegBase &&
1837 AM.Base_Reg.getNode() == nullptr) {
1838 AM.Base_Reg = AM.IndexReg;
1842 // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
1843 // because it has a smaller encoding.
1844 // TODO: Which other code models can use this?
1845 switch (TM.getCodeModel()) {
1847 case CodeModel::Small:
1848 case CodeModel::Kernel:
1849 if (Subtarget->is64Bit() &&
1851 AM.BaseType == X86ISelAddressMode::RegBase &&
1852 AM.Base_Reg.getNode() == nullptr &&
1853 AM.IndexReg.getNode() == nullptr &&
1854 AM.SymbolFlags == X86II::MO_NO_FLAG &&
1855 AM.hasSymbolicDisplacement())
1856 AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
1863 bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
1865 // Add an artificial use to this node so that we can keep track of
1866 // it if it gets CSE'd with a different node.
1867 HandleSDNode Handle(N);
1869 X86ISelAddressMode Backup = AM;
1870 if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1871 !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
1875 // Try again after commutating the operands.
1876 if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM,
1878 !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1))
1882 // If we couldn't fold both operands into the address at the same time,
1883 // see if we can just put each operand into a register and fold at least
1885 if (AM.BaseType == X86ISelAddressMode::RegBase &&
1886 !AM.Base_Reg.getNode() &&
1887 !AM.IndexReg.getNode()) {
1888 N = Handle.getValue();
1889 AM.Base_Reg = N.getOperand(0);
1890 AM.IndexReg = N.getOperand(1);
1894 N = Handle.getValue();
1898 // Insert a node into the DAG at least before the Pos node's position. This
1899 // will reposition the node as needed, and will assign it a node ID that is <=
1900 // the Pos node's ID. Note that this does *not* preserve the uniqueness of node
1901 // IDs! The selection DAG must no longer depend on their uniqueness when this
1903 static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
1904 if (N->getNodeId() == -1 ||
1905 (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
1906 SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
1907 DAG.RepositionNode(Pos->getIterator(), N.getNode());
1908 // Mark Node as invalid for pruning as after this it may be a successor to a
1909 // selected node but otherwise be in the same position of Pos.
1910 // Conservatively mark it with the same -abs(Id) to assure node id
1911 // invariant is preserved.
1912 N->setNodeId(Pos->getNodeId());
1913 SelectionDAGISel::InvalidateNodeId(N.getNode());
1917 // Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
1918 // safe. This allows us to convert the shift and and into an h-register
1919 // extract and a scaled index. Returns false if the simplification is
1921 static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
1923 SDValue Shift, SDValue X,
1924 X86ISelAddressMode &AM) {
1925 if (Shift.getOpcode() != ISD::SRL ||
1926 !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1930 int ScaleLog = 8 - Shift.getConstantOperandVal(1);
1931 if (ScaleLog <= 0 || ScaleLog >= 4 ||
1932 Mask != (0xffu << ScaleLog))
1935 MVT XVT = X.getSimpleValueType();
1936 MVT VT = N.getSimpleValueType();
1938 SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
1939 SDValue NewMask = DAG.getConstant(0xff, DL, XVT);
1940 SDValue Srl = DAG.getNode(ISD::SRL, DL, XVT, X, Eight);
1941 SDValue And = DAG.getNode(ISD::AND, DL, XVT, Srl, NewMask);
1942 SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
1943 SDValue Ext = DAG.getZExtOrTrunc(And, DL, VT);
1944 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Ext, ShlCount);
1946 // Insert the new nodes into the topological ordering. We must do this in
1947 // a valid topological ordering as nothing is going to go back and re-sort
1948 // these nodes. We continually insert before 'N' in sequence as this is
1949 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1950 // hierarchy left to express.
1951 insertDAGNode(DAG, N, Eight);
1952 insertDAGNode(DAG, N, Srl);
1953 insertDAGNode(DAG, N, NewMask);
1954 insertDAGNode(DAG, N, And);
1955 insertDAGNode(DAG, N, ShlCount);
1957 insertDAGNode(DAG, N, Ext);
1958 insertDAGNode(DAG, N, Shl);
1959 DAG.ReplaceAllUsesWith(N, Shl);
1960 DAG.RemoveDeadNode(N.getNode());
1962 AM.Scale = (1 << ScaleLog);
1966 // Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
1967 // allows us to fold the shift into this addressing mode. Returns false if the
1968 // transform succeeded.
1969 static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
1970 X86ISelAddressMode &AM) {
1971 SDValue Shift = N.getOperand(0);
1973 // Use a signed mask so that shifting right will insert sign bits. These
1974 // bits will be removed when we shift the result left so it doesn't matter
1975 // what we use. This might allow a smaller immediate encoding.
1976 int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
1978 // If we have an any_extend feeding the AND, look through it to see if there
1979 // is a shift behind it. But only if the AND doesn't use the extended bits.
1980 // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
1981 bool FoundAnyExtend = false;
1982 if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
1983 Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
1985 FoundAnyExtend = true;
1986 Shift = Shift.getOperand(0);
1989 if (Shift.getOpcode() != ISD::SHL ||
1990 !isa<ConstantSDNode>(Shift.getOperand(1)))
1993 SDValue X = Shift.getOperand(0);
1995 // Not likely to be profitable if either the AND or SHIFT node has more
1996 // than one use (unless all uses are for address computation). Besides,
1997 // isel mechanism requires their node ids to be reused.
1998 if (!N.hasOneUse() || !Shift.hasOneUse())
2001 // Verify that the shift amount is something we can fold.
2002 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
2003 if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
2006 MVT VT = N.getSimpleValueType();
2008 if (FoundAnyExtend) {
2009 SDValue NewX = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X);
2010 insertDAGNode(DAG, N, NewX);
2014 SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
2015 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
2016 SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
2018 // Insert the new nodes into the topological ordering. We must do this in
2019 // a valid topological ordering as nothing is going to go back and re-sort
2020 // these nodes. We continually insert before 'N' in sequence as this is
2021 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2022 // hierarchy left to express.
2023 insertDAGNode(DAG, N, NewMask);
2024 insertDAGNode(DAG, N, NewAnd);
2025 insertDAGNode(DAG, N, NewShift);
2026 DAG.ReplaceAllUsesWith(N, NewShift);
2027 DAG.RemoveDeadNode(N.getNode());
2029 AM.Scale = 1 << ShiftAmt;
2030 AM.IndexReg = NewAnd;
2034 // Implement some heroics to detect shifts of masked values where the mask can
2035 // be replaced by extending the shift and undoing that in the addressing mode
2036 // scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
2037 // (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
2038 // the addressing mode. This results in code such as:
2040 // int f(short *y, int *lookup_table) {
2042 // return *y + lookup_table[*y >> 11];
2046 // movzwl (%rdi), %eax
2049 // addl (%rsi,%rcx,4), %eax
2052 // movzwl (%rdi), %eax
2056 // addl (%rsi,%rcx), %eax
2058 // Note that this function assumes the mask is provided as a mask *after* the
2059 // value is shifted. The input chain may or may not match that, but computing
2060 // such a mask is trivial.
2061 static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
2063 SDValue Shift, SDValue X,
2064 X86ISelAddressMode &AM) {
2065 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
2066 !isa<ConstantSDNode>(Shift.getOperand(1)))
2069 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
2070 unsigned MaskLZ = llvm::countl_zero(Mask);
2071 unsigned MaskTZ = llvm::countr_zero(Mask);
2073 // The amount of shift we're trying to fit into the addressing mode is taken
2074 // from the trailing zeros of the mask.
2075 unsigned AMShiftAmt = MaskTZ;
2077 // There is nothing we can do here unless the mask is removing some bits.
2078 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
2079 if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
2081 // We also need to ensure that mask is a continuous run of bits.
2082 if (llvm::countr_one(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64)
2085 // Scale the leading zero count down based on the actual size of the value.
2086 // Also scale it down based on the size of the shift.
2087 unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
2088 if (MaskLZ < ScaleDown)
2090 MaskLZ -= ScaleDown;
2092 // The final check is to ensure that any masked out high bits of X are
2093 // already known to be zero. Otherwise, the mask has a semantic impact
2094 // other than masking out a couple of low bits. Unfortunately, because of
2095 // the mask, zero extensions will be removed from operands in some cases.
2096 // This code works extra hard to look through extensions because we can
2097 // replace them with zero extensions cheaply if necessary.
2098 bool ReplacingAnyExtend = false;
2099 if (X.getOpcode() == ISD::ANY_EXTEND) {
2100 unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
2101 X.getOperand(0).getSimpleValueType().getSizeInBits();
2102 // Assume that we'll replace the any-extend with a zero-extend, and
2103 // narrow the search to the extended value.
2104 X = X.getOperand(0);
2105 MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
2106 ReplacingAnyExtend = true;
2108 APInt MaskedHighBits =
2109 APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
2110 KnownBits Known = DAG.computeKnownBits(X);
2111 if (MaskedHighBits != Known.Zero) return true;
2113 // We've identified a pattern that can be transformed into a single shift
2114 // and an addressing mode. Make it so.
2115 MVT VT = N.getSimpleValueType();
2116 if (ReplacingAnyExtend) {
2117 assert(X.getValueType() != VT);
2118 // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
2119 SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
2120 insertDAGNode(DAG, N, NewX);
2124 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
2125 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
2126 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
2127 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
2129 // Insert the new nodes into the topological ordering. We must do this in
2130 // a valid topological ordering as nothing is going to go back and re-sort
2131 // these nodes. We continually insert before 'N' in sequence as this is
2132 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2133 // hierarchy left to express.
2134 insertDAGNode(DAG, N, NewSRLAmt);
2135 insertDAGNode(DAG, N, NewSRL);
2136 insertDAGNode(DAG, N, NewSHLAmt);
2137 insertDAGNode(DAG, N, NewSHL);
2138 DAG.ReplaceAllUsesWith(N, NewSHL);
2139 DAG.RemoveDeadNode(N.getNode());
2141 AM.Scale = 1 << AMShiftAmt;
2142 AM.IndexReg = NewSRL;
2146 // Transform "(X >> SHIFT) & (MASK << C1)" to
2147 // "((X >> (SHIFT + C1)) & (MASK)) << C1". Everything before the SHL will be
2148 // matched to a BEXTR later. Returns false if the simplification is performed.
2149 static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N,
2151 SDValue Shift, SDValue X,
2152 X86ISelAddressMode &AM,
2153 const X86Subtarget &Subtarget) {
2154 if (Shift.getOpcode() != ISD::SRL ||
2155 !isa<ConstantSDNode>(Shift.getOperand(1)) ||
2156 !Shift.hasOneUse() || !N.hasOneUse())
2159 // Only do this if BEXTR will be matched by matchBEXTRFromAndImm.
2160 if (!Subtarget.hasTBM() &&
2161 !(Subtarget.hasBMI() && Subtarget.hasFastBEXTR()))
2164 // We need to ensure that mask is a continuous run of bits.
2165 if (!isShiftedMask_64(Mask)) return true;
2167 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
2169 // The amount of shift we're trying to fit into the addressing mode is taken
2170 // from the trailing zeros of the mask.
2171 unsigned AMShiftAmt = llvm::countr_zero(Mask);
2173 // There is nothing we can do here unless the mask is removing some bits.
2174 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
2175 if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
2177 MVT VT = N.getSimpleValueType();
2179 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
2180 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
2181 SDValue NewMask = DAG.getConstant(Mask >> AMShiftAmt, DL, VT);
2182 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, NewSRL, NewMask);
2183 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
2184 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewAnd, NewSHLAmt);
2186 // Insert the new nodes into the topological ordering. We must do this in
2187 // a valid topological ordering as nothing is going to go back and re-sort
2188 // these nodes. We continually insert before 'N' in sequence as this is
2189 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2190 // hierarchy left to express.
2191 insertDAGNode(DAG, N, NewSRLAmt);
2192 insertDAGNode(DAG, N, NewSRL);
2193 insertDAGNode(DAG, N, NewMask);
2194 insertDAGNode(DAG, N, NewAnd);
2195 insertDAGNode(DAG, N, NewSHLAmt);
2196 insertDAGNode(DAG, N, NewSHL);
2197 DAG.ReplaceAllUsesWith(N, NewSHL);
2198 DAG.RemoveDeadNode(N.getNode());
2200 AM.Scale = 1 << AMShiftAmt;
2201 AM.IndexReg = NewAnd;
2205 bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
2209 dbgs() << "MatchAddress: ";
2214 return matchAddressBase(N, AM);
2216 // If this is already a %rip relative address, we can only merge immediates
2217 // into it. Instead of handling this in every case, we handle it here.
2218 // RIP relative addressing: %rip + 32-bit displacement!
2219 if (AM.isRIPRelative()) {
2220 // FIXME: JumpTable and ExternalSymbol address currently don't like
2221 // displacements. It isn't very important, but this should be fixed for
2223 if (!(AM.ES || AM.MCSym) && AM.JT != -1)
2226 if (auto *Cst = dyn_cast<ConstantSDNode>(N))
2227 if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
2232 switch (N.getOpcode()) {
2234 case ISD::LOCAL_RECOVER: {
2235 if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
2236 if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
2237 // Use the symbol and don't prefix it.
2238 AM.MCSym = ESNode->getMCSymbol();
2243 case ISD::Constant: {
2244 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2245 if (!foldOffsetIntoAddress(Val, AM))
2250 case X86ISD::Wrapper:
2251 case X86ISD::WrapperRIP:
2252 if (!matchWrapper(N, AM))
2257 if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
2261 case ISD::FrameIndex:
2262 if (AM.BaseType == X86ISelAddressMode::RegBase &&
2263 AM.Base_Reg.getNode() == nullptr &&
2264 (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
2265 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
2266 AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
2272 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2275 if (auto *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2276 unsigned Val = CN->getZExtValue();
2277 // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
2278 // that the base operand remains free for further matching. If
2279 // the base doesn't end up getting used, a post-processing step
2280 // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
2281 if (Val == 1 || Val == 2 || Val == 3) {
2282 AM.Scale = 1 << Val;
2283 SDValue ShVal = N.getOperand(0);
2285 // Okay, we know that we have a scale by now. However, if the scaled
2286 // value is an add of something and a constant, we can fold the
2287 // constant into the disp field here.
2288 if (CurDAG->isBaseWithConstantOffset(ShVal)) {
2289 AM.IndexReg = ShVal.getOperand(0);
2290 auto *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
2291 uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
2292 if (!foldOffsetIntoAddress(Disp, AM))
2296 AM.IndexReg = ShVal;
2303 // Scale must not be used already.
2304 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2306 // We only handle up to 64-bit values here as those are what matter for
2307 // addressing mode optimizations.
2308 assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2309 "Unexpected value size!");
2311 SDValue And = N.getOperand(0);
2312 if (And.getOpcode() != ISD::AND) break;
2313 SDValue X = And.getOperand(0);
2315 // The mask used for the transform is expected to be post-shift, but we
2316 // found the shift first so just apply the shift to the mask before passing
2318 if (!isa<ConstantSDNode>(N.getOperand(1)) ||
2319 !isa<ConstantSDNode>(And.getOperand(1)))
2321 uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
2323 // Try to fold the mask and shift into the scale, and return false if we
2325 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
2330 case ISD::SMUL_LOHI:
2331 case ISD::UMUL_LOHI:
2332 // A mul_lohi where we need the low part can be folded as a plain multiply.
2333 if (N.getResNo() != 0) break;
2336 case X86ISD::MUL_IMM:
2337 // X*[3,5,9] -> X+X*[2,4,8]
2338 if (AM.BaseType == X86ISelAddressMode::RegBase &&
2339 AM.Base_Reg.getNode() == nullptr &&
2340 AM.IndexReg.getNode() == nullptr) {
2341 if (auto *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
2342 if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
2343 CN->getZExtValue() == 9) {
2344 AM.Scale = unsigned(CN->getZExtValue())-1;
2346 SDValue MulVal = N.getOperand(0);
2349 // Okay, we know that we have a scale by now. However, if the scaled
2350 // value is an add of something and a constant, we can fold the
2351 // constant into the disp field here.
2352 if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
2353 isa<ConstantSDNode>(MulVal.getOperand(1))) {
2354 Reg = MulVal.getOperand(0);
2355 auto *AddVal = cast<ConstantSDNode>(MulVal.getOperand(1));
2356 uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
2357 if (foldOffsetIntoAddress(Disp, AM))
2358 Reg = N.getOperand(0);
2360 Reg = N.getOperand(0);
2363 AM.IndexReg = AM.Base_Reg = Reg;
2370 // Given A-B, if A can be completely folded into the address and
2371 // the index field with the index field unused, use -B as the index.
2372 // This is a win if a has multiple parts that can be folded into
2373 // the address. Also, this saves a mov if the base register has
2374 // other uses, since it avoids a two-address sub instruction, however
2375 // it costs an additional mov if the index register has other uses.
2377 // Add an artificial use to this node so that we can keep track of
2378 // it if it gets CSE'd with a different node.
2379 HandleSDNode Handle(N);
2381 // Test if the LHS of the sub can be folded.
2382 X86ISelAddressMode Backup = AM;
2383 if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
2384 N = Handle.getValue();
2388 N = Handle.getValue();
2389 // Test if the index field is free for use.
2390 if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
2396 SDValue RHS = N.getOperand(1);
2397 // If the RHS involves a register with multiple uses, this
2398 // transformation incurs an extra mov, due to the neg instruction
2399 // clobbering its operand.
2400 if (!RHS.getNode()->hasOneUse() ||
2401 RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
2402 RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
2403 RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
2404 (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
2405 RHS.getOperand(0).getValueType() == MVT::i32))
2407 // If the base is a register with multiple uses, this
2408 // transformation may save a mov.
2409 if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
2410 !AM.Base_Reg.getNode()->hasOneUse()) ||
2411 AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2413 // If the folded LHS was interesting, this transformation saves
2414 // address arithmetic.
2415 if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
2416 ((AM.Disp != 0) && (Backup.Disp == 0)) +
2417 (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
2419 // If it doesn't look like it may be an overall win, don't do it.
2425 // Ok, the transformation is legal and appears profitable. Go for it.
2426 // Negation will be emitted later to avoid creating dangling nodes if this
2427 // was an unprofitable LEA.
2429 AM.NegateIndex = true;
2435 if (!matchAdd(N, AM, Depth))
2440 // We want to look through a transform in InstCombine and DAGCombiner that
2441 // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
2442 // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
2443 // An 'lea' can then be used to match the shift (multiply) and add:
2445 // lea (%rsi, %rdi, 8), %rax
2446 if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
2447 !matchAdd(N, AM, Depth))
2452 // We want to look through a transform in InstCombine that
2453 // turns 'add' with min_signed_val into 'xor', so we can treat this 'xor'
2454 // exactly like an 'add'.
2455 if (isMinSignedConstant(N.getOperand(1)) && !matchAdd(N, AM, Depth))
2460 // Perform some heroic transforms on an and of a constant-count shift
2461 // with a constant to enable use of the scaled offset field.
2463 // Scale must not be used already.
2464 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2466 // We only handle up to 64-bit values here as those are what matter for
2467 // addressing mode optimizations.
2468 assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2469 "Unexpected value size!");
2471 if (!isa<ConstantSDNode>(N.getOperand(1)))
2474 if (N.getOperand(0).getOpcode() == ISD::SRL) {
2475 SDValue Shift = N.getOperand(0);
2476 SDValue X = Shift.getOperand(0);
2478 uint64_t Mask = N.getConstantOperandVal(1);
2480 // Try to fold the mask and shift into an extract and scale.
2481 if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
2484 // Try to fold the mask and shift directly into the scale.
2485 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
2488 // Try to fold the mask and shift into BEXTR and scale.
2489 if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask, Shift, X, AM, *Subtarget))
2493 // Try to swap the mask and shift to place shifts which can be done as
2494 // a scale on the outside of the mask.
2495 if (!foldMaskedShiftToScaledMask(*CurDAG, N, AM))
2500 case ISD::ZERO_EXTEND: {
2501 // Try to widen a zexted shift left to the same size as its use, so we can
2502 // match the shift as a scale factor.
2503 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2506 // Peek through mask: zext(and(shl(x,c1),c2))
2507 SDValue Src = N.getOperand(0);
2508 APInt Mask = APInt::getAllOnes(Src.getScalarValueSizeInBits());
2509 if (Src.getOpcode() == ISD::AND && Src.hasOneUse())
2510 if (auto *MaskC = dyn_cast<ConstantSDNode>(Src.getOperand(1))) {
2511 Mask = MaskC->getAPIntValue();
2512 Src = Src.getOperand(0);
2515 if (Src.getOpcode() == ISD::SHL && Src.hasOneUse()) {
2516 // Give up if the shift is not a valid scale factor [1,2,3].
2517 SDValue ShlSrc = Src.getOperand(0);
2518 SDValue ShlAmt = Src.getOperand(1);
2519 auto *ShAmtC = dyn_cast<ConstantSDNode>(ShlAmt);
2522 unsigned ShAmtV = ShAmtC->getZExtValue();
2526 // The narrow shift must only shift out zero bits (it must be 'nuw').
2527 // That makes it safe to widen to the destination type.
2529 APInt::getHighBitsSet(ShlSrc.getValueSizeInBits(), ShAmtV);
2530 if (!CurDAG->MaskedValueIsZero(ShlSrc, HighZeros & Mask))
2533 // zext (shl nuw i8 %x, C1) to i32
2534 // --> shl (zext i8 %x to i32), (zext C1)
2535 // zext (and (shl nuw i8 %x, C1), C2) to i32
2536 // --> shl (zext i8 (and %x, C2 >> C1) to i32), (zext C1)
2537 MVT SrcVT = ShlSrc.getSimpleValueType();
2538 MVT VT = N.getSimpleValueType();
2541 SDValue Res = ShlSrc;
2542 if (!Mask.isAllOnes()) {
2543 Res = CurDAG->getConstant(Mask.lshr(ShAmtV), DL, SrcVT);
2544 insertDAGNode(*CurDAG, N, Res);
2545 Res = CurDAG->getNode(ISD::AND, DL, SrcVT, ShlSrc, Res);
2546 insertDAGNode(*CurDAG, N, Res);
2548 SDValue Zext = CurDAG->getNode(ISD::ZERO_EXTEND, DL, VT, Res);
2549 insertDAGNode(*CurDAG, N, Zext);
2550 SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, ShlAmt);
2551 insertDAGNode(*CurDAG, N, NewShl);
2553 // Convert the shift to scale factor.
2554 AM.Scale = 1 << ShAmtV;
2557 CurDAG->ReplaceAllUsesWith(N, NewShl);
2558 CurDAG->RemoveDeadNode(N.getNode());
2562 // Try to fold the mask and shift into an extract and scale.
2563 if (Src.getOpcode() == ISD::SRL && !Mask.isAllOnes() &&
2564 !foldMaskAndShiftToExtract(*CurDAG, N, Mask.getZExtValue(), Src,
2565 Src.getOperand(0), AM))
2572 return matchAddressBase(N, AM);
2575 /// Helper for MatchAddress. Add the specified node to the
2576 /// specified addressing mode without any further recursion.
2577 bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
2578 // Is the base register already occupied?
2579 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
2580 // If so, check to see if the scale index register is set.
2581 if (!AM.IndexReg.getNode()) {
2587 // Otherwise, we cannot select it.
2591 // Default, generate it as a register.
2592 AM.BaseType = X86ISelAddressMode::RegBase;
2597 bool X86DAGToDAGISel::matchVectorAddressRecursively(SDValue N,
2598 X86ISelAddressMode &AM,
2602 dbgs() << "MatchVectorAddress: ";
2607 return matchAddressBase(N, AM);
2609 // TODO: Support other operations.
2610 switch (N.getOpcode()) {
2611 case ISD::Constant: {
2612 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2613 if (!foldOffsetIntoAddress(Val, AM))
2617 case X86ISD::Wrapper:
2618 if (!matchWrapper(N, AM))
2622 // Add an artificial use to this node so that we can keep track of
2623 // it if it gets CSE'd with a different node.
2624 HandleSDNode Handle(N);
2626 X86ISelAddressMode Backup = AM;
2627 if (!matchVectorAddressRecursively(N.getOperand(0), AM, Depth + 1) &&
2628 !matchVectorAddressRecursively(Handle.getValue().getOperand(1), AM,
2633 // Try again after commuting the operands.
2634 if (!matchVectorAddressRecursively(Handle.getValue().getOperand(1), AM,
2636 !matchVectorAddressRecursively(Handle.getValue().getOperand(0), AM,
2641 N = Handle.getValue();
2646 return matchAddressBase(N, AM);
2649 /// Helper for selectVectorAddr. Handles things that can be folded into a
2650 /// gather/scatter address. The index register and scale should have already
2652 bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
2653 return matchVectorAddressRecursively(N, AM, 0);
2656 bool X86DAGToDAGISel::selectVectorAddr(MemSDNode *Parent, SDValue BasePtr,
2657 SDValue IndexOp, SDValue ScaleOp,
2658 SDValue &Base, SDValue &Scale,
2659 SDValue &Index, SDValue &Disp,
2661 X86ISelAddressMode AM;
2662 AM.IndexReg = IndexOp;
2663 AM.Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
2665 unsigned AddrSpace = Parent->getPointerInfo().getAddrSpace();
2666 if (AddrSpace == X86AS::GS)
2667 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2668 if (AddrSpace == X86AS::FS)
2669 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2670 if (AddrSpace == X86AS::SS)
2671 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2674 MVT VT = BasePtr.getSimpleValueType();
2676 // Try to match into the base and displacement fields.
2677 if (matchVectorAddress(BasePtr, AM))
2680 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2684 /// Returns true if it is able to pattern match an addressing mode.
2685 /// It returns the operands which make up the maximal addressing mode it can
2686 /// match by reference.
2688 /// Parent is the parent node of the addr operand that is being matched. It
2689 /// is always a load, store, atomic node, or null. It is only null when
2690 /// checking memory operands for inline asm nodes.
2691 bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
2692 SDValue &Scale, SDValue &Index,
2693 SDValue &Disp, SDValue &Segment) {
2694 X86ISelAddressMode AM;
2697 // This list of opcodes are all the nodes that have an "addr:$ptr" operand
2698 // that are not a MemSDNode, and thus don't have proper addrspace info.
2699 Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
2700 Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
2701 Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
2702 Parent->getOpcode() != X86ISD::ENQCMD && // Fixme
2703 Parent->getOpcode() != X86ISD::ENQCMDS && // Fixme
2704 Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
2705 Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
2706 unsigned AddrSpace =
2707 cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
2708 if (AddrSpace == X86AS::GS)
2709 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2710 if (AddrSpace == X86AS::FS)
2711 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2712 if (AddrSpace == X86AS::SS)
2713 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2716 // Save the DL and VT before calling matchAddress, it can invalidate N.
2718 MVT VT = N.getSimpleValueType();
2720 if (matchAddress(N, AM))
2723 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2727 bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
2728 // In static codegen with small code model, we can get the address of a label
2729 // into a register with 'movl'
2730 if (N->getOpcode() != X86ISD::Wrapper)
2733 N = N.getOperand(0);
2735 // At least GNU as does not accept 'movl' for TPOFF relocations.
2736 // FIXME: We could use 'movl' when we know we are targeting MC.
2737 if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
2741 if (N->getOpcode() != ISD::TargetGlobalAddress)
2742 return TM.getCodeModel() == CodeModel::Small;
2744 std::optional<ConstantRange> CR =
2745 cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
2747 return TM.getCodeModel() == CodeModel::Small;
2749 return CR->getUnsignedMax().ult(1ull << 32);
2752 bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
2753 SDValue &Scale, SDValue &Index,
2754 SDValue &Disp, SDValue &Segment) {
2755 // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
2758 if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
2761 auto *RN = dyn_cast<RegisterSDNode>(Base);
2762 if (RN && RN->getReg() == 0)
2763 Base = CurDAG->getRegister(0, MVT::i64);
2764 else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
2765 // Base could already be %rip, particularly in the x32 ABI.
2766 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2768 Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2772 RN = dyn_cast<RegisterSDNode>(Index);
2773 if (RN && RN->getReg() == 0)
2774 Index = CurDAG->getRegister(0, MVT::i64);
2776 assert(Index.getValueType() == MVT::i32 &&
2777 "Expect to be extending 32-bit registers for use in LEA");
2778 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2780 Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2787 /// Calls SelectAddr and determines if the maximal addressing
2788 /// mode it matches can be cost effectively emitted as an LEA instruction.
2789 bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
2790 SDValue &Base, SDValue &Scale,
2791 SDValue &Index, SDValue &Disp,
2793 X86ISelAddressMode AM;
2795 // Save the DL and VT before calling matchAddress, it can invalidate N.
2797 MVT VT = N.getSimpleValueType();
2799 // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
2801 SDValue Copy = AM.Segment;
2802 SDValue T = CurDAG->getRegister(0, MVT::i32);
2804 if (matchAddress(N, AM))
2806 assert (T == AM.Segment);
2809 unsigned Complexity = 0;
2810 if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode())
2812 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2815 if (AM.IndexReg.getNode())
2818 // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
2823 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
2824 // to a LEA. This is determined with some experimentation but is by no means
2825 // optimal (especially for code size consideration). LEA is nice because of
2826 // its three-address nature. Tweak the cost function again when we can run
2827 // convertToThreeAddress() at register allocation time.
2828 if (AM.hasSymbolicDisplacement()) {
2829 // For X86-64, always use LEA to materialize RIP-relative addresses.
2830 if (Subtarget->is64Bit())
2836 // Heuristic: try harder to form an LEA from ADD if the operands set flags.
2837 // Unlike ADD, LEA does not affect flags, so we will be less likely to require
2838 // duplicating flag-producing instructions later in the pipeline.
2839 if (N.getOpcode() == ISD::ADD) {
2840 auto isMathWithFlags = [](SDValue V) {
2841 switch (V.getOpcode()) {
2848 /* TODO: These opcodes can be added safely, but we may want to justify
2849 their inclusion for different reasons (better for reg-alloc).
2854 // Value 1 is the flag output of the node - verify it's not dead.
2855 return !SDValue(V.getNode(), 1).use_empty();
2860 // TODO: We might want to factor in whether there's a load folding
2861 // opportunity for the math op that disappears with LEA.
2862 if (isMathWithFlags(N.getOperand(0)) || isMathWithFlags(N.getOperand(1)))
2869 // If it isn't worth using an LEA, reject it.
2870 if (Complexity <= 2)
2873 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2877 /// This is only run on TargetGlobalTLSAddress nodes.
2878 bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
2879 SDValue &Scale, SDValue &Index,
2880 SDValue &Disp, SDValue &Segment) {
2881 assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
2882 auto *GA = cast<GlobalAddressSDNode>(N);
2884 X86ISelAddressMode AM;
2885 AM.GV = GA->getGlobal();
2886 AM.Disp += GA->getOffset();
2887 AM.SymbolFlags = GA->getTargetFlags();
2889 if (Subtarget->is32Bit()) {
2891 AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
2894 MVT VT = N.getSimpleValueType();
2895 getAddressOperands(AM, SDLoc(N), VT, Base, Scale, Index, Disp, Segment);
2899 bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
2900 // Keep track of the original value type and whether this value was
2901 // truncated. If we see a truncation from pointer type to VT that truncates
2902 // bits that are known to be zero, we can use a narrow reference.
2903 EVT VT = N.getValueType();
2904 bool WasTruncated = false;
2905 if (N.getOpcode() == ISD::TRUNCATE) {
2906 WasTruncated = true;
2907 N = N.getOperand(0);
2910 if (N.getOpcode() != X86ISD::Wrapper)
2913 // We can only use non-GlobalValues as immediates if they were not truncated,
2914 // as we do not have any range information. If we have a GlobalValue and the
2915 // address was not truncated, we can select it as an operand directly.
2916 unsigned Opc = N.getOperand(0)->getOpcode();
2917 if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
2918 Op = N.getOperand(0);
2919 // We can only select the operand directly if we didn't have to look past a
2921 return !WasTruncated;
2924 // Check that the global's range fits into VT.
2925 auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
2926 std::optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2927 if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
2930 // Okay, we can use a narrow reference.
2931 Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
2932 GA->getOffset(), GA->getTargetFlags());
2936 bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
2937 SDValue &Base, SDValue &Scale,
2938 SDValue &Index, SDValue &Disp,
2940 assert(Root && P && "Unknown root/parent nodes");
2941 if (!ISD::isNON_EXTLoad(N.getNode()) ||
2942 !IsProfitableToFold(N, P, Root) ||
2943 !IsLegalToFold(N, P, Root, OptLevel))
2946 return selectAddr(N.getNode(),
2947 N.getOperand(1), Base, Scale, Index, Disp, Segment);
2950 bool X86DAGToDAGISel::tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
2951 SDValue &Base, SDValue &Scale,
2952 SDValue &Index, SDValue &Disp,
2954 assert(Root && P && "Unknown root/parent nodes");
2955 if (N->getOpcode() != X86ISD::VBROADCAST_LOAD ||
2956 !IsProfitableToFold(N, P, Root) ||
2957 !IsLegalToFold(N, P, Root, OptLevel))
2960 return selectAddr(N.getNode(),
2961 N.getOperand(1), Base, Scale, Index, Disp, Segment);
2964 /// Return an SDNode that returns the value of the global base register.
2965 /// Output instructions required to initialize the global base register,
2967 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
2968 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
2969 auto &DL = MF->getDataLayout();
2970 return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
2973 bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
2974 if (N->getOpcode() == ISD::TRUNCATE)
2975 N = N->getOperand(0).getNode();
2976 if (N->getOpcode() != X86ISD::Wrapper)
2979 auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
2983 std::optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2985 return Width == 32 && TM.getCodeModel() == CodeModel::Small;
2987 return CR->getSignedMin().sge(-1ull << Width) &&
2988 CR->getSignedMax().slt(1ull << Width);
2991 X86::CondCode X86DAGToDAGISel::getCondFromNode(SDNode *N) const {
2992 assert(N->isMachineOpcode() && "Unexpected node");
2993 unsigned Opc = N->getMachineOpcode();
2994 const MCInstrDesc &MCID = getInstrInfo()->get(Opc);
2995 int CondNo = X86::getCondSrcNoFromDesc(MCID);
2997 return X86::COND_INVALID;
2999 return static_cast<X86::CondCode>(N->getConstantOperandVal(CondNo));
3002 /// Test whether the given X86ISD::CMP node has any users that use a flag
3004 bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const {
3005 // Examine each user of the node.
3006 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
3008 // Only check things that use the flags.
3009 if (UI.getUse().getResNo() != Flags.getResNo())
3011 // Only examine CopyToReg uses that copy to EFLAGS.
3012 if (UI->getOpcode() != ISD::CopyToReg ||
3013 cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
3015 // Examine each user of the CopyToReg use.
3016 for (SDNode::use_iterator FlagUI = UI->use_begin(),
3017 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
3018 // Only examine the Flag result.
3019 if (FlagUI.getUse().getResNo() != 1) continue;
3020 // Anything unusual: assume conservatively.
3021 if (!FlagUI->isMachineOpcode()) return false;
3022 // Examine the condition code of the user.
3023 X86::CondCode CC = getCondFromNode(*FlagUI);
3026 // Comparisons which only use the zero flag.
3027 case X86::COND_E: case X86::COND_NE:
3029 // Anything else: assume conservatively.
3038 /// Test whether the given X86ISD::CMP node has any uses which require the SF
3039 /// flag to be accurate.
3040 bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const {
3041 // Examine each user of the node.
3042 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
3044 // Only check things that use the flags.
3045 if (UI.getUse().getResNo() != Flags.getResNo())
3047 // Only examine CopyToReg uses that copy to EFLAGS.
3048 if (UI->getOpcode() != ISD::CopyToReg ||
3049 cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
3051 // Examine each user of the CopyToReg use.
3052 for (SDNode::use_iterator FlagUI = UI->use_begin(),
3053 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
3054 // Only examine the Flag result.
3055 if (FlagUI.getUse().getResNo() != 1) continue;
3056 // Anything unusual: assume conservatively.
3057 if (!FlagUI->isMachineOpcode()) return false;
3058 // Examine the condition code of the user.
3059 X86::CondCode CC = getCondFromNode(*FlagUI);
3062 // Comparisons which don't examine the SF flag.
3063 case X86::COND_A: case X86::COND_AE:
3064 case X86::COND_B: case X86::COND_BE:
3065 case X86::COND_E: case X86::COND_NE:
3066 case X86::COND_O: case X86::COND_NO:
3067 case X86::COND_P: case X86::COND_NP:
3069 // Anything else: assume conservatively.
3078 static bool mayUseCarryFlag(X86::CondCode CC) {
3080 // Comparisons which don't examine the CF flag.
3081 case X86::COND_O: case X86::COND_NO:
3082 case X86::COND_E: case X86::COND_NE:
3083 case X86::COND_S: case X86::COND_NS:
3084 case X86::COND_P: case X86::COND_NP:
3085 case X86::COND_L: case X86::COND_GE:
3086 case X86::COND_G: case X86::COND_LE:
3088 // Anything else: assume conservatively.
3094 /// Test whether the given node which sets flags has any uses which require the
3095 /// CF flag to be accurate.
3096 bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const {
3097 // Examine each user of the node.
3098 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
3100 // Only check things that use the flags.
3101 if (UI.getUse().getResNo() != Flags.getResNo())
3104 unsigned UIOpc = UI->getOpcode();
3106 if (UIOpc == ISD::CopyToReg) {
3107 // Only examine CopyToReg uses that copy to EFLAGS.
3108 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
3110 // Examine each user of the CopyToReg use.
3111 for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
3112 FlagUI != FlagUE; ++FlagUI) {
3113 // Only examine the Flag result.
3114 if (FlagUI.getUse().getResNo() != 1)
3116 // Anything unusual: assume conservatively.
3117 if (!FlagUI->isMachineOpcode())
3119 // Examine the condition code of the user.
3120 X86::CondCode CC = getCondFromNode(*FlagUI);
3122 if (mayUseCarryFlag(CC))
3126 // This CopyToReg is ok. Move on to the next user.
3130 // This might be an unselected node. So look for the pre-isel opcodes that
3135 // Something unusual. Be conservative.
3137 case X86ISD::SETCC: CCOpNo = 0; break;
3138 case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
3139 case X86ISD::CMOV: CCOpNo = 2; break;
3140 case X86ISD::BRCOND: CCOpNo = 2; break;
3143 X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo);
3144 if (mayUseCarryFlag(CC))
3150 /// Check whether or not the chain ending in StoreNode is suitable for doing
3151 /// the {load; op; store} to modify transformation.
3152 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
3153 SDValue StoredVal, SelectionDAG *CurDAG,
3155 LoadSDNode *&LoadNode,
3156 SDValue &InputChain) {
3157 // Is the stored value result 0 of the operation?
3158 if (StoredVal.getResNo() != 0) return false;
3160 // Are there other uses of the operation other than the store?
3161 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
3163 // Is the store non-extending and non-indexed?
3164 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
3167 SDValue Load = StoredVal->getOperand(LoadOpNo);
3168 // Is the stored value a non-extending and non-indexed load?
3169 if (!ISD::isNormalLoad(Load.getNode())) return false;
3171 // Return LoadNode by reference.
3172 LoadNode = cast<LoadSDNode>(Load);
3174 // Is store the only read of the loaded value?
3175 if (!Load.hasOneUse())
3178 // Is the address of the store the same as the load?
3179 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
3180 LoadNode->getOffset() != StoreNode->getOffset())
3183 bool FoundLoad = false;
3184 SmallVector<SDValue, 4> ChainOps;
3185 SmallVector<const SDNode *, 4> LoopWorklist;
3186 SmallPtrSet<const SDNode *, 16> Visited;
3187 const unsigned int Max = 1024;
3189 // Visualization of Load-Op-Store fusion:
3190 // -------------------------
3192 // *-lines = Chain operand dependencies.
3193 // |-lines = Normal operand dependencies.
3194 // Dependencies flow down and right. n-suffix references multiple nodes.
3202 // * * \ | => A--LD_OP_ST
3210 // This merge induced dependences from: #1: Xn -> LD, OP, Zn
3214 // Ensure the transform is safe by checking for the dual
3215 // dependencies to make sure we do not induce a loop.
3217 // As LD is a predecessor to both OP and ST we can do this by checking:
3218 // a). if LD is a predecessor to a member of Xn or Yn.
3219 // b). if a Zn is a predecessor to ST.
3221 // However, (b) can only occur through being a chain predecessor to
3222 // ST, which is the same as Zn being a member or predecessor of Xn,
3223 // which is a subset of LD being a predecessor of Xn. So it's
3224 // subsumed by check (a).
3226 SDValue Chain = StoreNode->getChain();
3228 // Gather X elements in ChainOps.
3229 if (Chain == Load.getValue(1)) {
3231 ChainOps.push_back(Load.getOperand(0));
3232 } else if (Chain.getOpcode() == ISD::TokenFactor) {
3233 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
3234 SDValue Op = Chain.getOperand(i);
3235 if (Op == Load.getValue(1)) {
3237 // Drop Load, but keep its chain. No cycle check necessary.
3238 ChainOps.push_back(Load.getOperand(0));
3241 LoopWorklist.push_back(Op.getNode());
3242 ChainOps.push_back(Op);
3249 // Worklist is currently Xn. Add Yn to worklist.
3250 for (SDValue Op : StoredVal->ops())
3251 if (Op.getNode() != LoadNode)
3252 LoopWorklist.push_back(Op.getNode());
3254 // Check (a) if Load is a predecessor to Xn + Yn
3255 if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
3260 CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
3264 // Change a chain of {load; op; store} of the same value into a simple op
3265 // through memory of that value, if the uses of the modified value and its
3266 // address are suitable.
3268 // The tablegen pattern memory operand pattern is currently not able to match
3269 // the case where the EFLAGS on the original operation are used.
3271 // To move this to tablegen, we'll need to improve tablegen to allow flags to
3272 // be transferred from a node in the pattern to the result node, probably with
3273 // a new keyword. For example, we have this
3274 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
3275 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
3276 // (implicit EFLAGS)]>;
3277 // but maybe need something like this
3278 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
3279 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
3280 // (transferrable EFLAGS)]>;
3282 // Until then, we manually fold these and instruction select the operation
3284 bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
3285 auto *StoreNode = cast<StoreSDNode>(Node);
3286 SDValue StoredVal = StoreNode->getOperand(1);
3287 unsigned Opc = StoredVal->getOpcode();
3289 // Before we try to select anything, make sure this is memory operand size
3290 // and opcode we can handle. Note that this must match the code below that
3291 // actually lowers the opcodes.
3292 EVT MemVT = StoreNode->getMemoryVT();
3293 if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
3297 bool IsCommutable = false;
3298 bool IsNegate = false;
3303 IsNegate = isNullConstant(StoredVal.getOperand(0));
3312 IsCommutable = true;
3316 unsigned LoadOpNo = IsNegate ? 1 : 0;
3317 LoadSDNode *LoadNode = nullptr;
3319 if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3320 LoadNode, InputChain)) {
3324 // This operation is commutable, try the other operand.
3326 if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3327 LoadNode, InputChain))
3331 SDValue Base, Scale, Index, Disp, Segment;
3332 if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
3336 auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
3338 switch (MemVT.getSimpleVT().SimpleTy) {
3348 llvm_unreachable("Invalid size!");
3352 MachineSDNode *Result;
3357 unsigned NewOpc = SelectOpcode(X86::NEG64m, X86::NEG32m, X86::NEG16m,
3359 const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3360 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3366 // Try to match inc/dec.
3367 if (!Subtarget->slowIncDec() || CurDAG->shouldOptForSize()) {
3368 bool IsOne = isOneConstant(StoredVal.getOperand(1));
3369 bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
3370 // ADD/SUB with 1/-1 and carry flag isn't used can use inc/dec.
3371 if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
3373 ((Opc == X86ISD::ADD) == IsOne)
3374 ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
3375 : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
3376 const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3377 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3388 auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
3391 return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
3394 return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr,
3397 return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
3400 return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr,
3403 return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
3406 return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
3408 return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
3411 llvm_unreachable("Invalid opcode!");
3414 auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
3417 return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
3420 return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi,
3423 return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
3426 return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi,
3429 return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
3432 return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
3435 return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
3438 llvm_unreachable("Invalid opcode!");
3442 unsigned NewOpc = SelectRegOpcode(Opc);
3443 SDValue Operand = StoredVal->getOperand(1-LoadOpNo);
3445 // See if the operand is a constant that we can fold into an immediate
3447 if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
3448 int64_t OperandV = OperandC->getSExtValue();
3450 // Check if we can shrink the operand enough to fit in an immediate (or
3451 // fit into a smaller immediate) by negating it and switching the
3453 if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
3454 ((MemVT != MVT::i8 && !isInt<8>(OperandV) && isInt<8>(-OperandV)) ||
3455 (MemVT == MVT::i64 && !isInt<32>(OperandV) &&
3456 isInt<32>(-OperandV))) &&
3457 hasNoCarryFlagUses(StoredVal.getValue(1))) {
3458 OperandV = -OperandV;
3459 Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
3462 if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
3463 Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3464 NewOpc = SelectImmOpcode(Opc);
3468 if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
3470 CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
3471 StoredVal.getOperand(2), SDValue());
3473 const SDValue Ops[] = {Base, Scale, Index, Disp,
3474 Segment, Operand, CopyTo, CopyTo.getValue(1)};
3475 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3478 const SDValue Ops[] = {Base, Scale, Index, Disp,
3479 Segment, Operand, InputChain};
3480 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3486 llvm_unreachable("Invalid opcode!");
3489 MachineMemOperand *MemOps[] = {StoreNode->getMemOperand(),
3490 LoadNode->getMemOperand()};
3491 CurDAG->setNodeMemRefs(Result, MemOps);
3493 // Update Load Chain uses as well.
3494 ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
3495 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
3496 ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
3497 CurDAG->RemoveDeadNode(Node);
3501 // See if this is an X & Mask that we can match to BEXTR/BZHI.
3502 // Where Mask is one of the following patterns:
3503 // a) x & (1 << nbits) - 1
3504 // b) x & ~(-1 << nbits)
3505 // c) x & (-1 >> (32 - y))
3506 // d) x << (32 - y) >> (32 - y)
3507 // e) (1 << nbits) - 1
3508 bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
3510 (Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::AND ||
3511 Node->getOpcode() == ISD::SRL) &&
3512 "Should be either an and-mask, or right-shift after clearing high bits.");
3514 // BEXTR is BMI instruction, BZHI is BMI2 instruction. We need at least one.
3515 if (!Subtarget->hasBMI() && !Subtarget->hasBMI2())
3518 MVT NVT = Node->getSimpleValueType(0);
3520 // Only supported for 32 and 64 bits.
3521 if (NVT != MVT::i32 && NVT != MVT::i64)
3527 // If we have BMI2's BZHI, we are ok with muti-use patterns.
3528 // Else, if we only have BMI1's BEXTR, we require one-use.
3529 const bool AllowExtraUsesByDefault = Subtarget->hasBMI2();
3530 auto checkUses = [AllowExtraUsesByDefault](
3531 SDValue Op, unsigned NUses,
3532 std::optional<bool> AllowExtraUses) {
3533 return AllowExtraUses.value_or(AllowExtraUsesByDefault) ||
3534 Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
3536 auto checkOneUse = [checkUses](SDValue Op,
3537 std::optional<bool> AllowExtraUses =
3539 return checkUses(Op, 1, AllowExtraUses);
3541 auto checkTwoUse = [checkUses](SDValue Op,
3542 std::optional<bool> AllowExtraUses =
3544 return checkUses(Op, 2, AllowExtraUses);
3547 auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) {
3548 if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) {
3549 assert(V.getSimpleValueType() == MVT::i32 &&
3550 V.getOperand(0).getSimpleValueType() == MVT::i64 &&
3551 "Expected i64 -> i32 truncation");
3552 V = V.getOperand(0);
3557 // a) x & ((1 << nbits) + (-1))
3558 auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation, &NBits,
3559 &NegateNBits](SDValue Mask) -> bool {
3560 // Match `add`. Must only have one use!
3561 if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
3563 // We should be adding all-ones constant (i.e. subtracting one.)
3564 if (!isAllOnesConstant(Mask->getOperand(1)))
3566 // Match `1 << nbits`. Might be truncated. Must only have one use!
3567 SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3568 if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3570 if (!isOneConstant(M0->getOperand(0)))
3572 NBits = M0->getOperand(1);
3573 NegateNBits = false;
3577 auto isAllOnes = [this, peekThroughOneUseTruncation, NVT](SDValue V) {
3578 V = peekThroughOneUseTruncation(V);
3579 return CurDAG->MaskedValueIsAllOnes(
3580 V, APInt::getLowBitsSet(V.getSimpleValueType().getSizeInBits(),
3581 NVT.getSizeInBits()));
3584 // b) x & ~(-1 << nbits)
3585 auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
3586 &NBits, &NegateNBits](SDValue Mask) -> bool {
3587 // Match `~()`. Must only have one use!
3588 if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
3590 // The -1 only has to be all-ones for the final Node's NVT.
3591 if (!isAllOnes(Mask->getOperand(1)))
3593 // Match `-1 << nbits`. Might be truncated. Must only have one use!
3594 SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3595 if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3597 // The -1 only has to be all-ones for the final Node's NVT.
3598 if (!isAllOnes(M0->getOperand(0)))
3600 NBits = M0->getOperand(1);
3601 NegateNBits = false;
3605 // Try to match potentially-truncated shift amount as `(bitwidth - y)`,
3606 // or leave the shift amount as-is, but then we'll have to negate it.
3607 auto canonicalizeShiftAmt = [&NBits, &NegateNBits](SDValue ShiftAmt,
3608 unsigned Bitwidth) {
3611 // Skip over a truncate of the shift amount, if any.
3612 if (NBits.getOpcode() == ISD::TRUNCATE)
3613 NBits = NBits.getOperand(0);
3614 // Try to match the shift amount as (bitwidth - y). It should go away, too.
3615 // If it doesn't match, that's fine, we'll just negate it ourselves.
3616 if (NBits.getOpcode() != ISD::SUB)
3618 auto *V0 = dyn_cast<ConstantSDNode>(NBits.getOperand(0));
3619 if (!V0 || V0->getZExtValue() != Bitwidth)
3621 NBits = NBits.getOperand(1);
3622 NegateNBits = false;
3625 // c) x & (-1 >> z) but then we'll have to subtract z from bitwidth
3627 // c) x & (-1 >> (32 - y))
3628 auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation, &NegateNBits,
3629 canonicalizeShiftAmt](SDValue Mask) -> bool {
3630 // The mask itself may be truncated.
3631 Mask = peekThroughOneUseTruncation(Mask);
3632 unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
3633 // Match `l>>`. Must only have one use!
3634 if (Mask.getOpcode() != ISD::SRL || !checkOneUse(Mask))
3636 // We should be shifting truly all-ones constant.
3637 if (!isAllOnesConstant(Mask.getOperand(0)))
3639 SDValue M1 = Mask.getOperand(1);
3640 // The shift amount should not be used externally.
3641 if (!checkOneUse(M1))
3643 canonicalizeShiftAmt(M1, Bitwidth);
3644 // Pattern c. is non-canonical, and is expanded into pattern d. iff there
3645 // is no extra use of the mask. Clearly, there was one since we are here.
3646 // But at the same time, if we need to negate the shift amount,
3647 // then we don't want the mask to stick around, else it's unprofitable.
3648 return !NegateNBits;
3653 // d) x << z >> z but then we'll have to subtract z from bitwidth
3655 // d) x << (32 - y) >> (32 - y)
3656 auto matchPatternD = [checkOneUse, checkTwoUse, canonicalizeShiftAmt,
3657 AllowExtraUsesByDefault, &NegateNBits,
3658 &X](SDNode *Node) -> bool {
3659 if (Node->getOpcode() != ISD::SRL)
3661 SDValue N0 = Node->getOperand(0);
3662 if (N0->getOpcode() != ISD::SHL)
3664 unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
3665 SDValue N1 = Node->getOperand(1);
3666 SDValue N01 = N0->getOperand(1);
3667 // Both of the shifts must be by the exact same value.
3670 canonicalizeShiftAmt(N1, Bitwidth);
3671 // There should not be any external uses of the inner shift / shift amount.
3672 // Note that while we are generally okay with external uses given BMI2,
3673 // iff we need to negate the shift amount, we are not okay with extra uses.
3674 const bool AllowExtraUses = AllowExtraUsesByDefault && !NegateNBits;
3675 if (!checkOneUse(N0, AllowExtraUses) || !checkTwoUse(N1, AllowExtraUses))
3677 X = N0->getOperand(0);
3681 auto matchLowBitMask = [matchPatternA, matchPatternB,
3682 matchPatternC](SDValue Mask) -> bool {
3683 return matchPatternA(Mask) || matchPatternB(Mask) || matchPatternC(Mask);
3686 if (Node->getOpcode() == ISD::AND) {
3687 X = Node->getOperand(0);
3688 SDValue Mask = Node->getOperand(1);
3690 if (matchLowBitMask(Mask)) {
3694 if (!matchLowBitMask(Mask))
3697 } else if (matchLowBitMask(SDValue(Node, 0))) {
3698 X = CurDAG->getAllOnesConstant(SDLoc(Node), NVT);
3699 } else if (!matchPatternD(Node))
3702 // If we need to negate the shift amount, require BMI2 BZHI support.
3703 // It's just too unprofitable for BMI1 BEXTR.
3704 if (NegateNBits && !Subtarget->hasBMI2())
3709 // Truncate the shift amount.
3710 NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
3711 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3713 // Insert 8-bit NBits into lowest 8 bits of 32-bit register.
3714 // All the other bits are undefined, we do not care about them.
3715 SDValue ImplDef = SDValue(
3716 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32), 0);
3717 insertDAGNode(*CurDAG, SDValue(Node, 0), ImplDef);
3719 SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
3720 insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
3721 NBits = SDValue(CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
3722 MVT::i32, ImplDef, NBits, SRIdxVal),
3724 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3726 // We might have matched the amount of high bits to be cleared,
3727 // but we want the amount of low bits to be kept, so negate it then.
3729 SDValue BitWidthC = CurDAG->getConstant(NVT.getSizeInBits(), DL, MVT::i32);
3730 insertDAGNode(*CurDAG, SDValue(Node, 0), BitWidthC);
3732 NBits = CurDAG->getNode(ISD::SUB, DL, MVT::i32, BitWidthC, NBits);
3733 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3736 if (Subtarget->hasBMI2()) {
3737 // Great, just emit the the BZHI..
3738 if (NVT != MVT::i32) {
3739 // But have to place the bit count into the wide-enough register first.
3740 NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
3741 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3744 SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
3745 ReplaceNode(Node, Extract.getNode());
3746 SelectCode(Extract.getNode());
3750 // Else, if we do *NOT* have BMI2, let's find out if the if the 'X' is
3751 // *logically* shifted (potentially with one-use trunc inbetween),
3752 // and the truncation was the only use of the shift,
3753 // and if so look past one-use truncation.
3755 SDValue RealX = peekThroughOneUseTruncation(X);
3756 // FIXME: only if the shift is one-use?
3757 if (RealX != X && RealX.getOpcode() == ISD::SRL)
3761 MVT XVT = X.getSimpleValueType();
3763 // Else, emitting BEXTR requires one more step.
3764 // The 'control' of BEXTR has the pattern of:
3765 // [15...8 bit][ 7...0 bit] location
3766 // [ bit count][ shift] name
3767 // I.e. 0b000000011'00000001 means (x >> 0b1) & 0b11
3769 // Shift NBits left by 8 bits, thus producing 'control'.
3770 // This makes the low 8 bits to be zero.
3771 SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
3772 insertDAGNode(*CurDAG, SDValue(Node, 0), C8);
3773 SDValue Control = CurDAG->getNode(ISD::SHL, DL, MVT::i32, NBits, C8);
3774 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3776 // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
3777 // FIXME: only if the shift is one-use?
3778 if (X.getOpcode() == ISD::SRL) {
3779 SDValue ShiftAmt = X.getOperand(1);
3780 X = X.getOperand(0);
3782 assert(ShiftAmt.getValueType() == MVT::i8 &&
3783 "Expected shift amount to be i8");
3785 // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
3786 // We could zext to i16 in some form, but we intentionally don't do that.
3787 SDValue OrigShiftAmt = ShiftAmt;
3788 ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShiftAmt);
3789 insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);
3791 // And now 'or' these low 8 bits of shift amount into the 'control'.
3792 Control = CurDAG->getNode(ISD::OR, DL, MVT::i32, Control, ShiftAmt);
3793 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3796 // But have to place the 'control' into the wide-enough register first.
3797 if (XVT != MVT::i32) {
3798 Control = CurDAG->getNode(ISD::ANY_EXTEND, DL, XVT, Control);
3799 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3802 // And finally, form the BEXTR itself.
3803 SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);
3805 // The 'X' was originally truncated. Do that now.
3807 insertDAGNode(*CurDAG, SDValue(Node, 0), Extract);
3808 Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
3811 ReplaceNode(Node, Extract.getNode());
3812 SelectCode(Extract.getNode());
3817 // See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
3818 MachineSDNode *X86DAGToDAGISel::matchBEXTRFromAndImm(SDNode *Node) {
3819 MVT NVT = Node->getSimpleValueType(0);
3822 SDValue N0 = Node->getOperand(0);
3823 SDValue N1 = Node->getOperand(1);
3825 // If we have TBM we can use an immediate for the control. If we have BMI
3826 // we should only do this if the BEXTR instruction is implemented well.
3827 // Otherwise moving the control into a register makes this more costly.
3828 // TODO: Maybe load folding, greater than 32-bit masks, or a guarantee of LICM
3829 // hoisting the move immediate would make it worthwhile with a less optimal
3832 Subtarget->hasTBM() || (Subtarget->hasBMI() && Subtarget->hasFastBEXTR());
3833 if (!PreferBEXTR && !Subtarget->hasBMI2())
3836 // Must have a shift right.
3837 if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
3840 // Shift can't have additional users.
3841 if (!N0->hasOneUse())
3844 // Only supported for 32 and 64 bits.
3845 if (NVT != MVT::i32 && NVT != MVT::i64)
3848 // Shift amount and RHS of and must be constant.
3849 auto *MaskCst = dyn_cast<ConstantSDNode>(N1);
3850 auto *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
3851 if (!MaskCst || !ShiftCst)
3854 // And RHS must be a mask.
3855 uint64_t Mask = MaskCst->getZExtValue();
3856 if (!isMask_64(Mask))
3859 uint64_t Shift = ShiftCst->getZExtValue();
3860 uint64_t MaskSize = llvm::popcount(Mask);
3862 // Don't interfere with something that can be handled by extracting AH.
3863 // TODO: If we are able to fold a load, BEXTR might still be better than AH.
3864 if (Shift == 8 && MaskSize == 8)
3867 // Make sure we are only using bits that were in the original value, not
3869 if (Shift + MaskSize > NVT.getSizeInBits())
3872 // BZHI, if available, is always fast, unlike BEXTR. But even if we decide
3873 // that we can't use BEXTR, it is only worthwhile using BZHI if the mask
3874 // does not fit into 32 bits. Load folding is not a sufficient reason.
3875 if (!PreferBEXTR && MaskSize <= 32)
3879 unsigned ROpc, MOpc;
3882 assert(Subtarget->hasBMI2() && "We must have BMI2's BZHI then.");
3883 // If we can't make use of BEXTR then we can't fuse shift+mask stages.
3884 // Let's perform the mask first, and apply shift later. Note that we need to
3885 // widen the mask to account for the fact that we'll apply shift afterwards!
3886 Control = CurDAG->getTargetConstant(Shift + MaskSize, dl, NVT);
3887 ROpc = NVT == MVT::i64 ? X86::BZHI64rr : X86::BZHI32rr;
3888 MOpc = NVT == MVT::i64 ? X86::BZHI64rm : X86::BZHI32rm;
3889 unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3890 Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3892 // The 'control' of BEXTR has the pattern of:
3893 // [15...8 bit][ 7...0 bit] location
3894 // [ bit count][ shift] name
3895 // I.e. 0b000000011'00000001 means (x >> 0b1) & 0b11
3896 Control = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
3897 if (Subtarget->hasTBM()) {
3898 ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
3899 MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;
3901 assert(Subtarget->hasBMI() && "We must have BMI1's BEXTR then.");
3902 // BMI requires the immediate to placed in a register.
3903 ROpc = NVT == MVT::i64 ? X86::BEXTR64rr : X86::BEXTR32rr;
3904 MOpc = NVT == MVT::i64 ? X86::BEXTR64rm : X86::BEXTR32rm;
3905 unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3906 Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3910 MachineSDNode *NewNode;
3911 SDValue Input = N0->getOperand(0);
3912 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3913 if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3915 Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Control, Input.getOperand(0)};
3916 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
3917 NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3918 // Update the chain.
3919 ReplaceUses(Input.getValue(1), SDValue(NewNode, 2));
3920 // Record the mem-refs
3921 CurDAG->setNodeMemRefs(NewNode, {cast<LoadSDNode>(Input)->getMemOperand()});
3923 NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, MVT::i32, Input, Control);
3927 // We still need to apply the shift.
3928 SDValue ShAmt = CurDAG->getTargetConstant(Shift, dl, NVT);
3929 unsigned NewOpc = NVT == MVT::i64 ? X86::SHR64ri : X86::SHR32ri;
3931 CurDAG->getMachineNode(NewOpc, dl, NVT, SDValue(NewNode, 0), ShAmt);
3937 // Emit a PCMISTR(I/M) instruction.
3938 MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
3939 bool MayFoldLoad, const SDLoc &dl,
3940 MVT VT, SDNode *Node) {
3941 SDValue N0 = Node->getOperand(0);
3942 SDValue N1 = Node->getOperand(1);
3943 SDValue Imm = Node->getOperand(2);
3944 auto *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3945 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3947 // Try to fold a load. No need to check alignment.
3948 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3949 if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3950 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3952 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
3953 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3954 // Update the chain.
3955 ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
3956 // Record the mem-refs
3957 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
3961 SDValue Ops[] = { N0, N1, Imm };
3962 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
3963 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3967 // Emit a PCMESTR(I/M) instruction. Also return the Glue result in case we need
3968 // to emit a second instruction after this one. This is needed since we have two
3969 // copyToReg nodes glued before this and we need to continue that glue through.
3970 MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
3971 bool MayFoldLoad, const SDLoc &dl,
3972 MVT VT, SDNode *Node,
3974 SDValue N0 = Node->getOperand(0);
3975 SDValue N2 = Node->getOperand(2);
3976 SDValue Imm = Node->getOperand(4);
3977 auto *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3978 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3980 // Try to fold a load. No need to check alignment.
3981 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3982 if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3983 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3984 N2.getOperand(0), InGlue };
3985 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
3986 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3987 InGlue = SDValue(CNode, 3);
3988 // Update the chain.
3989 ReplaceUses(N2.getValue(1), SDValue(CNode, 2));
3990 // Record the mem-refs
3991 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()});
3995 SDValue Ops[] = { N0, N2, Imm, InGlue };
3996 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
3997 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3998 InGlue = SDValue(CNode, 2);
4002 bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
4003 EVT VT = N->getValueType(0);
4005 // Only handle scalar shifts.
4009 // Narrower shifts only mask to 5 bits in hardware.
4010 unsigned Size = VT == MVT::i64 ? 64 : 32;
4012 SDValue OrigShiftAmt = N->getOperand(1);
4013 SDValue ShiftAmt = OrigShiftAmt;
4016 // Skip over a truncate of the shift amount.
4017 if (ShiftAmt->getOpcode() == ISD::TRUNCATE)
4018 ShiftAmt = ShiftAmt->getOperand(0);
4020 // This function is called after X86DAGToDAGISel::matchBitExtract(),
4021 // so we are not afraid that we might mess up BZHI/BEXTR pattern.
4023 SDValue NewShiftAmt;
4024 if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB ||
4025 ShiftAmt->getOpcode() == ISD::XOR) {
4026 SDValue Add0 = ShiftAmt->getOperand(0);
4027 SDValue Add1 = ShiftAmt->getOperand(1);
4028 auto *Add0C = dyn_cast<ConstantSDNode>(Add0);
4029 auto *Add1C = dyn_cast<ConstantSDNode>(Add1);
4030 // If we are shifting by X+/-/^N where N == 0 mod Size, then just shift by X
4031 // to avoid the ADD/SUB/XOR.
4032 if (Add1C && Add1C->getAPIntValue().urem(Size) == 0) {
4035 } else if (ShiftAmt->getOpcode() != ISD::ADD && ShiftAmt.hasOneUse() &&
4036 ((Add0C && Add0C->getAPIntValue().urem(Size) == Size - 1) ||
4037 (Add1C && Add1C->getAPIntValue().urem(Size) == Size - 1))) {
4038 // If we are doing a NOT on just the lower bits with (Size*N-1) -/^ X
4039 // we can replace it with a NOT. In the XOR case it may save some code
4040 // size, in the SUB case it also may save a move.
4041 assert(Add0C == nullptr || Add1C == nullptr);
4043 // We can only do N-X, not X-N
4044 if (ShiftAmt->getOpcode() == ISD::SUB && Add0C == nullptr)
4047 EVT OpVT = ShiftAmt.getValueType();
4049 SDValue AllOnes = CurDAG->getAllOnesConstant(DL, OpVT);
4050 NewShiftAmt = CurDAG->getNode(ISD::XOR, DL, OpVT,
4051 Add0C == nullptr ? Add0 : Add1, AllOnes);
4052 insertDAGNode(*CurDAG, OrigShiftAmt, AllOnes);
4053 insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
4054 // If we are shifting by N-X where N == 0 mod Size, then just shift by
4055 // -X to generate a NEG instead of a SUB of a constant.
4056 } else if (ShiftAmt->getOpcode() == ISD::SUB && Add0C &&
4057 Add0C->getZExtValue() != 0) {
4058 EVT SubVT = ShiftAmt.getValueType();
4060 if (Add0C->getZExtValue() % Size == 0)
4062 else if (ShiftAmt.hasOneUse() && Size == 64 &&
4063 Add0C->getZExtValue() % 32 == 0) {
4064 // We have a 64-bit shift by (n*32-x), turn it into -(x+n*32).
4065 // This is mainly beneficial if we already compute (x+n*32).
4066 if (Add1.getOpcode() == ISD::TRUNCATE) {
4067 Add1 = Add1.getOperand(0);
4068 SubVT = Add1.getValueType();
4070 if (Add0.getValueType() != SubVT) {
4071 Add0 = CurDAG->getZExtOrTrunc(Add0, DL, SubVT);
4072 insertDAGNode(*CurDAG, OrigShiftAmt, Add0);
4075 X = CurDAG->getNode(ISD::ADD, DL, SubVT, Add1, Add0);
4076 insertDAGNode(*CurDAG, OrigShiftAmt, X);
4079 // Insert a negate op.
4080 // TODO: This isn't guaranteed to replace the sub if there is a logic cone
4081 // that uses it that's not a shift.
4082 SDValue Zero = CurDAG->getConstant(0, DL, SubVT);
4083 SDValue Neg = CurDAG->getNode(ISD::SUB, DL, SubVT, Zero, X);
4086 // Insert these operands into a valid topological order so they can
4087 // get selected independently.
4088 insertDAGNode(*CurDAG, OrigShiftAmt, Zero);
4089 insertDAGNode(*CurDAG, OrigShiftAmt, Neg);
4095 if (NewShiftAmt.getValueType() != MVT::i8) {
4096 // Need to truncate the shift amount.
4097 NewShiftAmt = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NewShiftAmt);
4098 // Add to a correct topological ordering.
4099 insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
4102 // Insert a new mask to keep the shift amount legal. This should be removed
4103 // by isel patterns.
4104 NewShiftAmt = CurDAG->getNode(ISD::AND, DL, MVT::i8, NewShiftAmt,
4105 CurDAG->getConstant(Size - 1, DL, MVT::i8));
4106 // Place in a correct topological ordering.
4107 insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
4109 SDNode *UpdatedNode = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
4111 if (UpdatedNode != N) {
4112 // If we found an existing node, we should replace ourselves with that node
4113 // and wait for it to be selected after its other users.
4114 ReplaceNode(N, UpdatedNode);
4118 // If the original shift amount is now dead, delete it so that we don't run
4120 if (OrigShiftAmt.getNode()->use_empty())
4121 CurDAG->RemoveDeadNode(OrigShiftAmt.getNode());
4123 // Now that we've optimized the shift amount, defer to normal isel to get
4124 // load folding and legacy vs BMI2 selection without repeating it here.
4129 bool X86DAGToDAGISel::tryShrinkShlLogicImm(SDNode *N) {
4130 MVT NVT = N->getSimpleValueType(0);
4131 unsigned Opcode = N->getOpcode();
4134 // For operations of the form (x << C1) op C2, check if we can use a smaller
4135 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
4136 SDValue Shift = N->getOperand(0);
4137 SDValue N1 = N->getOperand(1);
4139 auto *Cst = dyn_cast<ConstantSDNode>(N1);
4143 int64_t Val = Cst->getSExtValue();
4145 // If we have an any_extend feeding the AND, look through it to see if there
4146 // is a shift behind it. But only if the AND doesn't use the extended bits.
4147 // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
4148 bool FoundAnyExtend = false;
4149 if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
4150 Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
4152 FoundAnyExtend = true;
4153 Shift = Shift.getOperand(0);
4156 if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
4159 // i8 is unshrinkable, i16 should be promoted to i32.
4160 if (NVT != MVT::i32 && NVT != MVT::i64)
4163 auto *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
4167 uint64_t ShAmt = ShlCst->getZExtValue();
4169 // Make sure that we don't change the operation by removing bits.
4170 // This only matters for OR and XOR, AND is unaffected.
4171 uint64_t RemovedBitsMask = (1ULL << ShAmt) - 1;
4172 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
4175 // Check the minimum bitwidth for the new constant.
4176 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
4177 auto CanShrinkImmediate = [&](int64_t &ShiftedVal) {
4178 if (Opcode == ISD::AND) {
4179 // AND32ri is the same as AND64ri32 with zext imm.
4180 // Try this before sign extended immediates below.
4181 ShiftedVal = (uint64_t)Val >> ShAmt;
4182 if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
4184 // Also swap order when the AND can become MOVZX.
4185 if (ShiftedVal == UINT8_MAX || ShiftedVal == UINT16_MAX)
4188 ShiftedVal = Val >> ShAmt;
4189 if ((!isInt<8>(Val) && isInt<8>(ShiftedVal)) ||
4190 (!isInt<32>(Val) && isInt<32>(ShiftedVal)))
4192 if (Opcode != ISD::AND) {
4193 // MOV32ri+OR64r/XOR64r is cheaper than MOV64ri64+OR64rr/XOR64rr
4194 ShiftedVal = (uint64_t)Val >> ShAmt;
4195 if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
4202 if (!CanShrinkImmediate(ShiftedVal))
4205 // Ok, we can reorder to get a smaller immediate.
4207 // But, its possible the original immediate allowed an AND to become MOVZX.
4208 // Doing this late due to avoid the MakedValueIsZero call as late as
4210 if (Opcode == ISD::AND) {
4211 // Find the smallest zext this could possibly be.
4212 unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits();
4213 ZExtWidth = llvm::bit_ceil(std::max(ZExtWidth, 8U));
4215 // Figure out which bits need to be zero to achieve that mask.
4216 APInt NeededMask = APInt::getLowBitsSet(NVT.getSizeInBits(),
4218 NeededMask &= ~Cst->getAPIntValue();
4220 if (CurDAG->MaskedValueIsZero(N->getOperand(0), NeededMask))
4224 SDValue X = Shift.getOperand(0);
4225 if (FoundAnyExtend) {
4226 SDValue NewX = CurDAG->getNode(ISD::ANY_EXTEND, dl, NVT, X);
4227 insertDAGNode(*CurDAG, SDValue(N, 0), NewX);
4231 SDValue NewCst = CurDAG->getConstant(ShiftedVal, dl, NVT);
4232 insertDAGNode(*CurDAG, SDValue(N, 0), NewCst);
4233 SDValue NewBinOp = CurDAG->getNode(Opcode, dl, NVT, X, NewCst);
4234 insertDAGNode(*CurDAG, SDValue(N, 0), NewBinOp);
4235 SDValue NewSHL = CurDAG->getNode(ISD::SHL, dl, NVT, NewBinOp,
4236 Shift.getOperand(1));
4237 ReplaceNode(N, NewSHL.getNode());
4238 SelectCode(NewSHL.getNode());
4242 bool X86DAGToDAGISel::matchVPTERNLOG(SDNode *Root, SDNode *ParentA,
4243 SDNode *ParentB, SDNode *ParentC,
4244 SDValue A, SDValue B, SDValue C,
4246 assert(A.isOperandOf(ParentA) && B.isOperandOf(ParentB) &&
4247 C.isOperandOf(ParentC) && "Incorrect parent node");
4249 auto tryFoldLoadOrBCast =
4250 [this](SDNode *Root, SDNode *P, SDValue &L, SDValue &Base, SDValue &Scale,
4251 SDValue &Index, SDValue &Disp, SDValue &Segment) {
4252 if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment))
4255 // Not a load, check for broadcast which may be behind a bitcast.
4256 if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) {
4258 L = L.getOperand(0);
4261 if (L.getOpcode() != X86ISD::VBROADCAST_LOAD)
4264 // Only 32 and 64 bit broadcasts are supported.
4265 auto *MemIntr = cast<MemIntrinsicSDNode>(L);
4266 unsigned Size = MemIntr->getMemoryVT().getSizeInBits();
4267 if (Size != 32 && Size != 64)
4270 return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
4273 bool FoldedLoad = false;
4274 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4275 if (tryFoldLoadOrBCast(Root, ParentC, C, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4277 } else if (tryFoldLoadOrBCast(Root, ParentA, A, Tmp0, Tmp1, Tmp2, Tmp3,
4281 // Swap bits 1/4 and 3/6.
4282 uint8_t OldImm = Imm;
4283 Imm = OldImm & 0xa5;
4284 if (OldImm & 0x02) Imm |= 0x10;
4285 if (OldImm & 0x10) Imm |= 0x02;
4286 if (OldImm & 0x08) Imm |= 0x40;
4287 if (OldImm & 0x40) Imm |= 0x08;
4288 } else if (tryFoldLoadOrBCast(Root, ParentB, B, Tmp0, Tmp1, Tmp2, Tmp3,
4292 // Swap bits 1/2 and 5/6.
4293 uint8_t OldImm = Imm;
4294 Imm = OldImm & 0x99;
4295 if (OldImm & 0x02) Imm |= 0x04;
4296 if (OldImm & 0x04) Imm |= 0x02;
4297 if (OldImm & 0x20) Imm |= 0x40;
4298 if (OldImm & 0x40) Imm |= 0x20;
4303 SDValue TImm = CurDAG->getTargetConstant(Imm, DL, MVT::i8);
4305 MVT NVT = Root->getSimpleValueType(0);
4307 MachineSDNode *MNode;
4309 SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other);
4312 if (C.getOpcode() == X86ISD::VBROADCAST_LOAD) {
4313 auto *MemIntr = cast<MemIntrinsicSDNode>(C);
4314 unsigned EltSize = MemIntr->getMemoryVT().getSizeInBits();
4315 assert((EltSize == 32 || EltSize == 64) && "Unexpected broadcast size!");
4317 bool UseD = EltSize == 32;
4318 if (NVT.is128BitVector())
4319 Opc = UseD ? X86::VPTERNLOGDZ128rmbi : X86::VPTERNLOGQZ128rmbi;
4320 else if (NVT.is256BitVector())
4321 Opc = UseD ? X86::VPTERNLOGDZ256rmbi : X86::VPTERNLOGQZ256rmbi;
4322 else if (NVT.is512BitVector())
4323 Opc = UseD ? X86::VPTERNLOGDZrmbi : X86::VPTERNLOGQZrmbi;
4325 llvm_unreachable("Unexpected vector size!");
4327 bool UseD = NVT.getVectorElementType() == MVT::i32;
4328 if (NVT.is128BitVector())
4329 Opc = UseD ? X86::VPTERNLOGDZ128rmi : X86::VPTERNLOGQZ128rmi;
4330 else if (NVT.is256BitVector())
4331 Opc = UseD ? X86::VPTERNLOGDZ256rmi : X86::VPTERNLOGQZ256rmi;
4332 else if (NVT.is512BitVector())
4333 Opc = UseD ? X86::VPTERNLOGDZrmi : X86::VPTERNLOGQZrmi;
4335 llvm_unreachable("Unexpected vector size!");
4338 SDValue Ops[] = {A, B, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, TImm, C.getOperand(0)};
4339 MNode = CurDAG->getMachineNode(Opc, DL, VTs, Ops);
4341 // Update the chain.
4342 ReplaceUses(C.getValue(1), SDValue(MNode, 1));
4343 // Record the mem-refs
4344 CurDAG->setNodeMemRefs(MNode, {cast<MemSDNode>(C)->getMemOperand()});
4346 bool UseD = NVT.getVectorElementType() == MVT::i32;
4348 if (NVT.is128BitVector())
4349 Opc = UseD ? X86::VPTERNLOGDZ128rri : X86::VPTERNLOGQZ128rri;
4350 else if (NVT.is256BitVector())
4351 Opc = UseD ? X86::VPTERNLOGDZ256rri : X86::VPTERNLOGQZ256rri;
4352 else if (NVT.is512BitVector())
4353 Opc = UseD ? X86::VPTERNLOGDZrri : X86::VPTERNLOGQZrri;
4355 llvm_unreachable("Unexpected vector size!");
4357 MNode = CurDAG->getMachineNode(Opc, DL, NVT, {A, B, C, TImm});
4360 ReplaceUses(SDValue(Root, 0), SDValue(MNode, 0));
4361 CurDAG->RemoveDeadNode(Root);
4365 // Try to match two logic ops to a VPTERNLOG.
4366 // FIXME: Handle more complex patterns that use an operand more than once?
4367 bool X86DAGToDAGISel::tryVPTERNLOG(SDNode *N) {
4368 MVT NVT = N->getSimpleValueType(0);
4370 // Make sure we support VPTERNLOG.
4371 if (!NVT.isVector() || !Subtarget->hasAVX512() ||
4372 NVT.getVectorElementType() == MVT::i1)
4375 // We need VLX for 128/256-bit.
4376 if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
4379 SDValue N0 = N->getOperand(0);
4380 SDValue N1 = N->getOperand(1);
4382 auto getFoldableLogicOp = [](SDValue Op) {
4383 // Peek through single use bitcast.
4384 if (Op.getOpcode() == ISD::BITCAST && Op.hasOneUse())
4385 Op = Op.getOperand(0);
4387 if (!Op.hasOneUse())
4390 unsigned Opc = Op.getOpcode();
4391 if (Opc == ISD::AND || Opc == ISD::OR || Opc == ISD::XOR ||
4392 Opc == X86ISD::ANDNP)
4398 SDValue A, FoldableOp;
4399 if ((FoldableOp = getFoldableLogicOp(N1))) {
4401 } else if ((FoldableOp = getFoldableLogicOp(N0))) {
4406 SDValue B = FoldableOp.getOperand(0);
4407 SDValue C = FoldableOp.getOperand(1);
4408 SDNode *ParentA = N;
4409 SDNode *ParentB = FoldableOp.getNode();
4410 SDNode *ParentC = FoldableOp.getNode();
4412 // We can build the appropriate control immediate by performing the logic
4413 // operation we're matching using these constants for A, B, and C.
4414 uint8_t TernlogMagicA = 0xf0;
4415 uint8_t TernlogMagicB = 0xcc;
4416 uint8_t TernlogMagicC = 0xaa;
4418 // Some of the inputs may be inverted, peek through them and invert the
4419 // magic values accordingly.
4420 // TODO: There may be a bitcast before the xor that we should peek through.
4421 auto PeekThroughNot = [](SDValue &Op, SDNode *&Parent, uint8_t &Magic) {
4422 if (Op.getOpcode() == ISD::XOR && Op.hasOneUse() &&
4423 ISD::isBuildVectorAllOnes(Op.getOperand(1).getNode())) {
4425 Parent = Op.getNode();
4426 Op = Op.getOperand(0);
4430 PeekThroughNot(A, ParentA, TernlogMagicA);
4431 PeekThroughNot(B, ParentB, TernlogMagicB);
4432 PeekThroughNot(C, ParentC, TernlogMagicC);
4435 switch (FoldableOp.getOpcode()) {
4436 default: llvm_unreachable("Unexpected opcode!");
4437 case ISD::AND: Imm = TernlogMagicB & TernlogMagicC; break;
4438 case ISD::OR: Imm = TernlogMagicB | TernlogMagicC; break;
4439 case ISD::XOR: Imm = TernlogMagicB ^ TernlogMagicC; break;
4440 case X86ISD::ANDNP: Imm = ~(TernlogMagicB) & TernlogMagicC; break;
4443 switch (N->getOpcode()) {
4444 default: llvm_unreachable("Unexpected opcode!");
4447 Imm &= ~TernlogMagicA;
4449 Imm = ~(Imm) & TernlogMagicA;
4451 case ISD::AND: Imm &= TernlogMagicA; break;
4452 case ISD::OR: Imm |= TernlogMagicA; break;
4453 case ISD::XOR: Imm ^= TernlogMagicA; break;
4456 return matchVPTERNLOG(N, ParentA, ParentB, ParentC, A, B, C, Imm);
4459 /// If the high bits of an 'and' operand are known zero, try setting the
4460 /// high bits of an 'and' constant operand to produce a smaller encoding by
4461 /// creating a small, sign-extended negative immediate rather than a large
4462 /// positive one. This reverses a transform in SimplifyDemandedBits that
4463 /// shrinks mask constants by clearing bits. There is also a possibility that
4464 /// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
4465 /// case, just replace the 'and'. Return 'true' if the node is replaced.
4466 bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
4467 // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
4468 // have immediate operands.
4469 MVT VT = And->getSimpleValueType(0);
4470 if (VT != MVT::i32 && VT != MVT::i64)
4473 auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
4477 // Bail out if the mask constant is already negative. It's can't shrink more.
4478 // If the upper 32 bits of a 64 bit mask are all zeros, we have special isel
4479 // patterns to use a 32-bit and instead of a 64-bit and by relying on the
4480 // implicit zeroing of 32 bit ops. So we should check if the lower 32 bits
4481 // are negative too.
4482 APInt MaskVal = And1C->getAPIntValue();
4483 unsigned MaskLZ = MaskVal.countl_zero();
4484 if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
4487 // Don't extend into the upper 32 bits of a 64 bit mask.
4488 if (VT == MVT::i64 && MaskLZ >= 32) {
4490 MaskVal = MaskVal.trunc(32);
4493 SDValue And0 = And->getOperand(0);
4494 APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
4495 APInt NegMaskVal = MaskVal | HighZeros;
4497 // If a negative constant would not allow a smaller encoding, there's no need
4498 // to continue. Only change the constant when we know it's a win.
4499 unsigned MinWidth = NegMaskVal.getSignificantBits();
4500 if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getSignificantBits() <= 32))
4503 // Extend masks if we truncated above.
4504 if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
4505 NegMaskVal = NegMaskVal.zext(64);
4506 HighZeros = HighZeros.zext(64);
4509 // The variable operand must be all zeros in the top bits to allow using the
4510 // new, negative constant as the mask.
4511 if (!CurDAG->MaskedValueIsZero(And0, HighZeros))
4514 // Check if the mask is -1. In that case, this is an unnecessary instruction
4515 // that escaped earlier analysis.
4516 if (NegMaskVal.isAllOnes()) {
4517 ReplaceNode(And, And0.getNode());
4521 // A negative mask allows a smaller encoding. Create a new 'and' node.
4522 SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT);
4523 insertDAGNode(*CurDAG, SDValue(And, 0), NewMask);
4524 SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask);
4525 ReplaceNode(And, NewAnd.getNode());
4526 SelectCode(NewAnd.getNode());
4530 static unsigned getVPTESTMOpc(MVT TestVT, bool IsTestN, bool FoldedLoad,
4531 bool FoldedBCast, bool Masked) {
4532 #define VPTESTM_CASE(VT, SUFFIX) \
4535 return IsTestN ? X86::VPTESTNM##SUFFIX##k: X86::VPTESTM##SUFFIX##k; \
4536 return IsTestN ? X86::VPTESTNM##SUFFIX : X86::VPTESTM##SUFFIX;
4539 #define VPTESTM_BROADCAST_CASES(SUFFIX) \
4540 default: llvm_unreachable("Unexpected VT!"); \
4541 VPTESTM_CASE(v4i32, DZ128##SUFFIX) \
4542 VPTESTM_CASE(v2i64, QZ128##SUFFIX) \
4543 VPTESTM_CASE(v8i32, DZ256##SUFFIX) \
4544 VPTESTM_CASE(v4i64, QZ256##SUFFIX) \
4545 VPTESTM_CASE(v16i32, DZ##SUFFIX) \
4546 VPTESTM_CASE(v8i64, QZ##SUFFIX)
4548 #define VPTESTM_FULL_CASES(SUFFIX) \
4549 VPTESTM_BROADCAST_CASES(SUFFIX) \
4550 VPTESTM_CASE(v16i8, BZ128##SUFFIX) \
4551 VPTESTM_CASE(v8i16, WZ128##SUFFIX) \
4552 VPTESTM_CASE(v32i8, BZ256##SUFFIX) \
4553 VPTESTM_CASE(v16i16, WZ256##SUFFIX) \
4554 VPTESTM_CASE(v64i8, BZ##SUFFIX) \
4555 VPTESTM_CASE(v32i16, WZ##SUFFIX)
4558 switch (TestVT.SimpleTy) {
4559 VPTESTM_BROADCAST_CASES(rmb)
4564 switch (TestVT.SimpleTy) {
4565 VPTESTM_FULL_CASES(rm)
4569 switch (TestVT.SimpleTy) {
4570 VPTESTM_FULL_CASES(rr)
4573 #undef VPTESTM_FULL_CASES
4574 #undef VPTESTM_BROADCAST_CASES
4578 // Try to create VPTESTM instruction. If InMask is not null, it will be used
4579 // to form a masked operation.
4580 bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
4582 assert(Subtarget->hasAVX512() && "Expected AVX512!");
4583 assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 &&
4586 // Look for equal and not equal compares.
4587 ISD::CondCode CC = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
4588 if (CC != ISD::SETEQ && CC != ISD::SETNE)
4591 SDValue SetccOp0 = Setcc.getOperand(0);
4592 SDValue SetccOp1 = Setcc.getOperand(1);
4594 // Canonicalize the all zero vector to the RHS.
4595 if (ISD::isBuildVectorAllZeros(SetccOp0.getNode()))
4596 std::swap(SetccOp0, SetccOp1);
4598 // See if we're comparing against zero.
4599 if (!ISD::isBuildVectorAllZeros(SetccOp1.getNode()))
4602 SDValue N0 = SetccOp0;
4604 MVT CmpVT = N0.getSimpleValueType();
4605 MVT CmpSVT = CmpVT.getVectorElementType();
4607 // Start with both operands the same. We'll try to refine this.
4612 // Look through single use bitcasts.
4613 SDValue N0Temp = N0;
4614 if (N0Temp.getOpcode() == ISD::BITCAST && N0Temp.hasOneUse())
4615 N0Temp = N0.getOperand(0);
4617 // Look for single use AND.
4618 if (N0Temp.getOpcode() == ISD::AND && N0Temp.hasOneUse()) {
4619 Src0 = N0Temp.getOperand(0);
4620 Src1 = N0Temp.getOperand(1);
4624 // Without VLX we need to widen the operation.
4625 bool Widen = !Subtarget->hasVLX() && !CmpVT.is512BitVector();
4627 auto tryFoldLoadOrBCast = [&](SDNode *Root, SDNode *P, SDValue &L,
4628 SDValue &Base, SDValue &Scale, SDValue &Index,
4629 SDValue &Disp, SDValue &Segment) {
4630 // If we need to widen, we can't fold the load.
4632 if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment))
4635 // If we didn't fold a load, try to match broadcast. No widening limitation
4636 // for this. But only 32 and 64 bit types are supported.
4637 if (CmpSVT != MVT::i32 && CmpSVT != MVT::i64)
4640 // Look through single use bitcasts.
4641 if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) {
4643 L = L.getOperand(0);
4646 if (L.getOpcode() != X86ISD::VBROADCAST_LOAD)
4649 auto *MemIntr = cast<MemIntrinsicSDNode>(L);
4650 if (MemIntr->getMemoryVT().getSizeInBits() != CmpSVT.getSizeInBits())
4653 return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
4656 // We can only fold loads if the sources are unique.
4657 bool CanFoldLoads = Src0 != Src1;
4659 bool FoldedLoad = false;
4660 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4662 FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src1, Tmp0, Tmp1, Tmp2,
4665 // And is commutative.
4666 FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src0, Tmp0, Tmp1,
4669 std::swap(Src0, Src1);
4673 bool FoldedBCast = FoldedLoad && Src1.getOpcode() == X86ISD::VBROADCAST_LOAD;
4675 bool IsMasked = InMask.getNode() != nullptr;
4679 MVT ResVT = Setcc.getSimpleValueType();
4682 // Widen the inputs using insert_subreg or copy_to_regclass.
4683 unsigned Scale = CmpVT.is128BitVector() ? 4 : 2;
4684 unsigned SubReg = CmpVT.is128BitVector() ? X86::sub_xmm : X86::sub_ymm;
4685 unsigned NumElts = CmpVT.getVectorNumElements() * Scale;
4686 CmpVT = MVT::getVectorVT(CmpSVT, NumElts);
4687 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
4688 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, dl,
4690 Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0);
4693 Src1 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src1);
4697 unsigned RegClass = TLI->getRegClassFor(MaskVT)->getID();
4698 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4699 InMask = SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4700 dl, MaskVT, InMask, RC), 0);
4704 bool IsTestN = CC == ISD::SETEQ;
4705 unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
4708 MachineSDNode *CNode;
4710 SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);
4713 SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4714 Src1.getOperand(0) };
4715 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4717 SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4718 Src1.getOperand(0) };
4719 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4722 // Update the chain.
4723 ReplaceUses(Src1.getValue(1), SDValue(CNode, 1));
4724 // Record the mem-refs
4725 CurDAG->setNodeMemRefs(CNode, {cast<MemSDNode>(Src1)->getMemOperand()});
4728 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1);
4730 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1);
4733 // If we widened, we need to shrink the mask VT.
4735 unsigned RegClass = TLI->getRegClassFor(ResVT)->getID();
4736 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4737 CNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4738 dl, ResVT, SDValue(CNode, 0), RC);
4741 ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
4742 CurDAG->RemoveDeadNode(Root);
4746 // Try to match the bitselect pattern (or (and A, B), (andn A, C)). Turn it
4748 bool X86DAGToDAGISel::tryMatchBitSelect(SDNode *N) {
4749 assert(N->getOpcode() == ISD::OR && "Unexpected opcode!");
4751 MVT NVT = N->getSimpleValueType(0);
4753 // Make sure we support VPTERNLOG.
4754 if (!NVT.isVector() || !Subtarget->hasAVX512())
4757 // We need VLX for 128/256-bit.
4758 if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
4761 SDValue N0 = N->getOperand(0);
4762 SDValue N1 = N->getOperand(1);
4764 // Canonicalize AND to LHS.
4765 if (N1.getOpcode() == ISD::AND)
4768 if (N0.getOpcode() != ISD::AND ||
4769 N1.getOpcode() != X86ISD::ANDNP ||
4770 !N0.hasOneUse() || !N1.hasOneUse())
4773 // ANDN is not commutable, use it to pick down A and C.
4774 SDValue A = N1.getOperand(0);
4775 SDValue C = N1.getOperand(1);
4777 // AND is commutable, if one operand matches A, the other operand is B.
4778 // Otherwise this isn't a match.
4780 if (N0.getOperand(0) == A)
4781 B = N0.getOperand(1);
4782 else if (N0.getOperand(1) == A)
4783 B = N0.getOperand(0);
4788 SDValue Imm = CurDAG->getTargetConstant(0xCA, dl, MVT::i8);
4789 SDValue Ternlog = CurDAG->getNode(X86ISD::VPTERNLOG, dl, NVT, A, B, C, Imm);
4790 ReplaceNode(N, Ternlog.getNode());
4792 return matchVPTERNLOG(Ternlog.getNode(), Ternlog.getNode(), Ternlog.getNode(),
4793 Ternlog.getNode(), A, B, C, 0xCA);
4796 void X86DAGToDAGISel::Select(SDNode *Node) {
4797 MVT NVT = Node->getSimpleValueType(0);
4798 unsigned Opcode = Node->getOpcode();
4801 if (Node->isMachineOpcode()) {
4802 LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
4803 Node->setNodeId(-1);
4804 return; // Already selected.
4809 case ISD::INTRINSIC_W_CHAIN: {
4810 unsigned IntNo = Node->getConstantOperandVal(1);
4813 case Intrinsic::x86_encodekey128:
4814 case Intrinsic::x86_encodekey256: {
4815 if (!Subtarget->hasKL())
4820 default: llvm_unreachable("Impossible intrinsic");
4821 case Intrinsic::x86_encodekey128: Opcode = X86::ENCODEKEY128; break;
4822 case Intrinsic::x86_encodekey256: Opcode = X86::ENCODEKEY256; break;
4825 SDValue Chain = Node->getOperand(0);
4826 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM0, Node->getOperand(3),
4828 if (Opcode == X86::ENCODEKEY256)
4829 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM1, Node->getOperand(4),
4832 MachineSDNode *Res = CurDAG->getMachineNode(
4833 Opcode, dl, Node->getVTList(),
4834 {Node->getOperand(2), Chain, Chain.getValue(1)});
4835 ReplaceNode(Node, Res);
4838 case Intrinsic::x86_tileloadd64_internal:
4839 case Intrinsic::x86_tileloaddt164_internal: {
4840 if (!Subtarget->hasAMXTILE())
4842 unsigned Opc = IntNo == Intrinsic::x86_tileloadd64_internal
4844 : X86::PTILELOADDT1V;
4845 // _tile_loadd_internal(row, col, buf, STRIDE)
4846 SDValue Base = Node->getOperand(4);
4847 SDValue Scale = getI8Imm(1, dl);
4848 SDValue Index = Node->getOperand(5);
4849 SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4850 SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4851 SDValue Chain = Node->getOperand(0);
4852 MachineSDNode *CNode;
4853 SDValue Ops[] = {Node->getOperand(2),
4854 Node->getOperand(3),
4861 CNode = CurDAG->getMachineNode(Opc, dl, {MVT::x86amx, MVT::Other}, Ops);
4862 ReplaceNode(Node, CNode);
4868 case ISD::INTRINSIC_VOID: {
4869 unsigned IntNo = Node->getConstantOperandVal(1);
4872 case Intrinsic::x86_sse3_monitor:
4873 case Intrinsic::x86_monitorx:
4874 case Intrinsic::x86_clzero: {
4875 bool Use64BitPtr = Node->getOperand(2).getValueType() == MVT::i64;
4879 default: llvm_unreachable("Unexpected intrinsic!");
4880 case Intrinsic::x86_sse3_monitor:
4881 if (!Subtarget->hasSSE3())
4883 Opc = Use64BitPtr ? X86::MONITOR64rrr : X86::MONITOR32rrr;
4885 case Intrinsic::x86_monitorx:
4886 if (!Subtarget->hasMWAITX())
4888 Opc = Use64BitPtr ? X86::MONITORX64rrr : X86::MONITORX32rrr;
4890 case Intrinsic::x86_clzero:
4891 if (!Subtarget->hasCLZERO())
4893 Opc = Use64BitPtr ? X86::CLZERO64r : X86::CLZERO32r;
4898 unsigned PtrReg = Use64BitPtr ? X86::RAX : X86::EAX;
4899 SDValue Chain = CurDAG->getCopyToReg(Node->getOperand(0), dl, PtrReg,
4900 Node->getOperand(2), SDValue());
4901 SDValue InGlue = Chain.getValue(1);
4903 if (IntNo == Intrinsic::x86_sse3_monitor ||
4904 IntNo == Intrinsic::x86_monitorx) {
4905 // Copy the other two operands to ECX and EDX.
4906 Chain = CurDAG->getCopyToReg(Chain, dl, X86::ECX, Node->getOperand(3),
4908 InGlue = Chain.getValue(1);
4909 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EDX, Node->getOperand(4),
4911 InGlue = Chain.getValue(1);
4914 MachineSDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
4916 ReplaceNode(Node, CNode);
4922 case Intrinsic::x86_tilestored64_internal: {
4923 unsigned Opc = X86::PTILESTOREDV;
4924 // _tile_stored_internal(row, col, buf, STRIDE, c)
4925 SDValue Base = Node->getOperand(4);
4926 SDValue Scale = getI8Imm(1, dl);
4927 SDValue Index = Node->getOperand(5);
4928 SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4929 SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4930 SDValue Chain = Node->getOperand(0);
4931 MachineSDNode *CNode;
4932 SDValue Ops[] = {Node->getOperand(2),
4933 Node->getOperand(3),
4939 Node->getOperand(6),
4941 CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4942 ReplaceNode(Node, CNode);
4945 case Intrinsic::x86_tileloadd64:
4946 case Intrinsic::x86_tileloaddt164:
4947 case Intrinsic::x86_tilestored64: {
4948 if (!Subtarget->hasAMXTILE())
4952 default: llvm_unreachable("Unexpected intrinsic!");
4953 case Intrinsic::x86_tileloadd64: Opc = X86::PTILELOADD; break;
4954 case Intrinsic::x86_tileloaddt164: Opc = X86::PTILELOADDT1; break;
4955 case Intrinsic::x86_tilestored64: Opc = X86::PTILESTORED; break;
4957 // FIXME: Match displacement and scale.
4958 unsigned TIndex = Node->getConstantOperandVal(2);
4959 SDValue TReg = getI8Imm(TIndex, dl);
4960 SDValue Base = Node->getOperand(3);
4961 SDValue Scale = getI8Imm(1, dl);
4962 SDValue Index = Node->getOperand(4);
4963 SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4964 SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4965 SDValue Chain = Node->getOperand(0);
4966 MachineSDNode *CNode;
4967 if (Opc == X86::PTILESTORED) {
4968 SDValue Ops[] = { Base, Scale, Index, Disp, Segment, TReg, Chain };
4969 CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4971 SDValue Ops[] = { TReg, Base, Scale, Index, Disp, Segment, Chain };
4972 CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4974 ReplaceNode(Node, CNode);
4981 case X86ISD::NT_BRIND: {
4982 if (Subtarget->isTargetNaCl())
4983 // NaCl has its own pass where jmp %r32 are converted to jmp %r64. We
4984 // leave the instruction alone.
4986 if (Subtarget->isTarget64BitILP32()) {
4987 // Converts a 32-bit register to a 64-bit, zero-extended version of
4988 // it. This is needed because x86-64 can do many things, but jmp %r32
4989 // ain't one of them.
4990 SDValue Target = Node->getOperand(1);
4991 assert(Target.getValueType() == MVT::i32 && "Unexpected VT!");
4992 SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, MVT::i64);
4993 SDValue Brind = CurDAG->getNode(Opcode, dl, MVT::Other,
4994 Node->getOperand(0), ZextTarget);
4995 ReplaceNode(Node, Brind.getNode());
4996 SelectCode(ZextTarget.getNode());
4997 SelectCode(Brind.getNode());
5002 case X86ISD::GlobalBaseReg:
5003 ReplaceNode(Node, getGlobalBaseReg());
5007 // Just drop all 128/256/512-bit bitcasts.
5008 if (NVT.is512BitVector() || NVT.is256BitVector() || NVT.is128BitVector() ||
5010 ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
5011 CurDAG->RemoveDeadNode(Node);
5017 if (matchBitExtract(Node))
5022 if (tryShiftAmountMod(Node))
5026 case X86ISD::VPTERNLOG: {
5027 uint8_t Imm = cast<ConstantSDNode>(Node->getOperand(3))->getZExtValue();
5028 if (matchVPTERNLOG(Node, Node, Node, Node, Node->getOperand(0),
5029 Node->getOperand(1), Node->getOperand(2), Imm))
5035 if (tryVPTERNLOG(Node))
5040 if (NVT.isVector() && NVT.getVectorElementType() == MVT::i1) {
5041 // Try to form a masked VPTESTM. Operands can be in either order.
5042 SDValue N0 = Node->getOperand(0);
5043 SDValue N1 = Node->getOperand(1);
5044 if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
5045 tryVPTESTM(Node, N0, N1))
5047 if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
5048 tryVPTESTM(Node, N1, N0))
5052 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(Node)) {
5053 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
5054 CurDAG->RemoveDeadNode(Node);
5057 if (matchBitExtract(Node))
5059 if (AndImmShrink && shrinkAndImmediate(Node))
5065 if (tryShrinkShlLogicImm(Node))
5067 if (Opcode == ISD::OR && tryMatchBitSelect(Node))
5069 if (tryVPTERNLOG(Node))
5074 if (Opcode == ISD::ADD && matchBitExtract(Node))
5078 // Try to avoid folding immediates with multiple uses for optsize.
5079 // This code tries to select to register form directly to avoid going
5080 // through the isel table which might fold the immediate. We can't change
5081 // the patterns on the add/sub/and/or/xor with immediate paterns in the
5082 // tablegen files to check immediate use count without making the patterns
5083 // unavailable to the fast-isel table.
5084 if (!CurDAG->shouldOptForSize())
5087 // Only handle i8/i16/i32/i64.
5088 if (NVT != MVT::i8 && NVT != MVT::i16 && NVT != MVT::i32 && NVT != MVT::i64)
5091 SDValue N0 = Node->getOperand(0);
5092 SDValue N1 = Node->getOperand(1);
5094 auto *Cst = dyn_cast<ConstantSDNode>(N1);
5098 int64_t Val = Cst->getSExtValue();
5100 // Make sure its an immediate that is considered foldable.
5101 // FIXME: Handle unsigned 32 bit immediates for 64-bit AND.
5102 if (!isInt<8>(Val) && !isInt<32>(Val))
5105 // If this can match to INC/DEC, let it go.
5106 if (Opcode == ISD::ADD && (Val == 1 || Val == -1))
5109 // Check if we should avoid folding this immediate.
5110 if (!shouldAvoidImmediateInstFormsForSize(N1.getNode()))
5113 // We should not fold the immediate. So we need a register form instead.
5114 unsigned ROpc, MOpc;
5115 switch (NVT.SimpleTy) {
5116 default: llvm_unreachable("Unexpected VT!");
5119 default: llvm_unreachable("Unexpected opcode!");
5120 case ISD::ADD: ROpc = X86::ADD8rr; MOpc = X86::ADD8rm; break;
5121 case ISD::SUB: ROpc = X86::SUB8rr; MOpc = X86::SUB8rm; break;
5122 case ISD::AND: ROpc = X86::AND8rr; MOpc = X86::AND8rm; break;
5123 case ISD::OR: ROpc = X86::OR8rr; MOpc = X86::OR8rm; break;
5124 case ISD::XOR: ROpc = X86::XOR8rr; MOpc = X86::XOR8rm; break;
5129 default: llvm_unreachable("Unexpected opcode!");
5130 case ISD::ADD: ROpc = X86::ADD16rr; MOpc = X86::ADD16rm; break;
5131 case ISD::SUB: ROpc = X86::SUB16rr; MOpc = X86::SUB16rm; break;
5132 case ISD::AND: ROpc = X86::AND16rr; MOpc = X86::AND16rm; break;
5133 case ISD::OR: ROpc = X86::OR16rr; MOpc = X86::OR16rm; break;
5134 case ISD::XOR: ROpc = X86::XOR16rr; MOpc = X86::XOR16rm; break;
5139 default: llvm_unreachable("Unexpected opcode!");
5140 case ISD::ADD: ROpc = X86::ADD32rr; MOpc = X86::ADD32rm; break;
5141 case ISD::SUB: ROpc = X86::SUB32rr; MOpc = X86::SUB32rm; break;
5142 case ISD::AND: ROpc = X86::AND32rr; MOpc = X86::AND32rm; break;
5143 case ISD::OR: ROpc = X86::OR32rr; MOpc = X86::OR32rm; break;
5144 case ISD::XOR: ROpc = X86::XOR32rr; MOpc = X86::XOR32rm; break;
5149 default: llvm_unreachable("Unexpected opcode!");
5150 case ISD::ADD: ROpc = X86::ADD64rr; MOpc = X86::ADD64rm; break;
5151 case ISD::SUB: ROpc = X86::SUB64rr; MOpc = X86::SUB64rm; break;
5152 case ISD::AND: ROpc = X86::AND64rr; MOpc = X86::AND64rm; break;
5153 case ISD::OR: ROpc = X86::OR64rr; MOpc = X86::OR64rm; break;
5154 case ISD::XOR: ROpc = X86::XOR64rr; MOpc = X86::XOR64rm; break;
5159 // Ok this is a AND/OR/XOR/ADD/SUB with constant.
5161 // If this is a not a subtract, we can still try to fold a load.
5162 if (Opcode != ISD::SUB) {
5163 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5164 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
5165 SDValue Ops[] = { N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
5166 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
5167 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
5168 // Update the chain.
5169 ReplaceUses(N0.getValue(1), SDValue(CNode, 2));
5170 // Record the mem-refs
5171 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N0)->getMemOperand()});
5172 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5173 CurDAG->RemoveDeadNode(Node);
5178 CurDAG->SelectNodeTo(Node, ROpc, NVT, MVT::i32, N0, N1);
5183 // i16/i32/i64 are handled with isel patterns.
5187 case X86ISD::UMUL: {
5188 SDValue N0 = Node->getOperand(0);
5189 SDValue N1 = Node->getOperand(1);
5191 unsigned LoReg, ROpc, MOpc;
5192 switch (NVT.SimpleTy) {
5193 default: llvm_unreachable("Unsupported VT!");
5196 ROpc = Opcode == X86ISD::SMUL ? X86::IMUL8r : X86::MUL8r;
5197 MOpc = Opcode == X86ISD::SMUL ? X86::IMUL8m : X86::MUL8m;
5216 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5217 bool FoldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
5218 // Multiply is commutative.
5220 FoldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
5225 SDValue InGlue = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
5226 N0, SDValue()).getValue(1);
5228 MachineSDNode *CNode;
5230 // i16/i32/i64 use an instruction that produces a low and high result even
5231 // though only the low result is used.
5234 VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
5236 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32, MVT::Other);
5238 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
5240 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
5242 // Update the chain.
5243 ReplaceUses(N1.getValue(1), SDValue(CNode, NVT == MVT::i8 ? 2 : 3));
5244 // Record the mem-refs
5245 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
5247 // i16/i32/i64 use an instruction that produces a low and high result even
5248 // though only the low result is used.
5251 VTs = CurDAG->getVTList(NVT, MVT::i32);
5253 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
5255 CNode = CurDAG->getMachineNode(ROpc, dl, VTs, {N1, InGlue});
5258 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5259 ReplaceUses(SDValue(Node, 1), SDValue(CNode, NVT == MVT::i8 ? 1 : 2));
5260 CurDAG->RemoveDeadNode(Node);
5264 case ISD::SMUL_LOHI:
5265 case ISD::UMUL_LOHI: {
5266 SDValue N0 = Node->getOperand(0);
5267 SDValue N1 = Node->getOperand(1);
5270 unsigned LoReg, HiReg;
5271 bool IsSigned = Opcode == ISD::SMUL_LOHI;
5272 bool UseMULX = !IsSigned && Subtarget->hasBMI2();
5273 bool UseMULXHi = UseMULX && SDValue(Node, 0).use_empty();
5274 switch (NVT.SimpleTy) {
5275 default: llvm_unreachable("Unsupported VT!");
5277 Opc = UseMULXHi ? X86::MULX32Hrr :
5278 UseMULX ? X86::MULX32rr :
5279 IsSigned ? X86::IMUL32r : X86::MUL32r;
5280 MOpc = UseMULXHi ? X86::MULX32Hrm :
5281 UseMULX ? X86::MULX32rm :
5282 IsSigned ? X86::IMUL32m : X86::MUL32m;
5283 LoReg = UseMULX ? X86::EDX : X86::EAX;
5287 Opc = UseMULXHi ? X86::MULX64Hrr :
5288 UseMULX ? X86::MULX64rr :
5289 IsSigned ? X86::IMUL64r : X86::MUL64r;
5290 MOpc = UseMULXHi ? X86::MULX64Hrm :
5291 UseMULX ? X86::MULX64rm :
5292 IsSigned ? X86::IMUL64m : X86::MUL64m;
5293 LoReg = UseMULX ? X86::RDX : X86::RAX;
5298 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5299 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
5300 // Multiply is commutative.
5302 foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
5307 SDValue InGlue = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
5308 N0, SDValue()).getValue(1);
5309 SDValue ResHi, ResLo;
5312 MachineSDNode *CNode = nullptr;
5313 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
5316 SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other);
5317 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
5318 ResHi = SDValue(CNode, 0);
5319 Chain = SDValue(CNode, 1);
5320 } else if (UseMULX) {
5321 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other);
5322 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
5323 ResHi = SDValue(CNode, 0);
5324 ResLo = SDValue(CNode, 1);
5325 Chain = SDValue(CNode, 2);
5327 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
5328 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
5329 Chain = SDValue(CNode, 0);
5330 InGlue = SDValue(CNode, 1);
5333 // Update the chain.
5334 ReplaceUses(N1.getValue(1), Chain);
5335 // Record the mem-refs
5336 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
5338 SDValue Ops[] = { N1, InGlue };
5340 SDVTList VTs = CurDAG->getVTList(NVT);
5341 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
5342 ResHi = SDValue(CNode, 0);
5343 } else if (UseMULX) {
5344 SDVTList VTs = CurDAG->getVTList(NVT, NVT);
5345 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
5346 ResHi = SDValue(CNode, 0);
5347 ResLo = SDValue(CNode, 1);
5349 SDVTList VTs = CurDAG->getVTList(MVT::Glue);
5350 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
5351 InGlue = SDValue(CNode, 0);
5355 // Copy the low half of the result, if it is needed.
5356 if (!SDValue(Node, 0).use_empty()) {
5358 assert(LoReg && "Register for low half is not defined!");
5359 ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg,
5361 InGlue = ResLo.getValue(2);
5363 ReplaceUses(SDValue(Node, 0), ResLo);
5364 LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG);
5367 // Copy the high half of the result, if it is needed.
5368 if (!SDValue(Node, 1).use_empty()) {
5370 assert(HiReg && "Register for high half is not defined!");
5371 ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg,
5373 InGlue = ResHi.getValue(2);
5375 ReplaceUses(SDValue(Node, 1), ResHi);
5376 LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG);
5380 CurDAG->RemoveDeadNode(Node);
5385 case ISD::UDIVREM: {
5386 SDValue N0 = Node->getOperand(0);
5387 SDValue N1 = Node->getOperand(1);
5389 unsigned ROpc, MOpc;
5390 bool isSigned = Opcode == ISD::SDIVREM;
5392 switch (NVT.SimpleTy) {
5393 default: llvm_unreachable("Unsupported VT!");
5394 case MVT::i8: ROpc = X86::DIV8r; MOpc = X86::DIV8m; break;
5395 case MVT::i16: ROpc = X86::DIV16r; MOpc = X86::DIV16m; break;
5396 case MVT::i32: ROpc = X86::DIV32r; MOpc = X86::DIV32m; break;
5397 case MVT::i64: ROpc = X86::DIV64r; MOpc = X86::DIV64m; break;
5400 switch (NVT.SimpleTy) {
5401 default: llvm_unreachable("Unsupported VT!");
5402 case MVT::i8: ROpc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
5403 case MVT::i16: ROpc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
5404 case MVT::i32: ROpc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
5405 case MVT::i64: ROpc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
5409 unsigned LoReg, HiReg, ClrReg;
5410 unsigned SExtOpcode;
5411 switch (NVT.SimpleTy) {
5412 default: llvm_unreachable("Unsupported VT!");
5414 LoReg = X86::AL; ClrReg = HiReg = X86::AH;
5415 SExtOpcode = 0; // Not used.
5418 LoReg = X86::AX; HiReg = X86::DX;
5420 SExtOpcode = X86::CWD;
5423 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
5424 SExtOpcode = X86::CDQ;
5427 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
5428 SExtOpcode = X86::CQO;
5432 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5433 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
5434 bool signBitIsZero = CurDAG->SignBitIsZero(N0);
5437 if (NVT == MVT::i8) {
5438 // Special case for div8, just use a move with zero extension to AX to
5439 // clear the upper 8 bits (AH).
5440 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain;
5441 MachineSDNode *Move;
5442 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
5443 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
5444 unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rm8
5446 Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, MVT::Other, Ops);
5447 Chain = SDValue(Move, 1);
5448 ReplaceUses(N0.getValue(1), Chain);
5449 // Record the mem-refs
5450 CurDAG->setNodeMemRefs(Move, {cast<LoadSDNode>(N0)->getMemOperand()});
5452 unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rr8
5454 Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, N0);
5455 Chain = CurDAG->getEntryNode();
5457 Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, SDValue(Move, 0),
5459 InGlue = Chain.getValue(1);
5462 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
5463 LoReg, N0, SDValue()).getValue(1);
5464 if (isSigned && !signBitIsZero) {
5465 // Sign extend the low part into the high part.
5467 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InGlue),0);
5469 // Zero out the high part, effectively zero extending the input.
5470 SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i32);
5471 SDValue ClrNode = SDValue(
5472 CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, std::nullopt), 0);
5473 switch (NVT.SimpleTy) {
5476 SDValue(CurDAG->getMachineNode(
5477 TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
5478 CurDAG->getTargetConstant(X86::sub_16bit, dl,
5486 SDValue(CurDAG->getMachineNode(
5487 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
5488 CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
5489 CurDAG->getTargetConstant(X86::sub_32bit, dl,
5494 llvm_unreachable("Unexpected division source");
5497 InGlue = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
5498 ClrNode, InGlue).getValue(1);
5503 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
5505 MachineSDNode *CNode =
5506 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
5507 InGlue = SDValue(CNode, 1);
5508 // Update the chain.
5509 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
5510 // Record the mem-refs
5511 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
5514 SDValue(CurDAG->getMachineNode(ROpc, dl, MVT::Glue, N1, InGlue), 0);
5517 // Prevent use of AH in a REX instruction by explicitly copying it to
5518 // an ABCD_L register.
5520 // The current assumption of the register allocator is that isel
5521 // won't generate explicit references to the GR8_ABCD_H registers. If
5522 // the allocator and/or the backend get enhanced to be more robust in
5523 // that regard, this can be, and should be, removed.
5524 if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
5525 SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
5526 unsigned AHExtOpcode =
5527 isSigned ? X86::MOVSX32rr8_NOREX : X86::MOVZX32rr8_NOREX;
5529 SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
5530 MVT::Glue, AHCopy, InGlue);
5531 SDValue Result(RNode, 0);
5532 InGlue = SDValue(RNode, 1);
5535 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
5537 ReplaceUses(SDValue(Node, 1), Result);
5538 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
5541 // Copy the division (low) result, if it is needed.
5542 if (!SDValue(Node, 0).use_empty()) {
5543 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
5544 LoReg, NVT, InGlue);
5545 InGlue = Result.getValue(2);
5546 ReplaceUses(SDValue(Node, 0), Result);
5547 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
5550 // Copy the remainder (high) result, if it is needed.
5551 if (!SDValue(Node, 1).use_empty()) {
5552 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
5553 HiReg, NVT, InGlue);
5554 InGlue = Result.getValue(2);
5555 ReplaceUses(SDValue(Node, 1), Result);
5556 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
5559 CurDAG->RemoveDeadNode(Node);
5564 case X86ISD::STRICT_FCMP:
5565 case X86ISD::STRICT_FCMPS: {
5566 bool IsStrictCmp = Node->getOpcode() == X86ISD::STRICT_FCMP ||
5567 Node->getOpcode() == X86ISD::STRICT_FCMPS;
5568 SDValue N0 = Node->getOperand(IsStrictCmp ? 1 : 0);
5569 SDValue N1 = Node->getOperand(IsStrictCmp ? 2 : 1);
5571 // Save the original VT of the compare.
5572 MVT CmpVT = N0.getSimpleValueType();
5574 // Floating point needs special handling if we don't have FCOMI.
5575 if (Subtarget->canUseCMOV())
5578 bool IsSignaling = Node->getOpcode() == X86ISD::STRICT_FCMPS;
5581 switch (CmpVT.SimpleTy) {
5582 default: llvm_unreachable("Unexpected type!");
5584 Opc = IsSignaling ? X86::COM_Fpr32 : X86::UCOM_Fpr32;
5587 Opc = IsSignaling ? X86::COM_Fpr64 : X86::UCOM_Fpr64;
5590 Opc = IsSignaling ? X86::COM_Fpr80 : X86::UCOM_Fpr80;
5595 IsStrictCmp ? Node->getOperand(0) : CurDAG->getEntryNode();
5598 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
5599 Chain = SDValue(CurDAG->getMachineNode(Opc, dl, VTs, {N0, N1, Chain}), 0);
5600 Glue = Chain.getValue(1);
5602 Glue = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N0, N1), 0);
5607 SDValue(CurDAG->getMachineNode(X86::FNSTSW16r, dl, MVT::i16, Glue), 0);
5609 // Extract upper 8-bits of AX.
5611 CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl, MVT::i8, FNSTSW);
5613 // Move AH into flags.
5614 // Some 64-bit targets lack SAHF support, but they do support FCOMI.
5615 assert(Subtarget->canUseLAHFSAHF() &&
5616 "Target doesn't support SAHF or FCOMI?");
5617 SDValue AH = CurDAG->getCopyToReg(Chain, dl, X86::AH, Extract, SDValue());
5619 SDValue SAHF = SDValue(
5620 CurDAG->getMachineNode(X86::SAHF, dl, MVT::i32, AH.getValue(1)), 0);
5623 ReplaceUses(SDValue(Node, 1), Chain);
5625 ReplaceUses(SDValue(Node, 0), SAHF);
5626 CurDAG->RemoveDeadNode(Node);
5631 SDValue N0 = Node->getOperand(0);
5632 SDValue N1 = Node->getOperand(1);
5634 // Optimizations for TEST compares.
5635 if (!isNullConstant(N1))
5638 // Save the original VT of the compare.
5639 MVT CmpVT = N0.getSimpleValueType();
5641 // If we are comparing (and (shr X, C, Mask) with 0, emit a BEXTR followed
5642 // by a test instruction. The test should be removed later by
5643 // analyzeCompare if we are using only the zero flag.
5644 // TODO: Should we check the users and use the BEXTR flags directly?
5645 if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
5646 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(N0.getNode())) {
5647 unsigned TestOpc = CmpVT == MVT::i64 ? X86::TEST64rr
5649 SDValue BEXTR = SDValue(NewNode, 0);
5650 NewNode = CurDAG->getMachineNode(TestOpc, dl, MVT::i32, BEXTR, BEXTR);
5651 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
5652 CurDAG->RemoveDeadNode(Node);
5657 // We can peek through truncates, but we need to be careful below.
5658 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
5659 N0 = N0.getOperand(0);
5661 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
5662 // use a smaller encoding.
5663 // Look past the truncate if CMP is the only use of it.
5664 if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
5665 N0.getValueType() != MVT::i8) {
5666 auto *MaskC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5670 // We may have looked through a truncate so mask off any bits that
5671 // shouldn't be part of the compare.
5672 uint64_t Mask = MaskC->getZExtValue();
5673 Mask &= maskTrailingOnes<uint64_t>(CmpVT.getScalarSizeInBits());
5675 // Check if we can replace AND+IMM{32,64} with a shift. This is possible
5676 // for masks like 0xFF000000 or 0x00FFFFFF and if we care only about the
5678 if (CmpVT == MVT::i64 && !isInt<8>(Mask) && isShiftedMask_64(Mask) &&
5679 onlyUsesZeroFlag(SDValue(Node, 0))) {
5680 unsigned ShiftOpcode = ISD::DELETED_NODE;
5684 unsigned TestOpcode;
5685 unsigned LeadingZeros = llvm::countl_zero(Mask);
5686 unsigned TrailingZeros = llvm::countr_zero(Mask);
5688 // With leading/trailing zeros, the transform is profitable if we can
5689 // eliminate a movabsq or shrink a 32-bit immediate to 8-bit without
5690 // incurring any extra register moves.
5691 bool SavesBytes = !isInt<32>(Mask) || N0.getOperand(0).hasOneUse();
5692 if (LeadingZeros == 0 && SavesBytes) {
5693 // If the mask covers the most significant bit, then we can replace
5694 // TEST+AND with a SHR and check eflags.
5695 // This emits a redundant TEST which is subsequently eliminated.
5696 ShiftOpcode = X86::SHR64ri;
5697 ShiftAmt = TrailingZeros;
5699 TestOpcode = X86::TEST64rr;
5700 } else if (TrailingZeros == 0 && SavesBytes) {
5701 // If the mask covers the least significant bit, then we can replace
5702 // TEST+AND with a SHL and check eflags.
5703 // This emits a redundant TEST which is subsequently eliminated.
5704 ShiftOpcode = X86::SHL64ri;
5705 ShiftAmt = LeadingZeros;
5707 TestOpcode = X86::TEST64rr;
5708 } else if (MaskC->hasOneUse() && !isInt<32>(Mask)) {
5709 // If the shifted mask extends into the high half and is 8/16/32 bits
5710 // wide, then replace it with a SHR and a TEST8rr/TEST16rr/TEST32rr.
5711 unsigned PopCount = 64 - LeadingZeros - TrailingZeros;
5712 if (PopCount == 8) {
5713 ShiftOpcode = X86::SHR64ri;
5714 ShiftAmt = TrailingZeros;
5715 SubRegIdx = X86::sub_8bit;
5717 TestOpcode = X86::TEST8rr;
5718 } else if (PopCount == 16) {
5719 ShiftOpcode = X86::SHR64ri;
5720 ShiftAmt = TrailingZeros;
5721 SubRegIdx = X86::sub_16bit;
5722 SubRegVT = MVT::i16;
5723 TestOpcode = X86::TEST16rr;
5724 } else if (PopCount == 32) {
5725 ShiftOpcode = X86::SHR64ri;
5726 ShiftAmt = TrailingZeros;
5727 SubRegIdx = X86::sub_32bit;
5728 SubRegVT = MVT::i32;
5729 TestOpcode = X86::TEST32rr;
5732 if (ShiftOpcode != ISD::DELETED_NODE) {
5733 SDValue ShiftC = CurDAG->getTargetConstant(ShiftAmt, dl, MVT::i64);
5734 SDValue Shift = SDValue(
5735 CurDAG->getMachineNode(ShiftOpcode, dl, MVT::i64, MVT::i32,
5736 N0.getOperand(0), ShiftC),
5738 if (SubRegIdx != 0) {
5740 CurDAG->getTargetExtractSubreg(SubRegIdx, dl, SubRegVT, Shift);
5742 MachineSDNode *Test =
5743 CurDAG->getMachineNode(TestOpcode, dl, MVT::i32, Shift, Shift);
5744 ReplaceNode(Node, Test);
5751 unsigned ROpc, MOpc;
5753 // For each of these checks we need to be careful if the sign flag is
5754 // being used. It is only safe to use the sign flag in two conditions,
5755 // either the sign bit in the shrunken mask is zero or the final test
5756 // size is equal to the original compare size.
5758 if (isUInt<8>(Mask) &&
5759 (!(Mask & 0x80) || CmpVT == MVT::i8 ||
5760 hasNoSignFlagUses(SDValue(Node, 0)))) {
5761 // For example, convert "testl %eax, $8" to "testb %al, $8"
5763 SubRegOp = X86::sub_8bit;
5764 ROpc = X86::TEST8ri;
5765 MOpc = X86::TEST8mi;
5766 } else if (OptForMinSize && isUInt<16>(Mask) &&
5767 (!(Mask & 0x8000) || CmpVT == MVT::i16 ||
5768 hasNoSignFlagUses(SDValue(Node, 0)))) {
5769 // For example, "testl %eax, $32776" to "testw %ax, $32776".
5770 // NOTE: We only want to form TESTW instructions if optimizing for
5771 // min size. Otherwise we only save one byte and possibly get a length
5772 // changing prefix penalty in the decoders.
5774 SubRegOp = X86::sub_16bit;
5775 ROpc = X86::TEST16ri;
5776 MOpc = X86::TEST16mi;
5777 } else if (isUInt<32>(Mask) && N0.getValueType() != MVT::i16 &&
5778 ((!(Mask & 0x80000000) &&
5779 // Without minsize 16-bit Cmps can get here so we need to
5780 // be sure we calculate the correct sign flag if needed.
5781 (CmpVT != MVT::i16 || !(Mask & 0x8000))) ||
5782 CmpVT == MVT::i32 ||
5783 hasNoSignFlagUses(SDValue(Node, 0)))) {
5784 // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
5785 // NOTE: We only want to run that transform if N0 is 32 or 64 bits.
5786 // Otherwize, we find ourselves in a position where we have to do
5787 // promotion. If previous passes did not promote the and, we assume
5788 // they had a good reason not to and do not promote here.
5790 SubRegOp = X86::sub_32bit;
5791 ROpc = X86::TEST32ri;
5792 MOpc = X86::TEST32mi;
5794 // No eligible transformation was found.
5798 SDValue Imm = CurDAG->getTargetConstant(Mask, dl, VT);
5799 SDValue Reg = N0.getOperand(0);
5801 // Emit a testl or testw.
5802 MachineSDNode *NewNode;
5803 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5804 if (tryFoldLoad(Node, N0.getNode(), Reg, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
5805 if (auto *LoadN = dyn_cast<LoadSDNode>(N0.getOperand(0).getNode())) {
5806 if (!LoadN->isSimple()) {
5807 unsigned NumVolBits = LoadN->getValueType(0).getSizeInBits();
5808 if ((MOpc == X86::TEST8mi && NumVolBits != 8) ||
5809 (MOpc == X86::TEST16mi && NumVolBits != 16) ||
5810 (MOpc == X86::TEST32mi && NumVolBits != 32))
5814 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
5815 Reg.getOperand(0) };
5816 NewNode = CurDAG->getMachineNode(MOpc, dl, MVT::i32, MVT::Other, Ops);
5817 // Update the chain.
5818 ReplaceUses(Reg.getValue(1), SDValue(NewNode, 1));
5819 // Record the mem-refs
5820 CurDAG->setNodeMemRefs(NewNode,
5821 {cast<LoadSDNode>(Reg)->getMemOperand()});
5823 // Extract the subregister if necessary.
5824 if (N0.getValueType() != VT)
5825 Reg = CurDAG->getTargetExtractSubreg(SubRegOp, dl, VT, Reg);
5827 NewNode = CurDAG->getMachineNode(ROpc, dl, MVT::i32, Reg, Imm);
5829 // Replace CMP with TEST.
5830 ReplaceNode(Node, NewNode);
5835 case X86ISD::PCMPISTR: {
5836 if (!Subtarget->hasSSE42())
5839 bool NeedIndex = !SDValue(Node, 0).use_empty();
5840 bool NeedMask = !SDValue(Node, 1).use_empty();
5841 // We can't fold a load if we are going to make two instructions.
5842 bool MayFoldLoad = !NeedIndex || !NeedMask;
5844 MachineSDNode *CNode;
5846 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrr : X86::PCMPISTRMrr;
5847 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrm : X86::PCMPISTRMrm;
5848 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node);
5849 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
5851 if (NeedIndex || !NeedMask) {
5852 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : X86::PCMPISTRIrr;
5853 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrm : X86::PCMPISTRIrm;
5854 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node);
5855 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5858 // Connect the flag usage to the last instruction created.
5859 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
5860 CurDAG->RemoveDeadNode(Node);
5863 case X86ISD::PCMPESTR: {
5864 if (!Subtarget->hasSSE42())
5867 // Copy the two implicit register inputs.
5868 SDValue InGlue = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EAX,
5869 Node->getOperand(1),
5870 SDValue()).getValue(1);
5871 InGlue = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
5872 Node->getOperand(3), InGlue).getValue(1);
5874 bool NeedIndex = !SDValue(Node, 0).use_empty();
5875 bool NeedMask = !SDValue(Node, 1).use_empty();
5876 // We can't fold a load if we are going to make two instructions.
5877 bool MayFoldLoad = !NeedIndex || !NeedMask;
5879 MachineSDNode *CNode;
5881 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrr : X86::PCMPESTRMrr;
5882 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrm : X86::PCMPESTRMrm;
5883 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node,
5885 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
5887 if (NeedIndex || !NeedMask) {
5888 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : X86::PCMPESTRIrr;
5889 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrm : X86::PCMPESTRIrm;
5890 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node, InGlue);
5891 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5893 // Connect the flag usage to the last instruction created.
5894 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
5895 CurDAG->RemoveDeadNode(Node);
5900 if (NVT.isVector() && tryVPTESTM(Node, SDValue(Node, 0), SDValue()))
5907 if (foldLoadStoreIntoMemOperand(Node))
5911 case X86ISD::SETCC_CARRY: {
5912 MVT VT = Node->getSimpleValueType(0);
5914 if (Subtarget->hasSBBDepBreaking()) {
5915 // We have to do this manually because tblgen will put the eflags copy in
5916 // the wrong place if we use an extract_subreg in the pattern.
5917 // Copy flags to the EFLAGS register and glue it to next node.
5919 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EFLAGS,
5920 Node->getOperand(1), SDValue());
5922 // Create a 64-bit instruction if the result is 64-bits otherwise use the
5924 unsigned Opc = VT == MVT::i64 ? X86::SETB_C64r : X86::SETB_C32r;
5925 MVT SetVT = VT == MVT::i64 ? MVT::i64 : MVT::i32;
5927 CurDAG->getMachineNode(Opc, dl, SetVT, EFLAGS, EFLAGS.getValue(1)),
5930 // The target does not recognize sbb with the same reg operand as a
5931 // no-source idiom, so we explicitly zero the input values.
5932 Result = getSBBZero(Node);
5935 // For less than 32-bits we need to extract from the 32-bit node.
5936 if (VT == MVT::i8 || VT == MVT::i16) {
5937 int SubIndex = VT == MVT::i16 ? X86::sub_16bit : X86::sub_8bit;
5938 Result = CurDAG->getTargetExtractSubreg(SubIndex, dl, VT, Result);
5941 ReplaceUses(SDValue(Node, 0), Result);
5942 CurDAG->RemoveDeadNode(Node);
5946 if (isNullConstant(Node->getOperand(0)) &&
5947 isNullConstant(Node->getOperand(1))) {
5948 SDValue Result = getSBBZero(Node);
5950 // Replace the flag use.
5951 ReplaceUses(SDValue(Node, 1), Result.getValue(1));
5953 // Replace the result use.
5954 if (!SDValue(Node, 0).use_empty()) {
5955 // For less than 32-bits we need to extract from the 32-bit node.
5956 MVT VT = Node->getSimpleValueType(0);
5957 if (VT == MVT::i8 || VT == MVT::i16) {
5958 int SubIndex = VT == MVT::i16 ? X86::sub_16bit : X86::sub_8bit;
5959 Result = CurDAG->getTargetExtractSubreg(SubIndex, dl, VT, Result);
5961 ReplaceUses(SDValue(Node, 0), Result);
5964 CurDAG->RemoveDeadNode(Node);
5969 case X86ISD::MGATHER: {
5970 auto *Mgt = cast<X86MaskedGatherSDNode>(Node);
5971 SDValue IndexOp = Mgt->getIndex();
5972 SDValue Mask = Mgt->getMask();
5973 MVT IndexVT = IndexOp.getSimpleValueType();
5974 MVT ValueVT = Node->getSimpleValueType(0);
5975 MVT MaskVT = Mask.getSimpleValueType();
5977 // This is just to prevent crashes if the nodes are malformed somehow. We're
5978 // otherwise only doing loose type checking in here based on type what
5979 // a type constraint would say just like table based isel.
5980 if (!ValueVT.isVector() || !MaskVT.isVector())
5983 unsigned NumElts = ValueVT.getVectorNumElements();
5984 MVT ValueSVT = ValueVT.getVectorElementType();
5986 bool IsFP = ValueSVT.isFloatingPoint();
5987 unsigned EltSize = ValueSVT.getSizeInBits();
5990 bool AVX512Gather = MaskVT.getVectorElementType() == MVT::i1;
5992 if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32)
5993 Opc = IsFP ? X86::VGATHERDPSZ128rm : X86::VPGATHERDDZ128rm;
5994 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32)
5995 Opc = IsFP ? X86::VGATHERDPSZ256rm : X86::VPGATHERDDZ256rm;
5996 else if (IndexVT == MVT::v16i32 && NumElts == 16 && EltSize == 32)
5997 Opc = IsFP ? X86::VGATHERDPSZrm : X86::VPGATHERDDZrm;
5998 else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64)
5999 Opc = IsFP ? X86::VGATHERDPDZ128rm : X86::VPGATHERDQZ128rm;
6000 else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64)
6001 Opc = IsFP ? X86::VGATHERDPDZ256rm : X86::VPGATHERDQZ256rm;
6002 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 64)
6003 Opc = IsFP ? X86::VGATHERDPDZrm : X86::VPGATHERDQZrm;
6004 else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32)
6005 Opc = IsFP ? X86::VGATHERQPSZ128rm : X86::VPGATHERQDZ128rm;
6006 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32)
6007 Opc = IsFP ? X86::VGATHERQPSZ256rm : X86::VPGATHERQDZ256rm;
6008 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 32)
6009 Opc = IsFP ? X86::VGATHERQPSZrm : X86::VPGATHERQDZrm;
6010 else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64)
6011 Opc = IsFP ? X86::VGATHERQPDZ128rm : X86::VPGATHERQQZ128rm;
6012 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64)
6013 Opc = IsFP ? X86::VGATHERQPDZ256rm : X86::VPGATHERQQZ256rm;
6014 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 64)
6015 Opc = IsFP ? X86::VGATHERQPDZrm : X86::VPGATHERQQZrm;
6017 assert(EVT(MaskVT) == EVT(ValueVT).changeVectorElementTypeToInteger() &&
6018 "Unexpected mask VT!");
6019 if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32)
6020 Opc = IsFP ? X86::VGATHERDPSrm : X86::VPGATHERDDrm;
6021 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32)
6022 Opc = IsFP ? X86::VGATHERDPSYrm : X86::VPGATHERDDYrm;
6023 else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64)
6024 Opc = IsFP ? X86::VGATHERDPDrm : X86::VPGATHERDQrm;
6025 else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64)
6026 Opc = IsFP ? X86::VGATHERDPDYrm : X86::VPGATHERDQYrm;
6027 else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32)
6028 Opc = IsFP ? X86::VGATHERQPSrm : X86::VPGATHERQDrm;
6029 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32)
6030 Opc = IsFP ? X86::VGATHERQPSYrm : X86::VPGATHERQDYrm;
6031 else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64)
6032 Opc = IsFP ? X86::VGATHERQPDrm : X86::VPGATHERQQrm;
6033 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64)
6034 Opc = IsFP ? X86::VGATHERQPDYrm : X86::VPGATHERQQYrm;
6040 SDValue Base, Scale, Index, Disp, Segment;
6041 if (!selectVectorAddr(Mgt, Mgt->getBasePtr(), IndexOp, Mgt->getScale(),
6042 Base, Scale, Index, Disp, Segment))
6045 SDValue PassThru = Mgt->getPassThru();
6046 SDValue Chain = Mgt->getChain();
6047 // Gather instructions have a mask output not in the ISD node.
6048 SDVTList VTs = CurDAG->getVTList(ValueVT, MaskVT, MVT::Other);
6050 MachineSDNode *NewNode;
6052 SDValue Ops[] = {PassThru, Mask, Base, Scale,
6053 Index, Disp, Segment, Chain};
6054 NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops);
6056 SDValue Ops[] = {PassThru, Base, Scale, Index,
6057 Disp, Segment, Mask, Chain};
6058 NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops);
6060 CurDAG->setNodeMemRefs(NewNode, {Mgt->getMemOperand()});
6061 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
6062 ReplaceUses(SDValue(Node, 1), SDValue(NewNode, 2));
6063 CurDAG->RemoveDeadNode(Node);
6066 case X86ISD::MSCATTER: {
6067 auto *Sc = cast<X86MaskedScatterSDNode>(Node);
6068 SDValue Value = Sc->getValue();
6069 SDValue IndexOp = Sc->getIndex();
6070 MVT IndexVT = IndexOp.getSimpleValueType();
6071 MVT ValueVT = Value.getSimpleValueType();
6073 // This is just to prevent crashes if the nodes are malformed somehow. We're
6074 // otherwise only doing loose type checking in here based on type what
6075 // a type constraint would say just like table based isel.
6076 if (!ValueVT.isVector())
6079 unsigned NumElts = ValueVT.getVectorNumElements();
6080 MVT ValueSVT = ValueVT.getVectorElementType();
6082 bool IsFP = ValueSVT.isFloatingPoint();
6083 unsigned EltSize = ValueSVT.getSizeInBits();
6086 if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32)
6087 Opc = IsFP ? X86::VSCATTERDPSZ128mr : X86::VPSCATTERDDZ128mr;
6088 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32)
6089 Opc = IsFP ? X86::VSCATTERDPSZ256mr : X86::VPSCATTERDDZ256mr;
6090 else if (IndexVT == MVT::v16i32 && NumElts == 16 && EltSize == 32)
6091 Opc = IsFP ? X86::VSCATTERDPSZmr : X86::VPSCATTERDDZmr;
6092 else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64)
6093 Opc = IsFP ? X86::VSCATTERDPDZ128mr : X86::VPSCATTERDQZ128mr;
6094 else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64)
6095 Opc = IsFP ? X86::VSCATTERDPDZ256mr : X86::VPSCATTERDQZ256mr;
6096 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 64)
6097 Opc = IsFP ? X86::VSCATTERDPDZmr : X86::VPSCATTERDQZmr;
6098 else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32)
6099 Opc = IsFP ? X86::VSCATTERQPSZ128mr : X86::VPSCATTERQDZ128mr;
6100 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32)
6101 Opc = IsFP ? X86::VSCATTERQPSZ256mr : X86::VPSCATTERQDZ256mr;
6102 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 32)
6103 Opc = IsFP ? X86::VSCATTERQPSZmr : X86::VPSCATTERQDZmr;
6104 else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64)
6105 Opc = IsFP ? X86::VSCATTERQPDZ128mr : X86::VPSCATTERQQZ128mr;
6106 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64)
6107 Opc = IsFP ? X86::VSCATTERQPDZ256mr : X86::VPSCATTERQQZ256mr;
6108 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 64)
6109 Opc = IsFP ? X86::VSCATTERQPDZmr : X86::VPSCATTERQQZmr;
6113 SDValue Base, Scale, Index, Disp, Segment;
6114 if (!selectVectorAddr(Sc, Sc->getBasePtr(), IndexOp, Sc->getScale(),
6115 Base, Scale, Index, Disp, Segment))
6118 SDValue Mask = Sc->getMask();
6119 SDValue Chain = Sc->getChain();
6120 // Scatter instructions have a mask output not in the ISD node.
6121 SDVTList VTs = CurDAG->getVTList(Mask.getValueType(), MVT::Other);
6122 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, Mask, Value, Chain};
6124 MachineSDNode *NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops);
6125 CurDAG->setNodeMemRefs(NewNode, {Sc->getMemOperand()});
6126 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 1));
6127 CurDAG->RemoveDeadNode(Node);
6130 case ISD::PREALLOCATED_SETUP: {
6131 auto *MFI = CurDAG->getMachineFunction().getInfo<X86MachineFunctionInfo>();
6132 auto CallId = MFI->getPreallocatedIdForCallSite(
6133 cast<SrcValueSDNode>(Node->getOperand(1))->getValue());
6134 SDValue Chain = Node->getOperand(0);
6135 SDValue CallIdValue = CurDAG->getTargetConstant(CallId, dl, MVT::i32);
6136 MachineSDNode *New = CurDAG->getMachineNode(
6137 TargetOpcode::PREALLOCATED_SETUP, dl, MVT::Other, CallIdValue, Chain);
6138 ReplaceUses(SDValue(Node, 0), SDValue(New, 0)); // Chain
6139 CurDAG->RemoveDeadNode(Node);
6142 case ISD::PREALLOCATED_ARG: {
6143 auto *MFI = CurDAG->getMachineFunction().getInfo<X86MachineFunctionInfo>();
6144 auto CallId = MFI->getPreallocatedIdForCallSite(
6145 cast<SrcValueSDNode>(Node->getOperand(1))->getValue());
6146 SDValue Chain = Node->getOperand(0);
6147 SDValue CallIdValue = CurDAG->getTargetConstant(CallId, dl, MVT::i32);
6148 SDValue ArgIndex = Node->getOperand(2);
6150 Ops[0] = CallIdValue;
6153 MachineSDNode *New = CurDAG->getMachineNode(
6154 TargetOpcode::PREALLOCATED_ARG, dl,
6155 CurDAG->getVTList(TLI->getPointerTy(CurDAG->getDataLayout()),
6158 ReplaceUses(SDValue(Node, 0), SDValue(New, 0)); // Arg pointer
6159 ReplaceUses(SDValue(Node, 1), SDValue(New, 1)); // Chain
6160 CurDAG->RemoveDeadNode(Node);
6163 case X86ISD::AESENCWIDE128KL:
6164 case X86ISD::AESDECWIDE128KL:
6165 case X86ISD::AESENCWIDE256KL:
6166 case X86ISD::AESDECWIDE256KL: {
6167 if (!Subtarget->hasWIDEKL())
6171 switch (Node->getOpcode()) {
6173 llvm_unreachable("Unexpected opcode!");
6174 case X86ISD::AESENCWIDE128KL:
6175 Opcode = X86::AESENCWIDE128KL;
6177 case X86ISD::AESDECWIDE128KL:
6178 Opcode = X86::AESDECWIDE128KL;
6180 case X86ISD::AESENCWIDE256KL:
6181 Opcode = X86::AESENCWIDE256KL;
6183 case X86ISD::AESDECWIDE256KL:
6184 Opcode = X86::AESDECWIDE256KL;
6188 SDValue Chain = Node->getOperand(0);
6189 SDValue Addr = Node->getOperand(1);
6191 SDValue Base, Scale, Index, Disp, Segment;
6192 if (!selectAddr(Node, Addr, Base, Scale, Index, Disp, Segment))
6195 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM0, Node->getOperand(2),
6197 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM1, Node->getOperand(3),
6199 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM2, Node->getOperand(4),
6201 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM3, Node->getOperand(5),
6203 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM4, Node->getOperand(6),
6205 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM5, Node->getOperand(7),
6207 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM6, Node->getOperand(8),
6209 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM7, Node->getOperand(9),
6212 MachineSDNode *Res = CurDAG->getMachineNode(
6213 Opcode, dl, Node->getVTList(),
6214 {Base, Scale, Index, Disp, Segment, Chain, Chain.getValue(1)});
6215 CurDAG->setNodeMemRefs(Res, cast<MemSDNode>(Node)->getMemOperand());
6216 ReplaceNode(Node, Res);
6224 bool X86DAGToDAGISel::
6225 SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
6226 std::vector<SDValue> &OutOps) {
6227 SDValue Op0, Op1, Op2, Op3, Op4;
6228 switch (ConstraintID) {
6230 llvm_unreachable("Unexpected asm memory constraint");
6231 case InlineAsm::Constraint_o: // offsetable ??
6232 case InlineAsm::Constraint_v: // not offsetable ??
6233 case InlineAsm::Constraint_m: // memory
6234 case InlineAsm::Constraint_X:
6235 case InlineAsm::Constraint_p: // address
6236 if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
6241 OutOps.push_back(Op0);
6242 OutOps.push_back(Op1);
6243 OutOps.push_back(Op2);
6244 OutOps.push_back(Op3);
6245 OutOps.push_back(Op4);
6249 /// This pass converts a legalized DAG into a X86-specific DAG,
6250 /// ready for instruction scheduling.
6251 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
6252 CodeGenOpt::Level OptLevel) {
6253 return new X86DAGToDAGISel(TM, OptLevel);