//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//
16 #include "X86MachineFunctionInfo.h"
17 #include "X86RegisterInfo.h"
18 #include "X86Subtarget.h"
19 #include "X86TargetMachine.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/SelectionDAGISel.h"
24 #include "llvm/Config/llvm-config.h"
25 #include "llvm/IR/ConstantRange.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/Instructions.h"
28 #include "llvm/IR/Intrinsics.h"
29 #include "llvm/IR/Type.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/KnownBits.h"
33 #include "llvm/Support/MathExtras.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Target/TargetMachine.h"
36 #include "llvm/Target/TargetOptions.h"
40 #define DEBUG_TYPE "x86-isel"
42 STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
44 //===----------------------------------------------------------------------===//
45 // Pattern Matcher Implementation
46 //===----------------------------------------------------------------------===//
49 /// This corresponds to X86AddressMode, but uses SDValue's instead of register
50 /// numbers for the leaves of the matched tree.
51 struct X86ISelAddressMode {
57 // This is really a union, discriminated by BaseType!
65 const GlobalValue *GV;
67 const BlockAddress *BlockAddr;
71 unsigned Align; // CP alignment.
72 unsigned char SymbolFlags; // X86II::MO_*
X86ISelAddressMode()
    : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
76 Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
77 MCSym(nullptr), JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {}
79 bool hasSymbolicDisplacement() const {
80 return GV != nullptr || CP != nullptr || ES != nullptr ||
81 MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
84 bool hasBaseOrIndexReg() const {
85 return BaseType == FrameIndexBase ||
86 IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
89 /// Return true if this addressing mode is already RIP-relative.
90 bool isRIPRelative() const {
91 if (BaseType != RegBase) return false;
92 if (RegisterSDNode *RegNode =
93 dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
94 return RegNode->getReg() == X86::RIP;
98 void setBaseReg(SDValue Reg) {
103 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
104 void dump(SelectionDAG *DAG = nullptr) {
105 dbgs() << "X86ISelAddressMode " << this << '\n';
106 dbgs() << "Base_Reg ";
107 if (Base_Reg.getNode())
108 Base_Reg.getNode()->dump(DAG);
111 if (BaseType == FrameIndexBase)
112 dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
113 dbgs() << " Scale " << Scale << '\n'
115 if (IndexReg.getNode())
116 IndexReg.getNode()->dump(DAG);
119 dbgs() << " Disp " << Disp << '\n'
141 dbgs() << " JT" << JT << " Align" << Align << '\n';
148 //===--------------------------------------------------------------------===//
149 /// ISel - X86-specific code to select X86 machine instructions for
150 /// SelectionDAG operations.
152 class X86DAGToDAGISel final : public SelectionDAGISel {
153 /// Keep a pointer to the X86Subtarget around so that we can
154 /// make the right decision when generating code for different targets.
155 const X86Subtarget *Subtarget;
/// If true, selector should try to optimize for code size instead of
/// performance.
bool OptForSize;
/// If true, selector should try to optimize for minimum code size.
bool OptForMinSize;
165 explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
166 : SelectionDAGISel(tm, OptLevel), OptForSize(false),
167 OptForMinSize(false) {}
169 StringRef getPassName() const override {
170 return "X86 DAG->DAG Instruction Selection";
173 bool runOnMachineFunction(MachineFunction &MF) override {
174 // Reset the subtarget each time through.
175 Subtarget = &MF.getSubtarget<X86Subtarget>();
176 SelectionDAGISel::runOnMachineFunction(MF);
180 void EmitFunctionEntryCode() override;
182 bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
184 void PreprocessISelDAG() override;
185 void PostprocessISelDAG() override;
187 // Include the pieces autogenerated from the target description.
188 #include "X86GenDAGISel.inc"
191 void Select(SDNode *N) override;
193 bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
194 bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
195 bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
196 bool matchAddress(SDValue N, X86ISelAddressMode &AM);
197 bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
198 bool matchAdd(SDValue N, X86ISelAddressMode &AM, unsigned Depth);
199 bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
201 bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
202 bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
203 SDValue &Scale, SDValue &Index, SDValue &Disp,
205 bool selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
206 SDValue &Scale, SDValue &Index, SDValue &Disp,
208 bool selectMOV64Imm32(SDValue N, SDValue &Imm);
209 bool selectLEAAddr(SDValue N, SDValue &Base,
210 SDValue &Scale, SDValue &Index, SDValue &Disp,
212 bool selectLEA64_32Addr(SDValue N, SDValue &Base,
213 SDValue &Scale, SDValue &Index, SDValue &Disp,
215 bool selectTLSADDRAddr(SDValue N, SDValue &Base,
216 SDValue &Scale, SDValue &Index, SDValue &Disp,
218 bool selectScalarSSELoad(SDNode *Root, SDNode *Parent, SDValue N,
219 SDValue &Base, SDValue &Scale,
220 SDValue &Index, SDValue &Disp,
222 SDValue &NodeWithChain);
223 bool selectRelocImm(SDValue N, SDValue &Op);
225 bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
226 SDValue &Base, SDValue &Scale,
227 SDValue &Index, SDValue &Disp,
230 // Convenience method where P is also root.
231 bool tryFoldLoad(SDNode *P, SDValue N,
232 SDValue &Base, SDValue &Scale,
233 SDValue &Index, SDValue &Disp,
235 return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
238 // Try to fold a vector load. This makes sure the load isn't non-temporal.
239 bool tryFoldVecLoad(SDNode *Root, SDNode *P, SDValue N,
240 SDValue &Base, SDValue &Scale,
241 SDValue &Index, SDValue &Disp,
244 /// Implement addressing mode selection for inline asm expressions.
245 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
246 unsigned ConstraintID,
247 std::vector<SDValue> &OutOps) override;
249 void emitSpecialCodeForMain();
251 inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
252 SDValue &Base, SDValue &Scale,
253 SDValue &Index, SDValue &Disp,
Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
           ? CurDAG->getTargetFrameIndex(
                 AM.Base_FrameIndex,
                 TLI->getPointerTy(CurDAG->getDataLayout()))
           : AM.Base_Reg;
260 Scale = getI8Imm(AM.Scale, DL);
// These are 32-bit even in 64-bit mode since RIP-relative offset
// is 32-bit.
if (AM.GV)
  Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(), MVT::i32, AM.Disp,
                                        AM.SymbolFlags);
else if (AM.CP)
  Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                       AM.Align, AM.Disp, AM.SymbolFlags);
else if (AM.ES) {
272 assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
273 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
274 } else if (AM.MCSym) {
275 assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
assert(AM.SymbolFlags == 0 && "MCSym with non-zero symbol flags?");
277 Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
278 } else if (AM.JT != -1) {
279 assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
280 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
281 } else if (AM.BlockAddr)
282 Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
285 Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);
287 if (AM.Segment.getNode())
288 Segment = AM.Segment;
290 Segment = CurDAG->getRegister(0, MVT::i32);
293 // Utility function to determine whether we should avoid selecting
294 // immediate forms of instructions for better code size or not.
295 // At a high level, we'd like to avoid such instructions when
296 // we have similar constants used within the same basic block
297 // that can be kept in a register.
299 bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
300 uint32_t UseCount = 0;
302 // Do not want to hoist if we're not optimizing for size.
303 // TODO: We'd like to remove this restriction.
304 // See the comment in X86InstrInfo.td for more info.
308 // Walk all the users of the immediate.
309 for (SDNode::use_iterator UI = N->use_begin(),
310 UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {
314 // This user is already selected. Count it as a legitimate use and
316 if (User->isMachineOpcode()) {
321 // We want to count stores of immediates as real uses.
322 if (User->getOpcode() == ISD::STORE &&
323 User->getOperand(1).getNode() == N) {
328 // We don't currently match users that have > 2 operands (except
329 // for stores, which are handled above)
// Those instructions won't match in ISel, for now, and would
331 // be counted incorrectly.
332 // This may change in the future as we add additional instruction
334 if (User->getNumOperands() != 2)
337 // Immediates that are used for offsets as part of stack
338 // manipulation should be left alone. These are typically
339 // used to indicate SP offsets for argument passing and
340 // will get pulled into stores/pushes (implicitly).
341 if (User->getOpcode() == X86ISD::ADD ||
342 User->getOpcode() == ISD::ADD ||
343 User->getOpcode() == X86ISD::SUB ||
344 User->getOpcode() == ISD::SUB) {
346 // Find the other operand of the add/sub.
347 SDValue OtherOp = User->getOperand(0);
348 if (OtherOp.getNode() == N)
349 OtherOp = User->getOperand(1);
351 // Don't count if the other operand is SP.
352 RegisterSDNode *RegNode;
353 if (OtherOp->getOpcode() == ISD::CopyFromReg &&
354 (RegNode = dyn_cast_or_null<RegisterSDNode>(
355 OtherOp->getOperand(1).getNode())))
356 if ((RegNode->getReg() == X86::ESP) ||
357 (RegNode->getReg() == X86::RSP))
361 // ... otherwise, count this and move on.
365 // If we have more than 1 use, then recommend for hoisting.
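// A constant with two or more uses like this is worth materializing in a
// register once and reusing, instead of re-encoding the immediate at each use.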
366 return (UseCount > 1);
369 /// Return a target constant with the specified value of type i8.
370 inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
371 return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
374 /// Return a target constant with the specified value, of type i32.
375 inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
376 return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
379 /// Return a target constant with the specified value, of type i64.
380 inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
381 return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
384 SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
386 assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
387 uint64_t Index = N->getConstantOperandVal(1);
388 MVT VecVT = N->getOperand(0).getSimpleValueType();
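// E.g. extracting element 4 of a v8i32 source with VecWidth == 128 yields
// (4 * 32) / 128 == 1, i.e. the second 128-bit lane.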
389 return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
392 SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
394 assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
395 uint64_t Index = N->getConstantOperandVal(2);
396 MVT VecVT = N->getSimpleValueType(0);
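// E.g. inserting at element index 4 of a v8i32 result with VecWidth == 128
// yields (4 * 32) / 128 == 1, i.e. the upper 128-bit lane.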
397 return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
400 /// Return an SDNode that returns the value of the global base register.
401 /// Output instructions required to initialize the global base register,
403 SDNode *getGlobalBaseReg();
405 /// Return a reference to the TargetMachine, casted to the target-specific
407 const X86TargetMachine &getTargetMachine() const {
408 return static_cast<const X86TargetMachine &>(TM);
411 /// Return a reference to the TargetInstrInfo, casted to the target-specific
413 const X86InstrInfo *getInstrInfo() const {
414 return Subtarget->getInstrInfo();
417 /// Address-mode matching performs shift-of-and to and-of-shift
418 /// reassociation in order to expose more scaled addressing
420 bool ComplexPatternFuncMutatesDAG() const override {
424 bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;
426 /// Returns whether this is a relocatable immediate in the range
427 /// [-2^Width .. 2^Width-1].
428 template <unsigned Width> bool isSExtRelocImm(SDNode *N) const {
429 if (auto *CN = dyn_cast<ConstantSDNode>(N))
430 return isInt<Width>(CN->getSExtValue());
431 return isSExtAbsoluteSymbolRef(Width, N);
434 // Indicates we should prefer to use a non-temporal load for this load.
435 bool useNonTemporalLoad(LoadSDNode *N) const {
436 if (!N->isNonTemporal())
439 unsigned StoreSize = N->getMemoryVT().getStoreSize();
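// Non-temporal loads are only worth keeping non-temporal when they can use
// the (V)MOVNTDQA family, which requires the access to be aligned to its
// full width and SSE4.1/AVX2/AVX512 for 16/32/64-byte accesses respectively.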
if (N->getAlignment() < StoreSize)
  return false;

switch (StoreSize) {
default: llvm_unreachable("Unsupported store size");
case 16: return Subtarget->hasSSE41();
case 32: return Subtarget->hasAVX2();
case 64: return Subtarget->hasAVX512();
}
455 bool foldLoadStoreIntoMemOperand(SDNode *Node);
456 bool matchBEXTRFromAnd(SDNode *Node);
457 bool shrinkAndImmediate(SDNode *N);
458 bool isMaskZeroExtended(SDNode *N) const;
460 MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
461 const SDLoc &dl, MVT VT, SDNode *Node);
462 MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
463 const SDLoc &dl, MVT VT, SDNode *Node,
// Returns true if this masked compare can be implemented legally with this
// type.
471 static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
472 unsigned Opcode = N->getOpcode();
473 if (Opcode == X86ISD::CMPM || Opcode == ISD::SETCC ||
474 Opcode == X86ISD::CMPM_RND || Opcode == X86ISD::VFPCLASS) {
475 // We can get 256-bit 8 element types here without VLX being enabled. When
476 // this happens we will use 512-bit operations and the mask will not be
478 EVT OpVT = N->getOperand(0).getValueType();
479 if (OpVT.is256BitVector() || OpVT.is128BitVector())
480 return Subtarget->hasVLX();
484 // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
485 if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
486 Opcode == X86ISD::FSETCCM_RND)
492 // Returns true if we can assume the writer of the mask has zero extended it
494 bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
495 // If this is an AND, check if we have a compare on either side. As long as
496 // one side guarantees the mask is zero extended, the AND will preserve those
498 if (N->getOpcode() == ISD::AND)
499 return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
500 isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);
502 return isLegalMaskCompare(N, Subtarget);
506 X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
507 if (OptLevel == CodeGenOpt::None) return false;
512 if (N.getOpcode() != ISD::LOAD)
515 // If N is a load, do additional profitability checks.
517 switch (U->getOpcode()) {
529 SDValue Op1 = U->getOperand(1);
// If the other operand is an 8-bit immediate we should fold the immediate
// instead. This reduces code size.
// e.g.
//   movl 4(%esp), %eax
//   addl $4, %eax
// vs.
//   movl $4, %eax
//   addl 4(%esp), %eax
// The former is 2 bytes shorter. In the case where the increment is 1, the
// saving can be 4 bytes (by using incl %eax).
541 if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
542 if (Imm->getAPIntValue().isSignedIntN(8))
545 // If this is a 64-bit AND with an immediate that fits in 32-bits,
546 // prefer using the smaller and over folding the load. This is needed to
547 // make sure immediates created by shrinkAndImmediate are always folded.
548 // Ideally we would narrow the load during DAG combine and get the
549 // best of both worlds.
550 if (U->getOpcode() == ISD::AND &&
551 Imm->getAPIntValue().getBitWidth() == 64 &&
552 Imm->getAPIntValue().isIntN(32))
// If the other operand is a TLS address, we should fold it instead.
// This produces
//   movl    %gs:0, %eax
//   leal    i@NTPOFF(%eax), %eax
// instead of
//   movl    $i@NTPOFF, %eax
//   movl    %gs:(%eax), %eax
// If the block also has an access to a second TLS address this will save
// a load.
// FIXME: This is probably also true for non-TLS addresses.
566 if (Op1.getOpcode() == X86ISD::Wrapper) {
567 SDValue Val = Op1.getOperand(0);
568 if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
572 // Don't fold load if this matches the BTS/BTR/BTC patterns.
573 // BTS: (or X, (shl 1, n))
574 // BTR: (and X, (rotl -2, n))
575 // BTC: (xor X, (shl 1, n))
576 if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
577 if (U->getOperand(0).getOpcode() == ISD::SHL &&
578 isOneConstant(U->getOperand(0).getOperand(0)))
581 if (U->getOperand(1).getOpcode() == ISD::SHL &&
582 isOneConstant(U->getOperand(1).getOperand(0)))
585 if (U->getOpcode() == ISD::AND) {
586 SDValue U0 = U->getOperand(0);
587 SDValue U1 = U->getOperand(1);
588 if (U0.getOpcode() == ISD::ROTL) {
589 auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
590 if (C && C->getSExtValue() == -2)
594 if (U1.getOpcode() == ISD::ROTL) {
595 auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
596 if (C && C->getSExtValue() == -2)
606 // Don't fold a load into a shift by immediate. The BMI2 instructions
607 // support folding a load, but not an immediate. The legacy instructions
608 // support folding an immediate, but can't fold a load. Folding an
609 // immediate is preferable to folding a load.
610 if (isa<ConstantSDNode>(U->getOperand(1)))
// Prevent folding a load if this can be implemented with an insert_subreg or
618 // a move that implicitly zeroes.
619 if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
620 isNullConstant(Root->getOperand(2)) &&
621 (Root->getOperand(0).isUndef() ||
622 ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
628 /// Replace the original chain operand of the call with
629 /// load's chain operand and move load below the call's chain operand.
630 static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
631 SDValue Call, SDValue OrigChain) {
632 SmallVector<SDValue, 8> Ops;
633 SDValue Chain = OrigChain.getOperand(0);
634 if (Chain.getNode() == Load.getNode())
635 Ops.push_back(Load.getOperand(0));
637 assert(Chain.getOpcode() == ISD::TokenFactor &&
638 "Unexpected chain operand");
639 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
640 if (Chain.getOperand(i).getNode() == Load.getNode())
641 Ops.push_back(Load.getOperand(0));
643 Ops.push_back(Chain.getOperand(i));
645 CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
647 Ops.push_back(NewChain);
649 Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
650 CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
651 CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
652 Load.getOperand(1), Load.getOperand(2));
655 Ops.push_back(SDValue(Load.getNode(), 1));
656 Ops.append(Call->op_begin() + 1, Call->op_end());
657 CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
660 /// Return true if call address is a load and it can be
661 /// moved below CALLSEQ_START and the chains leading up to the call.
662 /// Return the CALLSEQ_START by reference as a second output.
663 /// In the case of a tail call, there isn't a callseq node between the call
664 /// chain and the load.
665 static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
666 // The transformation is somewhat dangerous if the call's chain was glued to
667 // the call. After MoveBelowOrigChain the load is moved between the call and
668 // the chain, this can create a cycle if the load is not folded. So it is
669 // *really* important that we are sure the load will be folded.
670 if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
672 LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
if (!LD ||
    LD->getAddressingMode() != ISD::UNINDEXED ||
676 LD->getExtensionType() != ISD::NON_EXTLOAD)
679 // Now let's find the callseq_start.
680 while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
681 if (!Chain.hasOneUse())
683 Chain = Chain.getOperand(0);
686 if (!Chain.getNumOperands())
688 // Since we are not checking for AA here, conservatively abort if the chain
689 // writes to memory. It's not safe to move the callee (a load) across a store.
690 if (isa<MemSDNode>(Chain.getNode()) &&
691 cast<MemSDNode>(Chain.getNode())->writeMem())
693 if (Chain.getOperand(0).getNode() == Callee.getNode())
695 if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
696 Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
697 Callee.getValue(1).hasOneUse())
702 void X86DAGToDAGISel::PreprocessISelDAG() {
703 // OptFor[Min]Size are used in pattern predicates that isel is matching.
704 OptForSize = MF->getFunction().optForSize();
705 OptForMinSize = MF->getFunction().optForMinSize();
706 assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize");
708 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
709 E = CurDAG->allnodes_end(); I != E; ) {
710 SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
712 // If this is a target specific AND node with no flag usages, turn it back
713 // into ISD::AND to enable test instruction matching.
714 if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
715 SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
716 N->getOperand(0), N->getOperand(1));
718 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
720 CurDAG->DeleteNode(N);
724 if (OptLevel != CodeGenOpt::None &&
725 // Only do this when the target can fold the load into the call or
727 !Subtarget->useRetpoline() &&
728 ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
729 (N->getOpcode() == X86ISD::TC_RETURN &&
730 (Subtarget->is64Bit() ||
731 !getTargetMachine().isPositionIndependent())))) {
732 /// Also try moving call address load from outside callseq_start to just
733 /// before the call to allow it to be folded.
751 bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
752 SDValue Chain = N->getOperand(0);
753 SDValue Load = N->getOperand(1);
754 if (!isCalleeLoad(Load, Chain, HasCallSeq))
756 moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
761 // Lower fpround and fpextend nodes that target the FP stack to be store and
762 // load to the stack. This is a gross hack. We would like to simply mark
763 // these as being illegal, but when we do that, legalize produces these when
764 // it expands calls, then expands these in the same legalize pass. We would
765 // like dag combine to be able to hack on these between the call expansion
766 // and the node legalization. As such this pass basically does "really
767 // late" legalization of these inline with the X86 isel pass.
768 // FIXME: This should only happen when not compiled with -O0.
769 if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
772 MVT SrcVT = N->getOperand(0).getSimpleValueType();
773 MVT DstVT = N->getSimpleValueType(0);
775 // If any of the sources are vectors, no fp stack involved.
776 if (SrcVT.isVector() || DstVT.isVector())
779 // If the source and destination are SSE registers, then this is a legal
780 // conversion that should not be lowered.
781 const X86TargetLowering *X86Lowering =
782 static_cast<const X86TargetLowering *>(TLI);
783 bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
784 bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
785 if (SrcIsSSE && DstIsSSE)
788 if (!SrcIsSSE && !DstIsSSE) {
789 // If this is an FPStack extension, it is a noop.
790 if (N->getOpcode() == ISD::FP_EXTEND)
792 // If this is a value-preserving FPStack truncation, it is a noop.
793 if (N->getConstantOperandVal(1))
797 // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
798 // FPStack has extload and truncstore. SSE can fold direct loads into other
799 // operations. Based on this, decide what we want to do.
MVT MemVT;
if (N->getOpcode() == ISD::FP_ROUND)
802 MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
else
  MemVT = SrcIsSSE ? SrcVT : DstVT;
SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
SDLoc dl(N);
809 // FIXME: optimize the case where the src/dest is a load or store?
SDValue Store =
    CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, N->getOperand(0),
812 MemTmp, MachinePointerInfo(), MemVT);
813 SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
814 MachinePointerInfo(), MemVT);
816 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
// extload we created. This will cause general havoc on the DAG because
818 // anything below the conversion could be folded into other existing nodes.
819 // To avoid invalidating 'I', back it up to the convert node.
821 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
823 // Now that we did that, the node is dead. Increment the iterator to the
824 // next node to process, then delete N.
826 CurDAG->DeleteNode(N);
831 void X86DAGToDAGISel::PostprocessISelDAG() {
832 // Skip peepholes at -O0.
833 if (TM.getOptLevel() == CodeGenOpt::None)
// Attempt to remove vector moves that were inserted to zero the upper bits.
838 SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
841 while (Position != CurDAG->allnodes_begin()) {
842 SDNode *N = &*--Position;
843 // Skip dead nodes and any non-machine opcodes.
844 if (N->use_empty() || !N->isMachineOpcode())
847 if (N->getMachineOpcode() != TargetOpcode::SUBREG_TO_REG)
850 unsigned SubRegIdx = N->getConstantOperandVal(2);
851 if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
854 SDValue Move = N->getOperand(1);
855 if (!Move.isMachineOpcode())
// Make sure it's one of the move opcodes we recognize.
859 switch (Move.getMachineOpcode()) {
862 case X86::VMOVAPDrr: case X86::VMOVUPDrr:
863 case X86::VMOVAPSrr: case X86::VMOVUPSrr:
864 case X86::VMOVDQArr: case X86::VMOVDQUrr:
865 case X86::VMOVAPDYrr: case X86::VMOVUPDYrr:
866 case X86::VMOVAPSYrr: case X86::VMOVUPSYrr:
867 case X86::VMOVDQAYrr: case X86::VMOVDQUYrr:
868 case X86::VMOVAPDZ128rr: case X86::VMOVUPDZ128rr:
869 case X86::VMOVAPSZ128rr: case X86::VMOVUPSZ128rr:
870 case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
871 case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
872 case X86::VMOVAPDZ256rr: case X86::VMOVUPDZ256rr:
873 case X86::VMOVAPSZ256rr: case X86::VMOVUPSZ256rr:
874 case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
875 case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
879 SDValue In = Move.getOperand(0);
880 if (!In.isMachineOpcode() ||
881 In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
// Producing instruction is another vector instruction. We can drop the
// move.
886 CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
888 // If the move is now dead, delete it.
889 if (Move.getNode()->use_empty())
890 CurDAG->RemoveDeadNode(Move.getNode());
895 /// Emit any code that needs to be executed only in the main function.
896 void X86DAGToDAGISel::emitSpecialCodeForMain() {
897 if (Subtarget->isTargetCygMing()) {
898 TargetLowering::ArgListTy Args;
899 auto &DL = CurDAG->getDataLayout();
901 TargetLowering::CallLoweringInfo CLI(*CurDAG);
902 CLI.setChain(CurDAG->getRoot())
903 .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
904 CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
906 const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
907 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
908 CurDAG->setRoot(Result.second);
912 void X86DAGToDAGISel::EmitFunctionEntryCode() {
913 // If this is main, emit special code for main.
914 const Function &F = MF->getFunction();
915 if (F.hasExternalLinkage() && F.getName() == "main")
916 emitSpecialCodeForMain();
919 static bool isDispSafeForFrameIndex(int64_t Val) {
920 // On 64-bit platforms, we can run into an issue where a frame index
921 // includes a displacement that, when added to the explicit displacement,
922 // will overflow the displacement field. Assuming that the frame index
923 // displacement fits into a 31-bit integer (which is only slightly more
924 // aggressive than the current fundamental assumption that it fits into
925 // a 32-bit integer), a 31-bit disp should always be safe.
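// E.g. 0x3fffffff is accepted while 0x40000000 is rejected, even though both
// fit in 32 bits; the spare bit leaves headroom for the extra displacement.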
926 return isInt<31>(Val);
929 bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
930 X86ISelAddressMode &AM) {
931 // If there's no offset to fold, we don't need to do any work.
935 // Cannot combine ExternalSymbol displacements with integer offsets.
936 if (AM.ES || AM.MCSym)
939 int64_t Val = AM.Disp + Offset;
940 CodeModel::Model M = TM.getCodeModel();
941 if (Subtarget->is64Bit()) {
942 if (!X86::isOffsetSuitableForCodeModel(Val, M,
943 AM.hasSymbolicDisplacement()))
945 // In addition to the checks required for a register base, check that
946 // we do not try to use an unsafe Disp with a frame index.
947 if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
948 !isDispSafeForFrameIndex(Val))
956 bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
957 SDValue Address = N->getOperand(1);
959 // load gs:0 -> GS segment register.
960 // load fs:0 -> FS segment register.
962 // This optimization is valid because the GNU TLS model defines that
963 // gs:0 (or fs:0 on X86-64) contains its own address.
964 // For more information see http://people.redhat.com/drepper/tls.pdf
965 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
966 if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
967 (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
968 Subtarget->isTargetFuchsia()))
switch (N->getPointerInfo().getAddrSpace()) {
case 256:
  AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
  return false;
case 257:
  AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  return false;
// Address space 258 is not handled here, because it is not used to
// address TLS areas.
}
983 /// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
984 /// mode. These wrap things that will resolve down into a symbol reference.
985 /// If no match is possible, this returns true, otherwise it returns false.
986 bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
987 // If the addressing mode already has a symbol as the displacement, we can
988 // never match another symbol.
989 if (AM.hasSymbolicDisplacement())
992 bool IsRIPRelTLS = false;
993 bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
995 SDValue Val = N.getOperand(0);
996 if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
1000 // We can't use an addressing mode in the 64-bit large code model.
1001 // Global TLS addressing is an exception. In the medium code model,
// we can use an addressing mode when RIP wrappers are present.
1003 // That signifies access to globals that are known to be "near",
1004 // such as the GOT itself.
1005 CodeModel::Model M = TM.getCodeModel();
1006 if (Subtarget->is64Bit() &&
1007 ((M == CodeModel::Large && !IsRIPRelTLS) ||
1008 (M == CodeModel::Medium && !IsRIPRel)))
1011 // Base and index reg must be 0 in order to use %rip as base.
1012 if (IsRIPRel && AM.hasBaseOrIndexReg())
1015 // Make a local copy in case we can't do this fold.
1016 X86ISelAddressMode Backup = AM;
1019 SDValue N0 = N.getOperand(0);
1020 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
1021 AM.GV = G->getGlobal();
1022 AM.SymbolFlags = G->getTargetFlags();
1023 Offset = G->getOffset();
1024 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
1025 AM.CP = CP->getConstVal();
1026 AM.Align = CP->getAlignment();
1027 AM.SymbolFlags = CP->getTargetFlags();
1028 Offset = CP->getOffset();
1029 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
1030 AM.ES = S->getSymbol();
1031 AM.SymbolFlags = S->getTargetFlags();
1032 } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
1033 AM.MCSym = S->getMCSymbol();
1034 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
1035 AM.JT = J->getIndex();
1036 AM.SymbolFlags = J->getTargetFlags();
1037 } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
1038 AM.BlockAddr = BA->getBlockAddress();
1039 AM.SymbolFlags = BA->getTargetFlags();
1040 Offset = BA->getOffset();
1042 llvm_unreachable("Unhandled symbol reference node.");
1044 if (foldOffsetIntoAddress(Offset, AM)) {
1050 AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
1052 // Commit the changes now that we know this fold is safe.
1056 /// Add the specified node to the specified addressing mode, returning true if
1057 /// it cannot be done. This just pattern matches for the addressing mode.
1058 bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
1059 if (matchAddressRecursively(N, AM, 0))
1062 // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
1063 // a smaller encoding and avoids a scaled-index.
1064 if (AM.Scale == 2 &&
1065 AM.BaseType == X86ISelAddressMode::RegBase &&
1066 AM.Base_Reg.getNode() == nullptr) {
AM.Base_Reg = AM.IndexReg;
AM.Scale = 1;
1071 // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
1072 // because it has a smaller encoding.
1073 // TODO: Which other code models can use this?
1074 if (TM.getCodeModel() == CodeModel::Small &&
1075 Subtarget->is64Bit() &&
1077 AM.BaseType == X86ISelAddressMode::RegBase &&
1078 AM.Base_Reg.getNode() == nullptr &&
1079 AM.IndexReg.getNode() == nullptr &&
1080 AM.SymbolFlags == X86II::MO_NO_FLAG &&
1081 AM.hasSymbolicDisplacement())
1082 AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
1087 bool X86DAGToDAGISel::matchAdd(SDValue N, X86ISelAddressMode &AM,
1089 // Add an artificial use to this node so that we can keep track of
1090 // it if it gets CSE'd with a different node.
1091 HandleSDNode Handle(N);
1093 X86ISelAddressMode Backup = AM;
1094 if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1095 !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
1099 // Try again after commuting the operands.
1100 if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1) &&
1101 !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
1105 // If we couldn't fold both operands into the address at the same time,
1106 // see if we can just put each operand into a register and fold at least
1108 if (AM.BaseType == X86ISelAddressMode::RegBase &&
1109 !AM.Base_Reg.getNode() &&
1110 !AM.IndexReg.getNode()) {
1111 N = Handle.getValue();
1112 AM.Base_Reg = N.getOperand(0);
1113 AM.IndexReg = N.getOperand(1);
1117 N = Handle.getValue();
1121 // Insert a node into the DAG at least before the Pos node's position. This
1122 // will reposition the node as needed, and will assign it a node ID that is <=
1123 // the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
1126 static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
1127 if (N->getNodeId() == -1 ||
1128 (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
1129 SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
1130 DAG.RepositionNode(Pos->getIterator(), N.getNode());
1131 // Mark Node as invalid for pruning as after this it may be a successor to a
// selected node but otherwise be in the same position as Pos.
1133 // Conservatively mark it with the same -abs(Id) to assure node id
1134 // invariant is preserved.
1135 N->setNodeId(Pos->getNodeId());
1136 SelectionDAGISel::InvalidateNodeId(N.getNode());
1140 // Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
1141 // safe. This allows us to convert the shift and and into an h-register
// extract and a scaled index. Returns false if the simplification is
// performed.
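// For example, with a shift amount of 5 (so C1 == 3), (X >> 5) & 0x7f8 becomes
// ((X >> 8) & 0xff) << 3, and the final shl is absorbed below as Scale == 8.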
1144 static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
1146 SDValue Shift, SDValue X,
1147 X86ISelAddressMode &AM) {
1148 if (Shift.getOpcode() != ISD::SRL ||
1149 !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1153 int ScaleLog = 8 - Shift.getConstantOperandVal(1);
1154 if (ScaleLog <= 0 || ScaleLog >= 4 ||
1155 Mask != (0xffu << ScaleLog))
1158 MVT VT = N.getSimpleValueType();
1160 SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
1161 SDValue NewMask = DAG.getConstant(0xff, DL, VT);
1162 SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
1163 SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
1164 SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
1165 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
1167 // Insert the new nodes into the topological ordering. We must do this in
1168 // a valid topological ordering as nothing is going to go back and re-sort
1169 // these nodes. We continually insert before 'N' in sequence as this is
1170 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1171 // hierarchy left to express.
1172 insertDAGNode(DAG, N, Eight);
1173 insertDAGNode(DAG, N, Srl);
1174 insertDAGNode(DAG, N, NewMask);
1175 insertDAGNode(DAG, N, And);
1176 insertDAGNode(DAG, N, ShlCount);
1177 insertDAGNode(DAG, N, Shl);
1178 DAG.ReplaceAllUsesWith(N, Shl);
1180 AM.Scale = (1 << ScaleLog);
1184 // Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
1185 // allows us to fold the shift into this addressing mode. Returns false if the
1186 // transform succeeded.
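// For example, (X << 2) & 0x3fc is rewritten as ((X & 0xff) << 2), which is
// then matched with (X & 0xff) as the index register and Scale == 4.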
1187 static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
1189 SDValue Shift, SDValue X,
1190 X86ISelAddressMode &AM) {
1191 if (Shift.getOpcode() != ISD::SHL ||
1192 !isa<ConstantSDNode>(Shift.getOperand(1)))
1195 // Not likely to be profitable if either the AND or SHIFT node has more
1196 // than one use (unless all uses are for address computation). Besides,
1197 // isel mechanism requires their node ids to be reused.
1198 if (!N.hasOneUse() || !Shift.hasOneUse())
1201 // Verify that the shift amount is something we can fold.
1202 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1203 if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
1206 MVT VT = N.getSimpleValueType();
1208 SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
1209 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
1210 SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
1212 // Insert the new nodes into the topological ordering. We must do this in
1213 // a valid topological ordering as nothing is going to go back and re-sort
1214 // these nodes. We continually insert before 'N' in sequence as this is
1215 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1216 // hierarchy left to express.
1217 insertDAGNode(DAG, N, NewMask);
1218 insertDAGNode(DAG, N, NewAnd);
1219 insertDAGNode(DAG, N, NewShift);
1220 DAG.ReplaceAllUsesWith(N, NewShift);
1222 AM.Scale = 1 << ShiftAmt;
1223 AM.IndexReg = NewAnd;
1227 // Implement some heroics to detect shifts of masked values where the mask can
1228 // be replaced by extending the shift and undoing that in the addressing mode
1229 // scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
1230 // (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
1231 // the addressing mode. This results in code such as:
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
// Turns into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %ecx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
1254 static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
1256 SDValue Shift, SDValue X,
1257 X86ISelAddressMode &AM) {
1258 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
1259 !isa<ConstantSDNode>(Shift.getOperand(1)))
1262 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1263 unsigned MaskLZ = countLeadingZeros(Mask);
1264 unsigned MaskTZ = countTrailingZeros(Mask);
1266 // The amount of shift we're trying to fit into the addressing mode is taken
1267 // from the trailing zeros of the mask.
1268 unsigned AMShiftAmt = MaskTZ;
1270 // There is nothing we can do here unless the mask is removing some bits.
1271 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
1272 if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
1274 // We also need to ensure that mask is a continuous run of bits.
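// E.g. Mask == 0x1ffc has 2 trailing zeros, then 11 ones, then 51 leading
// zeros: 2 + 11 + 51 == 64, so it is a contiguous run and passes the check.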
1275 if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
1277 // Scale the leading zero count down based on the actual size of the value.
1278 // Also scale it down based on the size of the shift.
1279 unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
1280 if (MaskLZ < ScaleDown)
1282 MaskLZ -= ScaleDown;
1284 // The final check is to ensure that any masked out high bits of X are
1285 // already known to be zero. Otherwise, the mask has a semantic impact
1286 // other than masking out a couple of low bits. Unfortunately, because of
1287 // the mask, zero extensions will be removed from operands in some cases.
1288 // This code works extra hard to look through extensions because we can
1289 // replace them with zero extensions cheaply if necessary.
1290 bool ReplacingAnyExtend = false;
1291 if (X.getOpcode() == ISD::ANY_EXTEND) {
1292 unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
1293 X.getOperand(0).getSimpleValueType().getSizeInBits();
1294 // Assume that we'll replace the any-extend with a zero-extend, and
1295 // narrow the search to the extended value.
1296 X = X.getOperand(0);
1297 MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
1298 ReplacingAnyExtend = true;
1300 APInt MaskedHighBits =
1301 APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
KnownBits Known;
DAG.computeKnownBits(X, Known);
1304 if (MaskedHighBits != Known.Zero) return true;
1306 // We've identified a pattern that can be transformed into a single shift
1307 // and an addressing mode. Make it so.
1308 MVT VT = N.getSimpleValueType();
1309 if (ReplacingAnyExtend) {
1310 assert(X.getValueType() != VT);
1311 // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
1312 SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
1313 insertDAGNode(DAG, N, NewX);
1317 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
1318 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
1319 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
1320 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
1322 // Insert the new nodes into the topological ordering. We must do this in
1323 // a valid topological ordering as nothing is going to go back and re-sort
1324 // these nodes. We continually insert before 'N' in sequence as this is
1325 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1326 // hierarchy left to express.
1327 insertDAGNode(DAG, N, NewSRLAmt);
1328 insertDAGNode(DAG, N, NewSRL);
1329 insertDAGNode(DAG, N, NewSHLAmt);
1330 insertDAGNode(DAG, N, NewSHL);
1331 DAG.ReplaceAllUsesWith(N, NewSHL);
1333 AM.Scale = 1 << AMShiftAmt;
1334 AM.IndexReg = NewSRL;
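// For the lookup_table example above, this turns (and (srl x, 9), 0x7c) into
// (srl x, 11), with the remaining shift-by-2 expressed as Scale == 4.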
1338 bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
1342 dbgs() << "MatchAddress: ";
1347 return matchAddressBase(N, AM);
1349 // If this is already a %rip relative address, we can only merge immediates
1350 // into it. Instead of handling this in every case, we handle it here.
1351 // RIP relative addressing: %rip + 32-bit displacement!
1352 if (AM.isRIPRelative()) {
1353 // FIXME: JumpTable and ExternalSymbol address currently don't like
1354 // displacements. It isn't very important, but this should be fixed for
1356 if (!(AM.ES || AM.MCSym) && AM.JT != -1)
1359 if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
1360 if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
1365 switch (N.getOpcode()) {
1367 case ISD::LOCAL_RECOVER: {
1368 if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
1369 if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
1370 // Use the symbol and don't prefix it.
1371 AM.MCSym = ESNode->getMCSymbol();
1376 case ISD::Constant: {
1377 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
1378 if (!foldOffsetIntoAddress(Val, AM))
1383 case X86ISD::Wrapper:
1384 case X86ISD::WrapperRIP:
1385 if (!matchWrapper(N, AM))
1390 if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
1394 case ISD::FrameIndex:
1395 if (AM.BaseType == X86ISelAddressMode::RegBase &&
1396 AM.Base_Reg.getNode() == nullptr &&
1397 (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
1398 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
1399 AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
1405 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
1408 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1409 unsigned Val = CN->getZExtValue();
1410 // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
1411 // that the base operand remains free for further matching. If
1412 // the base doesn't end up getting used, a post-processing step
1413 // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
1414 if (Val == 1 || Val == 2 || Val == 3) {
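// A shift by 1, 2 or 3 maps directly onto the SIB scale factors 2, 4 and 8.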
1415 AM.Scale = 1 << Val;
1416 SDValue ShVal = N.getOperand(0);
1418 // Okay, we know that we have a scale by now. However, if the scaled
1419 // value is an add of something and a constant, we can fold the
1420 // constant into the disp field here.
1421 if (CurDAG->isBaseWithConstantOffset(ShVal)) {
1422 AM.IndexReg = ShVal.getOperand(0);
1423 ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
1424 uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
1425 if (!foldOffsetIntoAddress(Disp, AM))
1429 AM.IndexReg = ShVal;
1436 // Scale must not be used already.
1437 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
1439 SDValue And = N.getOperand(0);
1440 if (And.getOpcode() != ISD::AND) break;
1441 SDValue X = And.getOperand(0);
1443 // We only handle up to 64-bit values here as those are what matter for
1444 // addressing mode optimizations.
1445 if (X.getSimpleValueType().getSizeInBits() > 64) break;
1447 // The mask used for the transform is expected to be post-shift, but we
1448 // found the shift first so just apply the shift to the mask before passing
1450 if (!isa<ConstantSDNode>(N.getOperand(1)) ||
1451 !isa<ConstantSDNode>(And.getOperand(1)))
1453 uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
1455 // Try to fold the mask and shift into the scale, and return false if we
1457 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
1462 case ISD::SMUL_LOHI:
1463 case ISD::UMUL_LOHI:
1464 // A mul_lohi where we need the low part can be folded as a plain multiply.
1465 if (N.getResNo() != 0) break;
1468 case X86ISD::MUL_IMM:
1469 // X*[3,5,9] -> X+X*[2,4,8]
1470 if (AM.BaseType == X86ISelAddressMode::RegBase &&
1471 AM.Base_Reg.getNode() == nullptr &&
1472 AM.IndexReg.getNode() == nullptr) {
1473 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
1474 if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
1475 CN->getZExtValue() == 9) {
1476 AM.Scale = unsigned(CN->getZExtValue())-1;
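// An LEA can compute X*3, X*5 or X*9 as base + index*2/4/8, so record the
// scale here and reuse the multiplied value as both base and index below.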
1478 SDValue MulVal = N.getOperand(0);
1481 // Okay, we know that we have a scale by now. However, if the scaled
1482 // value is an add of something and a constant, we can fold the
1483 // constant into the disp field here.
1484 if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
1485 isa<ConstantSDNode>(MulVal.getOperand(1))) {
1486 Reg = MulVal.getOperand(0);
1487 ConstantSDNode *AddVal =
1488 cast<ConstantSDNode>(MulVal.getOperand(1));
1489 uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
1490 if (foldOffsetIntoAddress(Disp, AM))
1491 Reg = N.getOperand(0);
1493 Reg = N.getOperand(0);
1496 AM.IndexReg = AM.Base_Reg = Reg;
// Given A-B, if A can be completely folded into the address, leaving
// the index field unused, use -B as the index.
// This is a win if A has multiple parts that can be folded into
1506 // the address. Also, this saves a mov if the base register has
1507 // other uses, since it avoids a two-address sub instruction, however
1508 // it costs an additional mov if the index register has other uses.
1510 // Add an artificial use to this node so that we can keep track of
1511 // it if it gets CSE'd with a different node.
1512 HandleSDNode Handle(N);
1514 // Test if the LHS of the sub can be folded.
1515 X86ISelAddressMode Backup = AM;
1516 if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
1520 // Test if the index field is free for use.
1521 if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
1527 SDValue RHS = Handle.getValue().getOperand(1);
1528 // If the RHS involves a register with multiple uses, this
1529 // transformation incurs an extra mov, due to the neg instruction
1530 // clobbering its operand.
1531 if (!RHS.getNode()->hasOneUse() ||
1532 RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
1533 RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
1534 RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
1535 (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
1536 RHS.getOperand(0).getValueType() == MVT::i32))
1538 // If the base is a register with multiple uses, this
1539 // transformation may save a mov.
1540 // FIXME: Don't rely on DELETED_NODEs.
1541 if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
1542 AM.Base_Reg->getOpcode() != ISD::DELETED_NODE &&
1543 !AM.Base_Reg.getNode()->hasOneUse()) ||
1544 AM.BaseType == X86ISelAddressMode::FrameIndexBase)
1546 // If the folded LHS was interesting, this transformation saves
1547 // address arithmetic.
1548 if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
1549 ((AM.Disp != 0) && (Backup.Disp == 0)) +
1550 (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
1552 // If it doesn't look like it may be an overall win, don't do it.
1558 // Ok, the transformation is legal and appears profitable. Go for it.
1559 SDValue Zero = CurDAG->getConstant(0, dl, N.getValueType());
1560 SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
1564 // Insert the new nodes into the topological ordering.
1565 insertDAGNode(*CurDAG, Handle.getValue(), Zero);
1566 insertDAGNode(*CurDAG, Handle.getValue(), Neg);
1571 if (!matchAdd(N, AM, Depth))
1576 // We want to look through a transform in InstCombine and DAGCombiner that
1577 // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
1578 // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
1579 // An 'lea' can then be used to match the shift (multiply) and add:
1581 // lea (%rsi, %rdi, 8), %rax
1582 if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
1583 !matchAdd(N, AM, Depth))
1588 // Perform some heroic transforms on an and of a constant-count shift
1589 // with a constant to enable use of the scaled offset field.
1591 // Scale must not be used already.
1592 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
1594 SDValue Shift = N.getOperand(0);
1595 if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
1596 SDValue X = Shift.getOperand(0);
1598 // We only handle up to 64-bit values here as those are what matter for
1599 // addressing mode optimizations.
1600 if (X.getSimpleValueType().getSizeInBits() > 64) break;
1602 if (!isa<ConstantSDNode>(N.getOperand(1)))
1604 uint64_t Mask = N.getConstantOperandVal(1);
1606 // Try to fold the mask and shift into an extract and scale.
1607 if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
1610 // Try to fold the mask and shift directly into the scale.
1611 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
1614 // Try to swap the mask and shift to place shifts which can be done as
1615 // a scale on the outside of the mask.
1616 if (!foldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
1622 return matchAddressBase(N, AM);
1625 /// Helper for MatchAddress. Add the specified node to the
1626 /// specified addressing mode without any further recursion.
1627 bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
1628 // Is the base register already occupied?
1629 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
1630 // If so, check to see if the scale index register is set.
1631 if (!AM.IndexReg.getNode()) {
1637 // Otherwise, we cannot select it.
1641 // Default, generate it as a register.
1642 AM.BaseType = X86ISelAddressMode::RegBase;
1647 /// Helper for selectVectorAddr. Handles things that can be folded into a
/// gather/scatter address. The index register and scale should have already
/// been handled.
1650 bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
1651 // TODO: Support other operations.
1652 switch (N.getOpcode()) {
1653 case ISD::Constant: {
1654 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
1655 if (!foldOffsetIntoAddress(Val, AM))
1659 case X86ISD::Wrapper:
1660 if (!matchWrapper(N, AM))
1665 return matchAddressBase(N, AM);
1668 bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
1669 SDValue &Scale, SDValue &Index,
1670 SDValue &Disp, SDValue &Segment) {
1671 X86ISelAddressMode AM;
1672 auto *Mgs = cast<X86MaskedGatherScatterSDNode>(Parent);
1673 AM.IndexReg = Mgs->getIndex();
1674 AM.Scale = cast<ConstantSDNode>(Mgs->getScale())->getZExtValue();
1676 unsigned AddrSpace = cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
1677 // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
1678 if (AddrSpace == 256)
1679 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1680 if (AddrSpace == 257)
1681 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1682 if (AddrSpace == 258)
1683 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
1685 // Try to match into the base and displacement fields.
1686 if (matchVectorAddress(N, AM))
1689 MVT VT = N.getSimpleValueType();
1690 if (AM.BaseType == X86ISelAddressMode::RegBase) {
1691 if (!AM.Base_Reg.getNode())
1692 AM.Base_Reg = CurDAG->getRegister(0, VT);
1695 getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
1699 /// Returns true if it is able to pattern match an addressing mode.
1700 /// It returns the operands which make up the maximal addressing mode it can
1701 /// match by reference.
1703 /// Parent is the parent node of the addr operand that is being matched. It
1704 /// is always a load, store, atomic node, or null. It is only null when
1705 /// checking memory operands for inline asm nodes.
1706 bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
1707 SDValue &Scale, SDValue &Index,
1708 SDValue &Disp, SDValue &Segment) {
1709 X86ISelAddressMode AM;
if (Parent &&
    // This list of opcodes is all the nodes that have an "addr:$ptr" operand
1713 // that are not a MemSDNode, and thus don't have proper addrspace info.
1714 Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
1715 Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
1716 Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
1717 Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
1718 Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
1719 unsigned AddrSpace =
1720 cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
1721 // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
1722 if (AddrSpace == 256)
1723 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1724 if (AddrSpace == 257)
1725 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1726 if (AddrSpace == 258)
1727 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
1730 if (matchAddress(N, AM))
1733 MVT VT = N.getSimpleValueType();
1734 if (AM.BaseType == X86ISelAddressMode::RegBase) {
1735 if (!AM.Base_Reg.getNode())
1736 AM.Base_Reg = CurDAG->getRegister(0, VT);
1739 if (!AM.IndexReg.getNode())
1740 AM.IndexReg = CurDAG->getRegister(0, VT);
1742 getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
1746 // We can only fold a load if all nodes between it and the root node have a
// single use. If there are additional uses, we could end up duplicating the
// load.
1749 static bool hasSingleUsesFromRoot(SDNode *Root, SDNode *User) {
1750 while (User != Root) {
1751 if (!User->hasOneUse())
1753 User = *User->use_begin();
1759 /// Match a scalar SSE load. In particular, we want to match a load whose top
1760 /// elements are either undef or zeros. The load flavor is derived from the
1761 /// type of N, which is either v4f32 or v2f64.
/// PatternChainNode: this is the matched node that has a chain input and
/// output.
1766 bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root, SDNode *Parent,
1767 SDValue N, SDValue &Base,
1768 SDValue &Scale, SDValue &Index,
1769 SDValue &Disp, SDValue &Segment,
1770 SDValue &PatternNodeWithChain) {
1771 if (!hasSingleUsesFromRoot(Root, Parent))
1774 // We can allow a full vector load here since narrowing a load is ok.
1775 if (ISD::isNON_EXTLoad(N.getNode())) {
1776 PatternNodeWithChain = N;
1777 if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
1778 IsLegalToFold(PatternNodeWithChain, Parent, Root, OptLevel)) {
1779 LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
1780 return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
1785 // We can also match the special zero extended load opcode.
1786 if (N.getOpcode() == X86ISD::VZEXT_LOAD) {
1787 PatternNodeWithChain = N;
1788 if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
1789 IsLegalToFold(PatternNodeWithChain, Parent, Root, OptLevel)) {
1790 auto *MI = cast<MemIntrinsicSDNode>(PatternNodeWithChain);
1791 return selectAddr(MI, MI->getBasePtr(), Base, Scale, Index, Disp,
1796 // Need to make sure that the SCALAR_TO_VECTOR and load are both only used
1797 // once. Otherwise the load might get duplicated and the chain output of the
1798 // duplicate load will not be observed by all dependencies.
1799 if (N.getOpcode() == ISD::SCALAR_TO_VECTOR && N.getNode()->hasOneUse()) {
1800 PatternNodeWithChain = N.getOperand(0);
1801 if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
1802 IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
1803 IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel)) {
1804 LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
1805 return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
1810 // Also handle the case where we explicitly require zeros in the top
1811 // elements. This is a vector shuffle from the zero vector.
1812 if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
1813 // Check to see if the top elements are all zeros (or bitcast of zeros).
1814 N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
1815 N.getOperand(0).getNode()->hasOneUse()) {
1816 PatternNodeWithChain = N.getOperand(0).getOperand(0);
1817 if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
1818 IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
1819 IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel)) {
1820 // Okay, this is a zero extending load. Fold it.
1821 LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
1822 return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
1831 bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
1832 if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
1833 uint64_t ImmVal = CN->getZExtValue();
1834 if (!isUInt<32>(ImmVal))
1837 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i64);
1841 // In static codegen with small code model, we can get the address of a label
1842 // into a register with 'movl'.
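// For example, with static relocation and the small code model,
//   movl $sym, %eax
// is enough to materialize a 64-bit pointer, since the 32-bit move
// implicitly zeroes the upper half of %rax.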
1843 if (N->getOpcode() != X86ISD::Wrapper)
1846 N = N.getOperand(0);
1848 // At least GNU as does not accept 'movl' for TPOFF relocations.
1849 // FIXME: We could use 'movl' when we know we are targeting MC.
1850 if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
1854 if (N->getOpcode() != ISD::TargetGlobalAddress)
1855 return TM.getCodeModel() == CodeModel::Small;
1857 Optional<ConstantRange> CR =
1858 cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
1860 return TM.getCodeModel() == CodeModel::Small;
1862 return CR->getUnsignedMax().ult(1ull << 32);
1865 bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
1866 SDValue &Scale, SDValue &Index,
1867 SDValue &Disp, SDValue &Segment) {
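// This matches addresses for LEA64_32r, e.g. "leal (%rdi,%rsi), %eax":
// the result is 32 bits but the base and index must be 64-bit registers,
// hence the SUBREG_TO_REG widening of 32-bit operands below.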
1868 // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
1871 if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
1874 RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
1875 if (RN && RN->getReg() == 0)
1876 Base = CurDAG->getRegister(0, MVT::i64);
1877 else if (Base.getValueType() == MVT::i32 && !dyn_cast<FrameIndexSDNode>(Base)) {
1878 // Base could already be %rip, particularly in the x32 ABI.
1879 Base = SDValue(CurDAG->getMachineNode(
1880 TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
1881 CurDAG->getTargetConstant(0, DL, MVT::i64),
1883 CurDAG->getTargetConstant(X86::sub_32bit, DL, MVT::i32)),
1887 RN = dyn_cast<RegisterSDNode>(Index);
1888 if (RN && RN->getReg() == 0)
1889 Index = CurDAG->getRegister(0, MVT::i64);
1891 assert(Index.getValueType() == MVT::i32 &&
1892 "Expect to be extending 32-bit registers for use in LEA");
1893 Index = SDValue(CurDAG->getMachineNode(
1894 TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
1895 CurDAG->getTargetConstant(0, DL, MVT::i64),
1897 CurDAG->getTargetConstant(X86::sub_32bit, DL,
1905 /// Calls SelectAddr and determines if the maximal addressing
1906 /// mode it matches can be cost effectively emitted as an LEA instruction.
1907 bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
1908 SDValue &Base, SDValue &Scale,
1909 SDValue &Index, SDValue &Disp,
1911 X86ISelAddressMode AM;
1913 // Save the DL and VT before calling matchAddress; it can invalidate N.
1915 MVT VT = N.getSimpleValueType();
1917 // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support segments.
1919 SDValue Copy = AM.Segment;
1920 SDValue T = CurDAG->getRegister(0, MVT::i32);
1922 if (matchAddress(N, AM))
1924 assert (T == AM.Segment);
1927 unsigned Complexity = 0;
1928 if (AM.BaseType == X86ISelAddressMode::RegBase)
1929 if (AM.Base_Reg.getNode())
1932 AM.Base_Reg = CurDAG->getRegister(0, VT);
1933 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
1936 if (AM.IndexReg.getNode())
1939 AM.IndexReg = CurDAG->getRegister(0, VT);
1941 // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with a shift.
1946 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
1947 // to a LEA. This is determined with some experimentation but is by no means
1948 // optimal (especially for code size consideration). LEA is nice because of
1949 // its three-address nature. Tweak the cost function again when we can run
1950 // convertToThreeAddress() at register allocation time.
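// Rough intuition (illustrative): a base register, an index, a displacement
// and a symbolic operand each bump Complexity, so "lea 4(%rdi,%rsi,2), %rax"
// clears the threshold below, while "lea (,%rdi,2), %rax" does not and is
// left to cheaper shift/add selection.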
1951 if (AM.hasSymbolicDisplacement()) {
1952 // For X86-64, always use LEA to materialize RIP-relative addresses.
1953 if (Subtarget->is64Bit())
1959 if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
1962 // If it isn't worth using an LEA, reject it.
1963 if (Complexity <= 2)
1966 getAddressOperands(AM, DL, Base, Scale, Index, Disp, Segment);
1970 /// This is only run on TargetGlobalTLSAddress nodes.
1971 bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
1972 SDValue &Scale, SDValue &Index,
1973 SDValue &Disp, SDValue &Segment) {
1974 assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
1975 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
1977 X86ISelAddressMode AM;
1978 AM.GV = GA->getGlobal();
1979 AM.Disp += GA->getOffset();
1980 AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
1981 AM.SymbolFlags = GA->getTargetFlags();
1983 if (N.getValueType() == MVT::i32) {
1985 AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
1987 AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
1990 getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
1994 bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
1995 if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
1996 Op = CurDAG->getTargetConstant(CN->getAPIntValue(), SDLoc(CN),
2001 // Keep track of the original value type and whether this value was
2002 // truncated. If we see a truncation from pointer type to VT that truncates
2003 // bits that are known to be zero, we can use a narrow reference.
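// For example (illustrative), (i8 (trunc (X86ISD::Wrapper (TargetGlobalAddress @g))))
// where @g has !absolute_symbol metadata restricting it to [0, 256) only drops
// bits known to be zero, so @g can be emitted directly as an i8 immediate.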
2004 EVT VT = N.getValueType();
2005 bool WasTruncated = false;
2006 if (N.getOpcode() == ISD::TRUNCATE) {
2007 WasTruncated = true;
2008 N = N.getOperand(0);
2011 if (N.getOpcode() != X86ISD::Wrapper)
2014 // We can only use non-GlobalValues as immediates if they were not truncated,
2015 // as we do not have any range information. If we have a GlobalValue and the
2016 // address was not truncated, we can select it as an operand directly.
2017 unsigned Opc = N.getOperand(0)->getOpcode();
2018 if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
2019 Op = N.getOperand(0);
2020 // We can only select the operand directly if we didn't have to look past a truncate.
2022 return !WasTruncated;
2025 // Check that the global's range fits into VT.
2026 auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
2027 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2028 if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
2031 // Okay, we can use a narrow reference.
2032 Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
2033 GA->getOffset(), GA->getTargetFlags());
2037 bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
2038 SDValue &Base, SDValue &Scale,
2039 SDValue &Index, SDValue &Disp,
2041 if (!ISD::isNON_EXTLoad(N.getNode()) ||
2042 !IsProfitableToFold(N, P, Root) ||
2043 !IsLegalToFold(N, P, Root, OptLevel))
2046 return selectAddr(N.getNode(),
2047 N.getOperand(1), Base, Scale, Index, Disp, Segment);
2050 bool X86DAGToDAGISel::tryFoldVecLoad(SDNode *Root, SDNode *P, SDValue N,
2051 SDValue &Base, SDValue &Scale,
2052 SDValue &Index, SDValue &Disp,
2054 if (!ISD::isNON_EXTLoad(N.getNode()) ||
2055 useNonTemporalLoad(cast<LoadSDNode>(N)) ||
2056 !IsProfitableToFold(N, P, Root) ||
2057 !IsLegalToFold(N, P, Root, OptLevel))
2060 return selectAddr(N.getNode(),
2061 N.getOperand(1), Base, Scale, Index, Disp, Segment);
2064 /// Return an SDNode that returns the value of the global base register.
2065 /// Output instructions required to initialize the global base register, if necessary.
2067 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
2068 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
2069 auto &DL = MF->getDataLayout();
2070 return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
2073 bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
2074 if (N->getOpcode() == ISD::TRUNCATE)
2075 N = N->getOperand(0).getNode();
2076 if (N->getOpcode() != X86ISD::Wrapper)
2079 auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
2083 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2084 return CR && CR->getSignedMin().sge(-1ull << Width) &&
2085 CR->getSignedMax().slt(1ull << Width);
2088 /// Test whether the given X86ISD::CMP node has any uses which require the SF
2089 /// or OF bits to be accurate.
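/// For example, "testl %eax, $0x80" and the narrower "testb %al, $0x80" agree
/// on ZF but not on SF, so the CMP->TEST narrowing in Select() is only done
/// when every flag consumer is an equality or unsigned user (SETE, JA, CMOVNE, ...).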
2090 static bool hasNoSignedComparisonUses(SDNode *N) {
2091 // Examine each user of the node.
2092 for (SDNode::use_iterator UI = N->use_begin(),
2093 UE = N->use_end(); UI != UE; ++UI) {
2094 // Only examine CopyToReg uses.
2095 if (UI->getOpcode() != ISD::CopyToReg)
2097 // Only examine CopyToReg uses that copy to EFLAGS.
2098 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
2101 // Examine each user of the CopyToReg use.
2102 for (SDNode::use_iterator FlagUI = UI->use_begin(),
2103 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2104 // Only examine the Flag result.
2105 if (FlagUI.getUse().getResNo() != 1) continue;
2106 // Anything unusual: assume conservatively.
2107 if (!FlagUI->isMachineOpcode()) return false;
2108 // Examine the opcode of the user.
2109 switch (FlagUI->getMachineOpcode()) {
2110 // These comparisons don't treat the most significant bit specially.
2111 case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
2112 case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
2113 case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
2114 case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
2115 case X86::JA_1: case X86::JAE_1: case X86::JB_1: case X86::JBE_1:
2116 case X86::JE_1: case X86::JNE_1: case X86::JP_1: case X86::JNP_1:
2117 case X86::CMOVA16rr: case X86::CMOVA16rm:
2118 case X86::CMOVA32rr: case X86::CMOVA32rm:
2119 case X86::CMOVA64rr: case X86::CMOVA64rm:
2120 case X86::CMOVAE16rr: case X86::CMOVAE16rm:
2121 case X86::CMOVAE32rr: case X86::CMOVAE32rm:
2122 case X86::CMOVAE64rr: case X86::CMOVAE64rm:
2123 case X86::CMOVB16rr: case X86::CMOVB16rm:
2124 case X86::CMOVB32rr: case X86::CMOVB32rm:
2125 case X86::CMOVB64rr: case X86::CMOVB64rm:
2126 case X86::CMOVBE16rr: case X86::CMOVBE16rm:
2127 case X86::CMOVBE32rr: case X86::CMOVBE32rm:
2128 case X86::CMOVBE64rr: case X86::CMOVBE64rm:
2129 case X86::CMOVE16rr: case X86::CMOVE16rm:
2130 case X86::CMOVE32rr: case X86::CMOVE32rm:
2131 case X86::CMOVE64rr: case X86::CMOVE64rm:
2132 case X86::CMOVNE16rr: case X86::CMOVNE16rm:
2133 case X86::CMOVNE32rr: case X86::CMOVNE32rm:
2134 case X86::CMOVNE64rr: case X86::CMOVNE64rm:
2135 case X86::CMOVNP16rr: case X86::CMOVNP16rm:
2136 case X86::CMOVNP32rr: case X86::CMOVNP32rm:
2137 case X86::CMOVNP64rr: case X86::CMOVNP64rm:
2138 case X86::CMOVP16rr: case X86::CMOVP16rm:
2139 case X86::CMOVP32rr: case X86::CMOVP32rm:
2140 case X86::CMOVP64rr: case X86::CMOVP64rm:
2142 // Anything else: assume conservatively.
2143 default: return false;
2150 /// Test whether the given node which sets flags has any uses which require the
2151 /// CF flag to be accurate.
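/// For example, rewriting "add $128" as "sub $-128" (see
/// foldLoadStoreIntoMemOperand) keeps ZF/SF/OF but inverts the meaning of CF,
/// so it is only legal when no user of the flags reads CF.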
2152 static bool hasNoCarryFlagUses(SDNode *N) {
2153 // Examine each user of the node.
2154 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); UI != UE;
2156 // Only check things that use the flags.
2157 if (UI.getUse().getResNo() != 1)
2159 // Only examine CopyToReg uses.
2160 if (UI->getOpcode() != ISD::CopyToReg)
2162 // Only examine CopyToReg uses that copy to EFLAGS.
2163 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2165 // Examine each user of the CopyToReg use.
2166 for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
2167 FlagUI != FlagUE; ++FlagUI) {
2168 // Only examine the Flag result.
2169 if (FlagUI.getUse().getResNo() != 1)
2171 // Anything unusual: assume conservatively.
2172 if (!FlagUI->isMachineOpcode())
2174 // Examine the opcode of the user.
2175 switch (FlagUI->getMachineOpcode()) {
2176 // Comparisons which don't examine the CF flag.
2177 case X86::SETOr: case X86::SETNOr: case X86::SETEr: case X86::SETNEr:
2178 case X86::SETSr: case X86::SETNSr: case X86::SETPr: case X86::SETNPr:
2179 case X86::SETLr: case X86::SETGEr: case X86::SETLEr: case X86::SETGr:
2180 case X86::JO_1: case X86::JNO_1: case X86::JE_1: case X86::JNE_1:
2181 case X86::JS_1: case X86::JNS_1: case X86::JP_1: case X86::JNP_1:
2182 case X86::JL_1: case X86::JGE_1: case X86::JLE_1: case X86::JG_1:
2183 case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr:
2184 case X86::CMOVO16rm: case X86::CMOVO32rm: case X86::CMOVO64rm:
2185 case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr:
2186 case X86::CMOVNO16rm: case X86::CMOVNO32rm: case X86::CMOVNO64rm:
2187 case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr:
2188 case X86::CMOVE16rm: case X86::CMOVE32rm: case X86::CMOVE64rm:
2189 case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr:
2190 case X86::CMOVNE16rm: case X86::CMOVNE32rm: case X86::CMOVNE64rm:
2191 case X86::CMOVS16rr: case X86::CMOVS32rr: case X86::CMOVS64rr:
2192 case X86::CMOVS16rm: case X86::CMOVS32rm: case X86::CMOVS64rm:
2193 case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr:
2194 case X86::CMOVNS16rm: case X86::CMOVNS32rm: case X86::CMOVNS64rm:
2195 case X86::CMOVP16rr: case X86::CMOVP32rr: case X86::CMOVP64rr:
2196 case X86::CMOVP16rm: case X86::CMOVP32rm: case X86::CMOVP64rm:
2197 case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr:
2198 case X86::CMOVNP16rm: case X86::CMOVNP32rm: case X86::CMOVNP64rm:
2199 case X86::CMOVL16rr: case X86::CMOVL32rr: case X86::CMOVL64rr:
2200 case X86::CMOVL16rm: case X86::CMOVL32rm: case X86::CMOVL64rm:
2201 case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr:
2202 case X86::CMOVGE16rm: case X86::CMOVGE32rm: case X86::CMOVGE64rm:
2203 case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr:
2204 case X86::CMOVLE16rm: case X86::CMOVLE32rm: case X86::CMOVLE64rm:
2205 case X86::CMOVG16rr: case X86::CMOVG32rr: case X86::CMOVG64rr:
2206 case X86::CMOVG16rm: case X86::CMOVG32rm: case X86::CMOVG64rm:
2208 // Anything else: assume conservatively.
2217 /// Check whether or not the chain ending in StoreNode is suitable for the
2218 /// {load; op; store} to memory-operand (read-modify-write) folding transformation.
2219 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
2220 SDValue StoredVal, SelectionDAG *CurDAG,
2221 LoadSDNode *&LoadNode,
2222 SDValue &InputChain) {
2223 // is the stored value result 0 of the load?
2224 if (StoredVal.getResNo() != 0) return false;
2226 // are there other uses of the loaded value than the inc or dec?
2227 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
2229 // is the store non-extending and non-indexed?
2230 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
2233 SDValue Load = StoredVal->getOperand(0);
2234 // Is the stored value a non-extending and non-indexed load?
2235 if (!ISD::isNormalLoad(Load.getNode())) return false;
2237 // Return LoadNode by reference.
2238 LoadNode = cast<LoadSDNode>(Load);
2240 // Is store the only read of the loaded value?
2241 if (!Load.hasOneUse())
2244 // Is the address of the store the same as the load?
2245 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
2246 LoadNode->getOffset() != StoreNode->getOffset())
2249 bool FoundLoad = false;
2250 SmallVector<SDValue, 4> ChainOps;
2251 SmallVector<const SDNode *, 4> LoopWorklist;
2252 SmallPtrSet<const SDNode *, 16> Visited;
2253 const unsigned int Max = 1024;
2255 // Visualization of Load-Op-Store fusion:
2256 // -------------------------
2258 // *-lines = Chain operand dependencies.
2259 // |-lines = Normal operand dependencies.
2260 // Dependencies flow down and right. n-suffix references multiple nodes.
2268 // * * \ | => A--LD_OP_ST
2276 // This merge induced dependences from: #1: Xn -> LD, OP, Zn
2280 // Ensure the transform is safe by checking for the dual
2281 // dependencies to make sure we do not induce a loop.
2283 // As LD is a predecessor to both OP and ST we can do this by checking:
2284 // a). if LD is a predecessor to a member of Xn or Yn.
2285 // b). if a Zn is a predecessor to ST.
2287 // However, (b) can only occur through being a chain predecessor to
2288 // ST, which is the same as Zn being a member or predecessor of Xn,
2289 // which is a subset of LD being a predecessor of Xn. So it's
2290 // subsumed by check (a).
2292 SDValue Chain = StoreNode->getChain();
2294 // Gather X elements in ChainOps.
2295 if (Chain == Load.getValue(1)) {
2297 ChainOps.push_back(Load.getOperand(0));
2298 } else if (Chain.getOpcode() == ISD::TokenFactor) {
2299 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
2300 SDValue Op = Chain.getOperand(i);
2301 if (Op == Load.getValue(1)) {
2303 // Drop Load, but keep its chain. No cycle check necessary.
2304 ChainOps.push_back(Load.getOperand(0));
2307 LoopWorklist.push_back(Op.getNode());
2308 ChainOps.push_back(Op);
2315 // Worklist is currently Xn. Add Yn to worklist.
2316 for (SDValue Op : StoredVal->ops())
2317 if (Op.getNode() != LoadNode)
2318 LoopWorklist.push_back(Op.getNode());
2320 // Check (a) if Load is a predecessor to Xn + Yn
2321 if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
2326 CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
2330 // Change a chain of {load; op; store} of the same value into a simple op
2331 // through memory of that value, if the uses of the modified value and its
2332 // address are suitable.
2334 // The tablegen memory-operand pattern is currently not able to match
2335 // the case where the EFLAGS on the original operation are used.
2337 // To move this to tablegen, we'll need to improve tablegen to allow flags to
2338 // be transferred from a node in the pattern to the result node, probably with
2339 // a new keyword. For example, we have this
2340 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2341 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2342 // (implicit EFLAGS)]>;
2343 // but maybe need something like this
2344 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2345 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2346 // (transferrable EFLAGS)]>;
2348 // Until then, we manually fold these and instruction select the operation here.
2350 bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
2351 StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
2352 SDValue StoredVal = StoreNode->getOperand(1);
2353 unsigned Opc = StoredVal->getOpcode();
2355 // Before we try to select anything, make sure this is a memory operand size
2356 // and opcode we can handle. Note that this must match the code below that
2357 // actually lowers the opcodes.
2358 EVT MemVT = StoreNode->getMemoryVT();
2359 if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
2377 LoadSDNode *LoadNode = nullptr;
2379 if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadNode,
2383 SDValue Base, Scale, Index, Disp, Segment;
2384 if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
2388 auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
2390 switch (MemVT.getSimpleVT().SimpleTy) {
2400 llvm_unreachable("Invalid size!");
2404 MachineSDNode *Result;
2410 ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
2411 : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
2412 const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
2414 CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other, Ops);
2424 auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
2427 return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
2430 return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr,
2433 return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
2436 return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr,
2439 return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
2442 return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
2444 return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
2447 llvm_unreachable("Invalid opcode!");
2450 auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) {
2453 return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0);
2455 return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0);
2457 return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0);
2459 return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0);
2461 return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0);
2463 return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0);
2465 return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0);
2467 llvm_unreachable("Invalid opcode!");
2470 auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
2473 return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
2476 return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi,
2479 return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
2482 return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi,
2485 return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
2488 return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
2491 return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
2494 llvm_unreachable("Invalid opcode!");
2498 unsigned NewOpc = SelectRegOpcode(Opc);
2499 SDValue Operand = StoredVal->getOperand(1);
2501 // See if the operand is a constant that we can fold into an immediate operand.
2503 if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
2504 auto OperandV = OperandC->getAPIntValue();
2506 // Check if we can shrink the operand enough to fit in an immediate (or
2507 // fit into a smaller immediate) by negating it and switching the operation.
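// For example, "addl $128, (mem)" would need a 4-byte immediate, but since
// -128 fits in a signed byte it can be emitted as "subl $-128, (mem)" using
// the mi8 form, provided no user of the flags reads CF.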
2509 if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
2510 ((MemVT != MVT::i8 && OperandV.getMinSignedBits() > 8 &&
2511 (-OperandV).getMinSignedBits() <= 8) ||
2512 (MemVT == MVT::i64 && OperandV.getMinSignedBits() > 32 &&
2513 (-OperandV).getMinSignedBits() <= 32)) &&
2514 hasNoCarryFlagUses(StoredVal.getNode())) {
2515 OperandV = -OperandV;
2516 Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
2519 // First try to fit this into an Imm8 operand. If it doesn't fit, then try
2520 // the larger immediate operand.
2521 if (MemVT != MVT::i8 && OperandV.getMinSignedBits() <= 8) {
2522 Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
2523 NewOpc = SelectImm8Opcode(Opc);
2524 } else if (OperandV.getActiveBits() <= MemVT.getSizeInBits() &&
2525 (MemVT != MVT::i64 || OperandV.getMinSignedBits() <= 32)) {
2526 Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
2527 NewOpc = SelectImmOpcode(Opc);
2531 if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
2533 CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
2534 StoredVal.getOperand(2), SDValue());
2536 const SDValue Ops[] = {Base, Scale, Index, Disp,
2537 Segment, Operand, CopyTo, CopyTo.getValue(1)};
2538 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
2541 const SDValue Ops[] = {Base, Scale, Index, Disp,
2542 Segment, Operand, InputChain};
2543 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
2549 llvm_unreachable("Invalid opcode!");
2552 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
2553 MemOp[0] = StoreNode->getMemOperand();
2554 MemOp[1] = LoadNode->getMemOperand();
2555 Result->setMemRefs(MemOp, MemOp + 2);
2557 // Update Load Chain uses as well.
2558 ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
2559 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
2560 ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
2561 CurDAG->RemoveDeadNode(Node);
2565 // See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
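// For example, (and (srl %x, 4), 0xFFF) becomes (X86ISD::BEXTR %x, 0xC04):
// the control value packs the start bit (4) in the low byte and the field
// length (12) in the next byte.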
2566 bool X86DAGToDAGISel::matchBEXTRFromAnd(SDNode *Node) {
2567 MVT NVT = Node->getSimpleValueType(0);
2570 SDValue N0 = Node->getOperand(0);
2571 SDValue N1 = Node->getOperand(1);
2573 if (!Subtarget->hasBMI() && !Subtarget->hasTBM())
2576 // Must have a shift right.
2577 if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
2580 // Shift can't have additional users.
2581 if (!N0->hasOneUse())
2584 // Only supported for 32 and 64 bits.
2585 if (NVT != MVT::i32 && NVT != MVT::i64)
2588 // Shift amount and RHS of and must be constant.
2589 ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1);
2590 ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
2591 if (!MaskCst || !ShiftCst)
2594 // And RHS must be a mask.
2595 uint64_t Mask = MaskCst->getZExtValue();
2596 if (!isMask_64(Mask))
2599 uint64_t Shift = ShiftCst->getZExtValue();
2600 uint64_t MaskSize = countPopulation(Mask);
2602 // Don't interfere with something that can be handled by extracting AH.
2603 // TODO: If we are able to fold a load, BEXTR might still be better than AH.
2604 if (Shift == 8 && MaskSize == 8)
2607 // Make sure we are only using bits that were in the original value, not shifted in.
2609 if (Shift + MaskSize > NVT.getSizeInBits())
2612 // Create a BEXTR node and run it through selection.
2613 SDValue C = CurDAG->getConstant(Shift | (MaskSize << 8), dl, NVT);
2614 SDValue New = CurDAG->getNode(X86ISD::BEXTR, dl, NVT,
2615 N0->getOperand(0), C);
2616 ReplaceNode(Node, New.getNode());
2617 SelectCode(New.getNode());
2621 // Emit a PCMPISTR(I/M) instruction.
2622 MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
2623 bool MayFoldLoad, const SDLoc &dl,
2624 MVT VT, SDNode *Node) {
2625 SDValue N0 = Node->getOperand(0);
2626 SDValue N1 = Node->getOperand(1);
2627 SDValue Imm = Node->getOperand(2);
2628 const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
2629 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
2631 // If there is a load, it will be behind a bitcast. We don't need to check
2632 // alignment on this load.
2633 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2634 if (MayFoldLoad && N1->getOpcode() == ISD::BITCAST && N1->hasOneUse() &&
2635 tryFoldVecLoad(Node, N1.getNode(), N1.getOperand(0), Tmp0, Tmp1, Tmp2,
2637 SDValue Load = N1.getOperand(0);
2638 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
2639 Load.getOperand(0) };
2640 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
2641 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
2642 // Update the chain.
2643 ReplaceUses(Load.getValue(1), SDValue(CNode, 2));
2644 // Record the mem-refs
2645 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2646 MemOp[0] = cast<LoadSDNode>(Load)->getMemOperand();
2647 CNode->setMemRefs(MemOp, MemOp + 1);
2651 SDValue Ops[] = { N0, N1, Imm };
2652 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
2653 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
2657 // Emit a PCMPESTR(I/M) instruction. Also return the Glue result in case we need
2658 // to emit a second instruction after this one. This is needed since we have two
2659 // copyToReg nodes glued before this and we need to continue that glue through.
2660 MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
2661 bool MayFoldLoad, const SDLoc &dl,
2662 MVT VT, SDNode *Node,
2664 SDValue N0 = Node->getOperand(0);
2665 SDValue N2 = Node->getOperand(2);
2666 SDValue Imm = Node->getOperand(4);
2667 const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
2668 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
2670 // If there is a load, it will be behind a bitcast. We don't need to check
2671 // alignment on this load.
2672 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2673 if (MayFoldLoad && N2->getOpcode() == ISD::BITCAST && N2->hasOneUse() &&
2674 tryFoldVecLoad(Node, N2.getNode(), N2.getOperand(0), Tmp0, Tmp1, Tmp2,
2676 SDValue Load = N2.getOperand(0);
2677 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
2678 Load.getOperand(0), InFlag };
2679 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
2680 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
2681 InFlag = SDValue(CNode, 3);
2682 // Update the chain.
2683 ReplaceUses(Load.getValue(1), SDValue(CNode, 2));
2684 // Record the mem-refs
2685 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2686 MemOp[0] = cast<LoadSDNode>(Load)->getMemOperand();
2687 CNode->setMemRefs(MemOp, MemOp + 1);
2691 SDValue Ops[] = { N0, N2, Imm, InFlag };
2692 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
2693 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
2694 InFlag = SDValue(CNode, 2);
2698 /// If the high bits of an 'and' operand are known zero, try setting the
2699 /// high bits of an 'and' constant operand to produce a smaller encoding by
2700 /// creating a small, sign-extended negative immediate rather than a large
2701 /// positive one. This reverses a transform in SimplifyDemandedBits that
2702 /// shrinks mask constants by clearing bits. There is also a possibility that
2703 /// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
2704 /// case, just replace the 'and'. Return 'true' if the node is replaced.
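/// For example (illustrative), if bit 31 of the other operand is known zero, then
///   andl $0x7FFFFFF0, %eax   // 4-byte immediate
/// can instead be encoded as
///   andl $-16, %eax          // sign-extended 1-byte immediate
/// because the extra high bit set in the new mask covers a bit that is already
/// known to be zero.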
2705 bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
2706 // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
2707 // have immediate operands.
2708 MVT VT = And->getSimpleValueType(0);
2709 if (VT != MVT::i32 && VT != MVT::i64)
2712 auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
2716 // Bail out if the mask constant is already negative; it can't shrink any further.
2717 // If the upper 32 bits of a 64 bit mask are all zeros, we have special isel
2718 // patterns to use a 32-bit and instead of a 64-bit and by relying on the
2719 // implicit zeroing of 32 bit ops. So we should check if the lower 32 bits
2720 // are negative too.
2721 APInt MaskVal = And1C->getAPIntValue();
2722 unsigned MaskLZ = MaskVal.countLeadingZeros();
2723 if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
2726 // Don't extend into the upper 32 bits of a 64 bit mask.
2727 if (VT == MVT::i64 && MaskLZ >= 32) {
2729 MaskVal = MaskVal.trunc(32);
2732 SDValue And0 = And->getOperand(0);
2733 APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
2734 APInt NegMaskVal = MaskVal | HighZeros;
2736 // If a negative constant would not allow a smaller encoding, there's no need
2737 // to continue. Only change the constant when we know it's a win.
2738 unsigned MinWidth = NegMaskVal.getMinSignedBits();
2739 if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getMinSignedBits() <= 32))
2742 // Extend masks if we truncated above.
2743 if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
2744 NegMaskVal = NegMaskVal.zext(64);
2745 HighZeros = HighZeros.zext(64);
2748 // The variable operand must be all zeros in the top bits to allow using the
2749 // new, negative constant as the mask.
2750 if (!CurDAG->MaskedValueIsZero(And0, HighZeros))
2753 // Check if the mask is -1. In that case, this is an unnecessary instruction
2754 // that escaped earlier analysis.
2755 if (NegMaskVal.isAllOnesValue()) {
2756 ReplaceNode(And, And0.getNode());
2760 // A negative mask allows a smaller encoding. Create a new 'and' node.
2761 SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT);
2762 SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask);
2763 ReplaceNode(And, NewAnd.getNode());
2764 SelectCode(NewAnd.getNode());
2768 void X86DAGToDAGISel::Select(SDNode *Node) {
2769 MVT NVT = Node->getSimpleValueType(0);
2770 unsigned Opcode = Node->getOpcode();
2773 if (Node->isMachineOpcode()) {
2774 LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
2775 Node->setNodeId(-1);
2776 return; // Already selected.
2782 if (Subtarget->isTargetNaCl())
2783 // NaCl has its own pass where jmp %r32 is converted to jmp %r64. We
2784 // leave the instruction alone.
2786 if (Subtarget->isTarget64BitILP32()) {
2787 // Converts a 32-bit register to a 64-bit, zero-extended version of
2788 // it. This is needed because x86-64 can do many things, but jmp %r32
2789 // ain't one of them.
2790 const SDValue &Target = Node->getOperand(1);
2791 assert(Target.getSimpleValueType() == llvm::MVT::i32);
2792 SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, EVT(MVT::i64));
2793 SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other,
2794 Node->getOperand(0), ZextTarget);
2795 ReplaceNode(Node, Brind.getNode());
2796 SelectCode(ZextTarget.getNode());
2797 SelectCode(Brind.getNode());
2802 case X86ISD::GlobalBaseReg:
2803 ReplaceNode(Node, getGlobalBaseReg());
2806 case X86ISD::SELECT:
2807 case X86ISD::SHRUNKBLEND: {
2808 // SHRUNKBLEND selects like a regular VSELECT. Same with X86ISD::SELECT.
2809 SDValue VSelect = CurDAG->getNode(
2810 ISD::VSELECT, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
2811 Node->getOperand(1), Node->getOperand(2));
2812 ReplaceNode(Node, VSelect.getNode());
2813 SelectCode(VSelect.getNode());
2814 // We already called ReplaceUses.
2819 if (matchBEXTRFromAnd(Node))
2821 if (shrinkAndImmediate(Node))
2828 // For operations of the form (x << C1) op C2, check if we can use a smaller
2829 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
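// For example, (or (shl %x, 8), 0x4000) needs a 4-byte immediate for 0x4000,
// but as (shl (or %x, 0x40), 8) the OR can use the one-byte ri8 form; this is
// safe because the low 8 bits of 0x4000 are zero, so no set bits are shifted away.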
2830 SDValue N0 = Node->getOperand(0);
2831 SDValue N1 = Node->getOperand(1);
2833 if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
2836 // i8 is unshrinkable, i16 should be promoted to i32.
2837 if (NVT != MVT::i32 && NVT != MVT::i64)
2840 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
2841 ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
2842 if (!Cst || !ShlCst)
2845 int64_t Val = Cst->getSExtValue();
2846 uint64_t ShlVal = ShlCst->getZExtValue();
2848 // Make sure that we don't change the operation by removing bits.
2849 // This only matters for OR and XOR; AND is unaffected.
2850 uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
2851 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
2854 unsigned ShlOp, AddOp, Op;
2857 // Check the minimum bitwidth for the new constant.
2858 // TODO: AND32ri is the same as AND64ri32 with zext imm.
2859 // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
2860 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
2861 if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
2863 else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
2866 // Bail if there is no smaller encoding.
2870 switch (NVT.SimpleTy) {
2871 default: llvm_unreachable("Unsupported VT!");
2873 assert(CstVT == MVT::i8);
2874 ShlOp = X86::SHL32ri;
2875 AddOp = X86::ADD32rr;
2878 default: llvm_unreachable("Impossible opcode");
2879 case ISD::AND: Op = X86::AND32ri8; break;
2880 case ISD::OR: Op = X86::OR32ri8; break;
2881 case ISD::XOR: Op = X86::XOR32ri8; break;
2885 assert(CstVT == MVT::i8 || CstVT == MVT::i32);
2886 ShlOp = X86::SHL64ri;
2887 AddOp = X86::ADD64rr;
2890 default: llvm_unreachable("Impossible opcode");
2891 case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
2892 case ISD::OR: Op = CstVT==MVT::i8? X86::OR64ri8 : X86::OR64ri32; break;
2893 case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
2898 // Emit the smaller op and the shift.
2899 SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, dl, CstVT);
2900 SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
2902 CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0),
2905 CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
2906 getI8Imm(ShlVal, dl));
2910 case X86ISD::SMUL8: {
2911 SDValue N0 = Node->getOperand(0);
2912 SDValue N1 = Node->getOperand(1);
2914 unsigned Opc = (Opcode == X86ISD::SMUL8 ? X86::IMUL8r : X86::MUL8r);
2916 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::AL,
2917 N0, SDValue()).getValue(1);
2919 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32);
2920 SDValue Ops[] = {N1, InFlag};
2921 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2923 ReplaceNode(Node, CNode);
2927 case X86ISD::UMUL: {
2928 SDValue N0 = Node->getOperand(0);
2929 SDValue N1 = Node->getOperand(1);
2931 unsigned LoReg, Opc;
2932 switch (NVT.SimpleTy) {
2933 default: llvm_unreachable("Unsupported VT!");
2934 // MVT::i8 is handled by X86ISD::UMUL8.
2935 case MVT::i16: LoReg = X86::AX; Opc = X86::MUL16r; break;
2936 case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
2937 case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
2940 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
2941 N0, SDValue()).getValue(1);
2943 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
2944 SDValue Ops[] = {N1, InFlag};
2945 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2947 ReplaceNode(Node, CNode);
2951 case ISD::SMUL_LOHI:
2952 case ISD::UMUL_LOHI: {
2953 SDValue N0 = Node->getOperand(0);
2954 SDValue N1 = Node->getOperand(1);
2957 bool isSigned = Opcode == ISD::SMUL_LOHI;
2958 bool hasBMI2 = Subtarget->hasBMI2();
2960 switch (NVT.SimpleTy) {
2961 default: llvm_unreachable("Unsupported VT!");
2962 case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
2963 MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
2964 case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
2965 MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
2968 switch (NVT.SimpleTy) {
2969 default: llvm_unreachable("Unsupported VT!");
2970 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
2971 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
2975 unsigned SrcReg, LoReg, HiReg;
2977 default: llvm_unreachable("Unknown MUL opcode!");
2980 SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
2984 SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
2987 SrcReg = X86::EDX; LoReg = HiReg = 0;
2990 SrcReg = X86::RDX; LoReg = HiReg = 0;
2994 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2995 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2996 // Multiply is commutative.
2998 foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
3003 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
3004 N0, SDValue()).getValue(1);
3005 SDValue ResHi, ResLo;
3009 MachineSDNode *CNode = nullptr;
3010 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
3012 if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
3013 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
3014 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3015 ResHi = SDValue(CNode, 0);
3016 ResLo = SDValue(CNode, 1);
3017 Chain = SDValue(CNode, 2);
3018 InFlag = SDValue(CNode, 3);
3020 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
3021 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3022 Chain = SDValue(CNode, 0);
3023 InFlag = SDValue(CNode, 1);
3026 // Update the chain.
3027 ReplaceUses(N1.getValue(1), Chain);
3028 // Record the mem-refs
3029 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3030 MemOp[0] = cast<LoadSDNode>(N1)->getMemOperand();
3031 CNode->setMemRefs(MemOp, MemOp + 1);
3033 SDValue Ops[] = { N1, InFlag };
3034 if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
3035 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
3036 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
3037 ResHi = SDValue(CNode, 0);
3038 ResLo = SDValue(CNode, 1);
3039 InFlag = SDValue(CNode, 2);
3041 SDVTList VTs = CurDAG->getVTList(MVT::Glue);
3042 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
3043 InFlag = SDValue(CNode, 0);
3047 // Copy the low half of the result, if it is needed.
3048 if (!SDValue(Node, 0).use_empty()) {
3049 if (!ResLo.getNode()) {
3050 assert(LoReg && "Register for low half is not defined!");
3051 ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
3053 InFlag = ResLo.getValue(2);
3055 ReplaceUses(SDValue(Node, 0), ResLo);
3056 LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG);
3059 // Copy the high half of the result, if it is needed.
3060 if (!SDValue(Node, 1).use_empty()) {
3061 if (!ResHi.getNode()) {
3062 assert(HiReg && "Register for high half is not defined!");
3063 ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
3065 InFlag = ResHi.getValue(2);
3067 ReplaceUses(SDValue(Node, 1), ResHi);
3068 LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG);
3072 CurDAG->RemoveDeadNode(Node);
3078 case X86ISD::SDIVREM8_SEXT_HREG:
3079 case X86ISD::UDIVREM8_ZEXT_HREG: {
3080 SDValue N0 = Node->getOperand(0);
3081 SDValue N1 = Node->getOperand(1);
3084 bool isSigned = (Opcode == ISD::SDIVREM ||
3085 Opcode == X86ISD::SDIVREM8_SEXT_HREG);
3087 switch (NVT.SimpleTy) {
3088 default: llvm_unreachable("Unsupported VT!");
3089 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
3090 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
3091 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
3092 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
3095 switch (NVT.SimpleTy) {
3096 default: llvm_unreachable("Unsupported VT!");
3097 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
3098 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
3099 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
3100 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
3104 unsigned LoReg, HiReg, ClrReg;
3105 unsigned SExtOpcode;
3106 switch (NVT.SimpleTy) {
3107 default: llvm_unreachable("Unsupported VT!");
3109 LoReg = X86::AL; ClrReg = HiReg = X86::AH;
3110 SExtOpcode = X86::CBW;
3113 LoReg = X86::AX; HiReg = X86::DX;
3115 SExtOpcode = X86::CWD;
3118 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
3119 SExtOpcode = X86::CDQ;
3122 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
3123 SExtOpcode = X86::CQO;
3127 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3128 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
3129 bool signBitIsZero = CurDAG->SignBitIsZero(N0);
3132 if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
3133 // Special case for div8, just use a move with zero extension to AX to
3134 // clear the upper 8 bits (AH).
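// For example (illustrative), an i8 udiv becomes "movzbl %cl, %eax" followed by
// "divb %bl": the zero-extending move leaves AH = 0, and DIV8r then produces
// the quotient in AL and the remainder in AH.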
3135 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
3136 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3137 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
3139 SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
3140 MVT::Other, Ops), 0);
3141 Chain = Move.getValue(1);
3142 ReplaceUses(N0.getValue(1), Chain);
3145 SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
3146 Chain = CurDAG->getEntryNode();
3148 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
3149 InFlag = Chain.getValue(1);
3152 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
3153 LoReg, N0, SDValue()).getValue(1);
3154 if (isSigned && !signBitIsZero) {
3155 // Sign extend the low part into the high part.
3157 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
3159 // Zero out the high part, effectively zero extending the input.
3160 SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
3161 switch (NVT.SimpleTy) {
3164 SDValue(CurDAG->getMachineNode(
3165 TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
3166 CurDAG->getTargetConstant(X86::sub_16bit, dl,
3174 SDValue(CurDAG->getMachineNode(
3175 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
3176 CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
3177 CurDAG->getTargetConstant(X86::sub_32bit, dl,
3182 llvm_unreachable("Unexpected division source");
3185 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
3186 ClrNode, InFlag).getValue(1);
3191 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
3193 MachineSDNode *CNode =
3194 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
3195 InFlag = SDValue(CNode, 1);
3196 // Update the chain.
3197 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
3198 // Record the mem-refs
3199 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3200 MemOp[0] = cast<LoadSDNode>(N1)->getMemOperand();
3201 CNode->setMemRefs(MemOp, MemOp + 1);
3204 SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
3207 // Prevent use of AH in a REX instruction by explicitly copying it to
3208 // an ABCD_L register.
3210 // The current assumption of the register allocator is that isel
3211 // won't generate explicit references to the GR8_ABCD_H registers. If
3212 // the allocator and/or the backend get enhanced to be more robust in
3213 // that regard, this can be, and should be, removed.
3214 if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
3215 SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
3216 unsigned AHExtOpcode =
3217 isSigned ? X86::MOVSX32rr8_NOREX : X86::MOVZX32rr8_NOREX;
3219 SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
3220 MVT::Glue, AHCopy, InFlag);
3221 SDValue Result(RNode, 0);
3222 InFlag = SDValue(RNode, 1);
3224 if (Opcode == X86ISD::UDIVREM8_ZEXT_HREG ||
3225 Opcode == X86ISD::SDIVREM8_SEXT_HREG) {
3226 assert(Node->getValueType(1) == MVT::i32 && "Unexpected result type!");
3229 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
3231 ReplaceUses(SDValue(Node, 1), Result);
3232 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
3235 // Copy the division (low) result, if it is needed.
3236 if (!SDValue(Node, 0).use_empty()) {
3237 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3238 LoReg, NVT, InFlag);
3239 InFlag = Result.getValue(2);
3240 ReplaceUses(SDValue(Node, 0), Result);
3241 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
3244 // Copy the remainder (high) result, if it is needed.
3245 if (!SDValue(Node, 1).use_empty()) {
3246 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3247 HiReg, NVT, InFlag);
3248 InFlag = Result.getValue(2);
3249 ReplaceUses(SDValue(Node, 1), Result);
3250 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
3253 CurDAG->RemoveDeadNode(Node);
3258 SDValue N0 = Node->getOperand(0);
3259 SDValue N1 = Node->getOperand(1);
3261 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
3262 hasNoSignedComparisonUses(Node))
3263 N0 = N0.getOperand(0);
3265 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
3266 // use a smaller encoding.
3267 // Look past the truncate if CMP is the only use of it.
3268 if (N0.getOpcode() == ISD::AND &&
3269 N0.getNode()->hasOneUse() &&
3270 N0.getValueType() != MVT::i8 &&
3271 X86::isZeroNode(N1)) {
3272 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3274 uint64_t Mask = C->getZExtValue();
3280 if (isUInt<8>(Mask) &&
3281 (!(Mask & 0x80) || hasNoSignedComparisonUses(Node))) {
3282 // For example, convert "testl %eax, $8" to "testb %al, $8"
3284 SubRegOp = X86::sub_8bit;
3286 } else if (OptForMinSize && isUInt<16>(Mask) &&
3287 (!(Mask & 0x8000) || hasNoSignedComparisonUses(Node))) {
3288 // For example, "testl %eax, $32776" to "testw %ax, $32776".
3289 // NOTE: We only want to form TESTW instructions if optimizing for
3290 // min size. Otherwise we only save one byte and possibly get a length
3291 // changing prefix penalty in the decoders.
3293 SubRegOp = X86::sub_16bit;
3295 } else if (isUInt<32>(Mask) && N0.getValueType() != MVT::i16 &&
3296 (!(Mask & 0x80000000) || hasNoSignedComparisonUses(Node))) {
3297 // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
3298 // NOTE: We only want to run that transform if N0 is 32 or 64 bits.
3299 // Otherwise, we find ourselves in a position where we have to do
3300 // promotion. If previous passes did not promote the and, we assume
3301 // they had a good reason not to and do not promote here.
3303 SubRegOp = X86::sub_32bit;
3306 // No eligible transformation was found.
3310 SDValue Imm = CurDAG->getTargetConstant(Mask, dl, VT);
3311 SDValue Reg = N0.getOperand(0);
3313 // Extract the subregister if necessary.
3314 if (N0.getValueType() != VT)
3315 Reg = CurDAG->getTargetExtractSubreg(SubRegOp, dl, VT, Reg);
3317 // Emit a testl or testw.
3318 SDNode *NewNode = CurDAG->getMachineNode(Op, dl, MVT::i32, Reg, Imm);
3319 // Replace CMP with TEST.
3320 ReplaceNode(Node, NewNode);
3325 case X86ISD::PCMPISTR: {
3326 if (!Subtarget->hasSSE42())
3329 bool NeedIndex = !SDValue(Node, 0).use_empty();
3330 bool NeedMask = !SDValue(Node, 1).use_empty();
3331 // We can't fold a load if we are going to make two instructions.
3332 bool MayFoldLoad = !NeedIndex || !NeedMask;
3334 MachineSDNode *CNode;
3336 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrr : X86::PCMPISTRMrr;
3337 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrm : X86::PCMPISTRMrm;
3338 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node);
3339 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
3341 if (NeedIndex || !NeedMask) {
3342 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : X86::PCMPISTRIrr;
3343 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrm : X86::PCMPISTRIrm;
3344 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node);
3345 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
3348 // Connect the flag usage to the last instruction created.
3349 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
3350 CurDAG->RemoveDeadNode(Node);
3353 case X86ISD::PCMPESTR: {
3354 if (!Subtarget->hasSSE42())
3357 // Copy the two implicit register inputs.
3358 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EAX,
3359 Node->getOperand(1),
3360 SDValue()).getValue(1);
3361 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
3362 Node->getOperand(3), InFlag).getValue(1);
3364 bool NeedIndex = !SDValue(Node, 0).use_empty();
3365 bool NeedMask = !SDValue(Node, 1).use_empty();
3366 // We can't fold a load if we are going to make two instructions.
3367 bool MayFoldLoad = !NeedIndex || !NeedMask;
3369 MachineSDNode *CNode;
3371 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrr : X86::PCMPESTRMrr;
3372 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrm : X86::PCMPESTRMrm;
3373 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node,
3375 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
3377 if (NeedIndex || !NeedMask) {
3378 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : X86::PCMPESTRIrr;
3379 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrm : X86::PCMPESTRIrm;
3380 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node, InFlag);
3381 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
3383 // Connect the flag usage to the last instruction created.
3384 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
3385 CurDAG->RemoveDeadNode(Node);
3390 if (foldLoadStoreIntoMemOperand(Node))
3398 bool X86DAGToDAGISel::
3399 SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
3400 std::vector<SDValue> &OutOps) {
3401 SDValue Op0, Op1, Op2, Op3, Op4;
3402 switch (ConstraintID) {
3404 llvm_unreachable("Unexpected asm memory constraint");
3405 case InlineAsm::Constraint_i:
3406 // FIXME: It seems strange that 'i' is needed here since it's supposed to
3407 // be an immediate and not a memory constraint.
3409 case InlineAsm::Constraint_o: // offsetable ??
3410 case InlineAsm::Constraint_v: // not offsetable ??
3411 case InlineAsm::Constraint_m: // memory
3412 case InlineAsm::Constraint_X:
3413 if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
3418 OutOps.push_back(Op0);
3419 OutOps.push_back(Op1);
3420 OutOps.push_back(Op2);
3421 OutOps.push_back(Op3);
3422 OutOps.push_back(Op4);
3426 /// This pass converts a legalized DAG into an X86-specific DAG,
3427 /// ready for instruction scheduling.
3428 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
3429 CodeGenOpt::Level OptLevel) {
3430 return new X86DAGToDAGISel(TM, OptLevel);