//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define DEBUG_TYPE "x86-isel"
STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//
/// This corresponds to X86AddressMode, but uses SDValue's instead of register
/// numbers for the leaves of the matched tree.
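/// An x86 memory operand has the general form
///   Segment : [BaseReg/FrameIndex + Scale * IndexReg + Disp],
/// and the fields below mirror those components.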
struct X86ISelAddressMode {
  enum { RegBase, FrameIndexBase } BaseType;

  // This is really a union, discriminated by BaseType!
  SDValue Base_Reg;
  int Base_FrameIndex;

  unsigned Scale;
  SDValue IndexReg;
  int32_t Disp;
  SDValue Segment;
  const GlobalValue *GV;
  const Constant *CP;
  const BlockAddress *BlockAddr;
  const char *ES;
  MCSymbol *MCSym;
  int JT;
  unsigned Align;    // CP alignment.
  unsigned char SymbolFlags;  // X86II::MO_*

  X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
        MCSym(nullptr), JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {}
  bool hasSymbolicDisplacement() const {
    return GV != nullptr || CP != nullptr || ES != nullptr ||
           MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
  }

  bool hasBaseOrIndexReg() const {
    return BaseType == FrameIndexBase ||
           IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
  }

  /// Return true if this addressing mode is already RIP-relative.
  bool isRIPRelative() const {
    if (BaseType != RegBase) return false;
    if (RegisterSDNode *RegNode =
          dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
      return RegNode->getReg() == X86::RIP;
    return false;
  }
  void setBaseReg(SDValue Reg) {
    BaseType = RegBase;
    Base_Reg = Reg;
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() {
    dbgs() << "X86ISelAddressMode " << this << '\n';
    dbgs() << "Base_Reg ";
    if (Base_Reg.getNode())
      Base_Reg.getNode()->dump();
    if (BaseType == FrameIndexBase)
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
    dbgs() << " Scale " << Scale << '\n'
           << "IndexReg ";
    if (IndexReg.getNode())
      IndexReg.getNode()->dump();
    dbgs() << " Disp " << Disp << '\n';
    dbgs() << " JT" << JT << " Align" << Align << '\n';
  }
#endif
};
//===--------------------------------------------------------------------===//
/// ISel - X86-specific code to select X86 machine instructions for
/// SelectionDAG operations.
///
class X86DAGToDAGISel final : public SelectionDAGISel {
  /// Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// If true, selector should try to optimize for code size instead of
  /// performance.
  bool OptForSize;

  /// If true, selector should try to optimize for minimum code size.
  bool OptForMinSize;

public:
  explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), OptForSize(false),
        OptForMinSize(false) {}

  StringRef getPassName() const override {
    return "X86 DAG->DAG Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Reset the subtarget each time through.
    Subtarget = &MF.getSubtarget<X86Subtarget>();
    SelectionDAGISel::runOnMachineFunction(MF);
    return true;
  }

  void EmitFunctionEntryCode() override;

  bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

  void PreprocessISelDAG() override;

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

private:
  void Select(SDNode *N) override;
  bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
  bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
  bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
  bool matchAddress(SDValue N, X86ISelAddressMode &AM);
  bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
  bool matchAdd(SDValue N, X86ISelAddressMode &AM, unsigned Depth);
  bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                               unsigned Depth);
  bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
  bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                  SDValue &Scale, SDValue &Index, SDValue &Disp,
                  SDValue &Segment);
  bool selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
                        SDValue &Scale, SDValue &Index, SDValue &Disp,
                        SDValue &Segment);
  bool selectMOV64Imm32(SDValue N, SDValue &Imm);
  bool selectLEAAddr(SDValue N, SDValue &Base,
                     SDValue &Scale, SDValue &Index, SDValue &Disp,
                     SDValue &Segment);
  bool selectLEA64_32Addr(SDValue N, SDValue &Base,
                          SDValue &Scale, SDValue &Index, SDValue &Disp,
                          SDValue &Segment);
  bool selectTLSADDRAddr(SDValue N, SDValue &Base,
                         SDValue &Scale, SDValue &Index, SDValue &Disp,
                         SDValue &Segment);
  bool selectScalarSSELoad(SDNode *Root, SDValue N,
                           SDValue &Base, SDValue &Scale,
                           SDValue &Index, SDValue &Disp,
                           SDValue &Segment,
                           SDValue &NodeWithChain);
  bool selectRelocImm(SDValue N, SDValue &Op);

  bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
                   SDValue &Base, SDValue &Scale,
                   SDValue &Index, SDValue &Disp,
                   SDValue &Segment);

  // Convenience method where P is also the root.
  bool tryFoldLoad(SDNode *P, SDValue N,
                   SDValue &Base, SDValue &Scale,
                   SDValue &Index, SDValue &Disp,
                   SDValue &Segment) {
    return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
  }
  /// Implement addressing mode selection for inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  void emitSpecialCodeForMain();
  inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
                                 SDValue &Base, SDValue &Scale,
                                 SDValue &Index, SDValue &Disp,
                                 SDValue &Segment) {
    Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
               ? CurDAG->getTargetFrameIndex(
                     AM.Base_FrameIndex,
                     TLI->getPointerTy(CurDAG->getDataLayout()))
               : AM.Base_Reg;
    Scale = getI8Imm(AM.Scale, DL);
    Index = AM.IndexReg;
    // These are 32-bit even in 64-bit mode since RIP-relative offset
    // is 32-bit.
    if (AM.GV)
      Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                            MVT::i32, AM.Disp,
                                            AM.SymbolFlags);
    else if (AM.CP)
      Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                           AM.Align, AM.Disp, AM.SymbolFlags);
    else if (AM.ES) {
      assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
      Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
    } else if (AM.MCSym) {
      assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
      assert(AM.SymbolFlags == 0 && "Non-zero symbol flags with MCSym.");
      Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
    } else if (AM.JT != -1) {
      assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
      Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
    } else if (AM.BlockAddr)
      Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                           AM.SymbolFlags);
    else
      Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);

    if (AM.Segment.getNode())
      Segment = AM.Segment;
    else
      Segment = CurDAG->getRegister(0, MVT::i32);
  }
  // Utility function to determine whether we should avoid selecting
  // immediate forms of instructions for better code size.
  // At a high level, we'd like to avoid such instructions when
  // we have similar constants used within the same basic block
  // that can be kept in a register.
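  //
  // For example, if a block contains two 'movl $1234, (mem)' stores,
  // materializing $1234 in a register once and storing that register twice
  // can be smaller than encoding the 4-byte immediate in both stores.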
  bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
    uint32_t UseCount = 0;

    // Do not want to hoist if we're not optimizing for size.
    // TODO: We'd like to remove this restriction.
    // See the comment in X86InstrInfo.td for more info.
    if (!OptForSize)
      return false;

    // Walk all the users of the immediate.
    for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {
      SDNode *User = *UI;

      // This user is already selected. Count it as a legitimate use and
      // move on.
      if (User->isMachineOpcode()) {
        UseCount++;
        continue;
      }

      // We want to count stores of immediates as real uses.
      if (User->getOpcode() == ISD::STORE &&
          User->getOperand(1).getNode() == N) {
        UseCount++;
        continue;
      }

      // We don't currently match users that have > 2 operands (except
      // for stores, which are handled above). Those instructions won't
      // match in ISel, for now, and would be counted incorrectly.
      // This may change in the future as we add additional instruction
      // types.
      if (User->getNumOperands() != 2)
        continue;

      // Immediates that are used for offsets as part of stack
      // manipulation should be left alone. These are typically
      // used to indicate SP offsets for argument passing and
      // will get pulled into stores/pushes (implicitly).
      if (User->getOpcode() == X86ISD::ADD ||
          User->getOpcode() == ISD::ADD    ||
          User->getOpcode() == X86ISD::SUB ||
          User->getOpcode() == ISD::SUB) {

        // Find the other operand of the add/sub.
        SDValue OtherOp = User->getOperand(0);
        if (OtherOp.getNode() == N)
          OtherOp = User->getOperand(1);

        // Don't count if the other operand is SP.
        RegisterSDNode *RegNode;
        if (OtherOp->getOpcode() == ISD::CopyFromReg &&
            (RegNode = dyn_cast_or_null<RegisterSDNode>(
               OtherOp->getOperand(1).getNode())))
          if ((RegNode->getReg() == X86::ESP) ||
              (RegNode->getReg() == X86::RSP))
            continue;
      }

      // ... otherwise, count this and move on.
      UseCount++;
    }

    // If we have more than 1 use, then recommend for hoisting.
    return (UseCount > 1);
  }
  /// Return a target constant with the specified value of type i8.
  inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
    return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
  }

  /// Return a target constant with the specified value, of type i32.
  inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
    return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
  }

  SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
                                      const SDLoc &DL) {
    assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
    uint64_t Index = N->getConstantOperandVal(1);
    MVT VecVT = N->getOperand(0).getSimpleValueType();
    return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
  }

  SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
                                    const SDLoc &DL) {
    assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
    uint64_t Index = N->getConstantOperandVal(2);
    MVT VecVT = N->getSimpleValueType(0);
    return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
  }
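  // For example, extracting the subvector that starts at element 4 of a
  // v8i32 source (scalar size 32 bits, VecWidth 128) yields immediate
  // (4 * 32) / 128 = 1, i.e. the upper 128-bit lane.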
  /// Return an SDNode that returns the value of the global base register.
  /// Output instructions required to initialize the global base register,
  /// if necessary.
  SDNode *getGlobalBaseReg();

  /// Return a reference to the TargetMachine, casted to the target-specific
  /// type.
  const X86TargetMachine &getTargetMachine() const {
    return static_cast<const X86TargetMachine &>(TM);
  }

  /// Return a reference to the TargetInstrInfo, casted to the target-specific
  /// type.
  const X86InstrInfo *getInstrInfo() const {
    return Subtarget->getInstrInfo();
  }

  /// \brief Address-mode matching performs shift-of-and to and-of-shift
  /// reassociation in order to expose more scaled addressing
  /// opportunities.
  bool ComplexPatternFuncMutatesDAG() const override {
    return true;
  }
  bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;

  /// Returns whether this is a relocatable immediate in the range
  /// [-2^Width .. 2^Width-1].
  template <unsigned Width> bool isSExtRelocImm(SDNode *N) const {
    if (auto *CN = dyn_cast<ConstantSDNode>(N))
      return isInt<Width>(CN->getSExtValue());
    return isSExtAbsoluteSymbolRef(Width, N);
  }
  // Indicates we should prefer to use a non-temporal load for this load.
  bool useNonTemporalLoad(LoadSDNode *N) const {
    if (!N->isNonTemporal())
      return false;

    unsigned StoreSize = N->getMemoryVT().getStoreSize();

    if (N->getAlignment() < StoreSize)
      return false;

    switch (StoreSize) {
    default: llvm_unreachable("Unsupported store size");
    case 16:
      return Subtarget->hasSSE41();
    case 32:
      return Subtarget->hasAVX2();
    case 64:
      return Subtarget->hasAVX512();
    }
  }
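  // The size/feature pairing above follows the available non-temporal load
  // instructions: MOVNTDQA (16 bytes, SSE4.1), VMOVNTDQA ymm (32 bytes,
  // AVX2), and VMOVNTDQA zmm (64 bytes, AVX-512).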
  bool foldLoadStoreIntoMemOperand(SDNode *Node);

  bool matchBEXTRFromAnd(SDNode *Node);

  bool isMaskZeroExtended(SDNode *N) const;
};
// Returns true if this masked compare can be implemented legally with this
// type.
static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == X86ISD::PCMPEQM || Opcode == X86ISD::PCMPGTM ||
      Opcode == X86ISD::CMPM || Opcode == X86ISD::TESTM ||
      Opcode == X86ISD::TESTNM || Opcode == X86ISD::CMPMU ||
      Opcode == X86ISD::CMPM_RND) {
    // We can get 256-bit 8 element types here without VLX being enabled. When
    // this happens we will use 512-bit operations and the mask will not be
    // the correct size.
    EVT OpVT = N->getOperand(0).getValueType();
    if (OpVT == MVT::v8i32 || OpVT == MVT::v8f32)
      return Subtarget->hasVLX();

    return true;
  }

  return false;
}
// Returns true if we can assume the writer of the mask has zero extended it
// for us.
bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
  // If this is an AND, check if we have a compare on either side. As long as
  // one side guarantees the mask is zero extended, the AND will preserve those
  // zeros.
  if (N->getOpcode() == ISD::AND)
    return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
           isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);

  return isLegalMaskCompare(N, Subtarget);
}
bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD: case X86ISD::SUB:
    case X86ISD::AND: case X86ISD::XOR: case X86ISD::OR:
    case ISD::ADD:    case ISD::ADDCARRY:
    case ISD::AND:    case ISD::OR:     case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In case where the increment is 1, then
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      //   movl    %gs:0, %eax
      //   leal    i@NTPOFF(%eax), %eax
      // instead of
      //   movl    $i@NTPOFF, %eax
      //   addl    %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}
/// Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  Ops.append(Call->op_begin() + 1, Call->op_end());
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}
/// Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain, this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}
void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptFor[Min]Size are used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction().optForSize();
  OptForMinSize = MF->getFunction().optForMinSize();
  assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize");

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target doesn't favor register indirect
        // calls.
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          // Only do this if the load can be folded into TC_RETURN.
          (Subtarget->is64Bit() ||
           !getTargetMachine().isPositionIndependent())))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }
    // Lower fpround and fpextend nodes that target the FP stack to be store and
    // load to the stack. This is a gross hack. We would like to simply mark
    // these as being illegal, but when we do that, legalize produces these when
    // it expands calls, then expands these in the same legalize pass. We would
    // like dag combine to be able to hack on these between the call expansion
    // and the node legalization. As such this pass basically does "really
    // late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
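    // For example, an FP_ROUND of f80 to f32 on the FP stack is emitted below
    // as a truncating f32 store to a stack temporary plus an f32 reload, so
    // the rounding is performed through memory.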
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    MVT SrcVT = N->getOperand(0).getSimpleValueType();
    MVT DstVT = N->getSimpleValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    const X86TargetLowering *X86Lowering =
        static_cast<const X86TargetLowering *>(TLI);
    bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    SDLoc dl(N);

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store =
        CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, N->getOperand(0),
                              MemTmp, MachinePointerInfo(), MemVT);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(), MemVT);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havok on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
/// Emit any code that needs to be executed only in the main function.
void X86DAGToDAGISel::emitSpecialCodeForMain() {
  if (Subtarget->isTargetCygMing()) {
    TargetLowering::ArgListTy Args;
    auto &DL = CurDAG->getDataLayout();

    TargetLowering::CallLoweringInfo CLI(*CurDAG);
    CLI.setChain(CurDAG->getRoot())
        .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
                   CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
                   std::move(Args));
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    CurDAG->setRoot(Result.second);
  }
}
void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  const Function &F = MF->getFunction();
  if (F.hasExternalLinkage() && F.getName() == "main")
    emitSpecialCodeForMain();
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}
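// Note the convention used by foldOffsetIntoAddress and the match* helpers
// below: returning true means the node could not be matched, while returning
// false means the addressing mode was successfully updated.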
bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  // Cannot combine ExternalSymbol displacements with integer offsets.
  if (Offset != 0 && (AM.ES || AM.MCSym))
    return true;

  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}
bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
        (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
         Subtarget->isTargetFuchsia()))
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      // Address space 258 is not handled here, because it is not used to
      // address TLS areas.
      }

  return true;
}
/// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
/// mode. These wrap things that will resolve down into a symbol reference.
/// If no match is possible, this returns true, otherwise it returns false.
bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (foldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (foldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
      AM.MCSym = S->getMCSymbol();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.BlockAddr = BA->getBlockAddress();
      AM.SymbolFlags = BA->getTargetFlags();
      if (foldOffsetIntoAddress(BA->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else
      llvm_unreachable("Unhandled symbol reference node.");

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }
  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
      AM.MCSym = S->getMCSymbol();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      AM.BlockAddr = BA->getBlockAddress();
      AM.Disp += BA->getOffset();
      AM.SymbolFlags = BA->getTargetFlags();
    } else
      llvm_unreachable("Unhandled symbol reference node.");
    return false;
  }

  return true;
}
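// For example, under the small code model a load of a global 'g' wrapped in
// WrapperRIP matches as g(%rip) (AM.GV = g with %rip as the base register),
// while the non-RIP-relative path above matches the bare displacement form.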
/// Add the specified node to the specified addressing mode, returning true if
/// it cannot be done. This just pattern matches for the addressing mode.
bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (matchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr &&
      AM.IndexReg.getNode() == nullptr &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}
bool X86DAGToDAGISel::matchAdd(SDValue N, X86ISelAddressMode &AM,
                               unsigned Depth) {
  // Add an artificial use to this node so that we can keep track of
  // it if it gets CSE'd with a different node.
  HandleSDNode Handle(N);

  X86ISelAddressMode Backup = AM;
  if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
    return false;
  AM = Backup;

  // Try again after commuting the operands.
  if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
    return false;
  AM = Backup;

  // If we couldn't fold both operands into the address at the same time,
  // see if we can just put each operand into a register and fold at least
  // the add.
  if (AM.BaseType == X86ISelAddressMode::RegBase &&
      !AM.Base_Reg.getNode() &&
      !AM.IndexReg.getNode()) {
    N = Handle.getValue();
    AM.Base_Reg = N.getOperand(0);
    AM.IndexReg = N.getOperand(1);
    AM.Scale = 1;
    return false;
  }
  N = Handle.getValue();
  return true;
}
// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// function is used.
static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode()->getIterator(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}
// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
// safe. This allows us to convert the shift and and into an h-register
// extract and a scaled index. Returns false if the simplification is
// performed.
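//
// Worked example (illustrative): with C1 == 2 the pattern is
//   (X >> 6) & 0x3fc  -->  ((X >> 8) & 0xff) << 2
// so ((X >> 8) & 0xff) becomes the index register and the shl-by-2 is
// absorbed into the addressing mode as scale 4.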
static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, DL, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, Eight);
  insertDAGNode(DAG, N, Srl);
  insertDAGNode(DAG, N, NewMask);
  insertDAGNode(DAG, N, And);
  insertDAGNode(DAG, N, ShlCount);
  insertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);

  AM.Scale = (1 << ScaleLog);
  AM.IndexReg = And;
  return false;
}
// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
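//
// Worked example (illustrative): with C1 == 1 and C2 == 0xff00,
//   (X << 1) & 0xff00  -->  (X & 0x7f80) << 1
// and the outer shl-by-1 then folds into the addressing mode as scale 2.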
static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, NewMask);
  insertDAGNode(DAG, N, NewAnd);
  insertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}
// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turns into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %ecx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
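//
// Relating the example to the code below (an illustrative walkthrough): the
// canonicalized input is (and (srl x, 9), 0x7c). The mask 0x7c has two
// trailing zeros, so AMShiftAmt becomes 2, the shift is widened to
// (srl x, 11), and the shl-by-2 is absorbed into the addressing mode as
// scale 4.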
static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = countLeadingZeros(Mask);
  unsigned MaskTZ = countTrailingZeros(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that mask is a continuous run of bits.
  if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
  if (MaskLZ < ScaleDown)
    return true;
  MaskLZ -= ScaleDown;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
                          X.getOperand(0).getSimpleValueType().getSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits =
    APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
  KnownBits Known;
  DAG.computeKnownBits(X, Known);
  if (MaskedHighBits != Known.Zero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  MVT VT = N.getSimpleValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
    insertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  SDLoc DL(N);
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, NewSRLAmt);
  insertDAGNode(DAG, N, NewSRL);
  insertDAGNode(DAG, N, NewSHLAmt);
  insertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}
bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  SDLoc dl(N);
  DEBUG({
    dbgs() << "MatchAddress: ";
    AM.dump();
  });
  // Limit recursion.
  if (Depth > 5)
    return matchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!(AM.ES || AM.MCSym) && AM.JT != -1)
      return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }
  switch (N.getOpcode()) {
  default: break;
  case ISD::LOCAL_RECOVER: {
    if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
      if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
        // Use the symbol and don't prefix it.
        AM.MCSym = ESNode->getMCSymbol();
        return false;
      }
    break;
  }
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!foldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!matchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getOperand(0);
          ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!foldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;
  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before
    // passing it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    LLVM_FALLTHROUGH;
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
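    // For example, X*9 selects as "lea (%rax,%rax,8)": base and index are
    // both X, with scale 8.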
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr) {
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getOperand(1))) {
            Reg = MulVal.getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (foldOffsetIntoAddress(Disp, AM))
              Reg = N.getOperand(0);
          } else {
            Reg = N.getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;
  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address, and the
    // index field is unused, use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction; however,
    // it costs an additional mov if the index register has other uses.
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    // FIXME: Don't rely on DELETED_NODEs.
    if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
         AM.Base_Reg->getOpcode() != ISD::DELETED_NODE &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, dl, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    insertDAGNode(*CurDAG, Handle.getValue(), Zero);
    insertDAGNode(*CurDAG, Handle.getValue(), Neg);
    return false;
  }
  case ISD::ADD:
    if (!matchAdd(N, AM, Depth))
      return false;
    break;

  case ISD::OR:
    // We want to look through a transform in InstCombine and DAGCombiner that
    // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
    // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
    // An 'lea' can then be used to match the shift (multiply) and add:
    //   and $1, %esi
    //   lea (%rsi, %rdi, 8), %rax
    if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
        !matchAdd(N, AM, Depth))
      return false;
    break;
  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!foldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return matchAddressBase(N, AM);
}
/// Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (!AM.IndexReg.getNode()) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}
/// Helper for selectVectorAddr. Handles things that can be folded into a
/// gather scatter address. The index register and scale should have already
/// been selected.
bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
  // TODO: Support other operations.
  switch (N.getOpcode()) {
  case X86ISD::Wrapper:
    if (!matchWrapper(N, AM))
      return false;
    break;
  }

  return matchAddressBase(N, AM);
}

bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                       SDValue &Scale, SDValue &Index,
                                       SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;
  auto *Mgs = cast<X86MaskedGatherScatterSDNode>(Parent);
  AM.IndexReg = Mgs->getIndex();
  AM.Scale = Mgs->getValue().getScalarValueSizeInBits() / 8;

  unsigned AddrSpace = cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
  // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
  if (AddrSpace == 256)
    AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
  if (AddrSpace == 257)
    AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  if (AddrSpace == 258)
    AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);

  // If Base is 0, the whole address is in index and the Scale is 1
  if (isa<ConstantSDNode>(N)) {
    assert(cast<ConstantSDNode>(N)->isNullValue() &&
           "Unexpected base in gather/scatter");
  }
  // Otherwise, try to match into the base and displacement fields.
  else if (matchVectorAddress(N, AM))
    return false;

  MVT VT = N.getSimpleValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
  return true;
}
/// Returns true if it is able to pattern match an addressing mode.
/// It returns the operands which make up the maximal addressing mode it can
/// match by reference.
///
/// Parent is the parent node of the addr operand that is being matched. It
/// is always a load, store, atomic node, or null. It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // These opcodes are all the nodes that have an "addr:$ptr" operand
      // but are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
    if (AddrSpace == 258)
      AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
  }

  if (matchAddress(N, AM))
    return false;

  MVT VT = N.getSimpleValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
  return true;
}
// We can only fold a load if all nodes between it and the root node have a
// single use. If there are additional uses, we could end up duplicating the
// load.
static bool hasSingleUsesFromRoot(SDNode *Root, SDNode *N) {
  SDNode *User = *N->use_begin();
  while (User != Root) {
    if (!User->hasOneUse())
      return false;
    User = *User->use_begin();
  }
  return true;
}
/// Match a scalar SSE load. In particular, we want to match a load whose top
/// elements are either undef or zeros. The load flavor is derived from the
/// type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   chain output.
bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  // We can allow a full vector load here since narrowing a load is ok.
  if (ISD::isNON_EXTLoad(N.getNode())) {
    PatternNodeWithChain = N;
    if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, *N->use_begin(), Root, OptLevel) &&
        hasSingleUsesFromRoot(Root, N.getNode())) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // We can also match the special zero extended load opcode.
  if (N.getOpcode() == X86ISD::VZEXT_LOAD) {
    PatternNodeWithChain = N;
    if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, *N->use_begin(), Root, OptLevel) &&
        hasSingleUsesFromRoot(Root, N.getNode())) {
      auto *MI = cast<MemIntrinsicSDNode>(PatternNodeWithChain);
      return selectAddr(MI, MI->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // Need to make sure that the SCALAR_TO_VECTOR and load are both only used
  // once. Otherwise the load might get duplicated and the chain output of the
  // duplicate load will not be observed by all dependencies.
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR && N.getNode()->hasOneUse()) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel) &&
        hasSingleUsesFromRoot(Root, N.getNode())) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse()) {
    PatternNodeWithChain = N.getOperand(0).getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel) &&
        hasSingleUsesFromRoot(Root, N.getNode())) {
      // Okay, this is a zero extending load. Fold it.
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  return false;
}
bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
  if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    uint64_t ImmVal = CN->getZExtValue();
    if (!isUInt<32>(ImmVal))
      return false;

    Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i64);
    return true;
  }

  // In static codegen with small code model, we can get the address of a label
  // into a register with 'movl'. TableGen has already made sure we're looking
  // at a label of some kind.
  assert(N->getOpcode() == X86ISD::Wrapper &&
         "Unexpected node type for MOV32ri64");
  N = N.getOperand(0);

  // At least GNU as does not accept 'movl' for TPOFF relocations.
  // FIXME: We could use 'movl' when we know we are targeting MC.
  if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
    return false;

  Imm = N;
  if (N->getOpcode() != ISD::TargetGlobalAddress)
    return TM.getCodeModel() == CodeModel::Small;

  Optional<ConstantRange> CR =
      cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
  if (!CR)
    return TM.getCodeModel() == CodeModel::Small;

  return CR->getUnsignedMax().ult(1ull << 32);
}
bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
                                         SDValue &Scale, SDValue &Index,
                                         SDValue &Disp, SDValue &Segment) {
  // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
  SDLoc DL(N);

  if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
    return false;

  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
  if (RN && RN->getReg() == 0)
    Base = CurDAG->getRegister(0, MVT::i64);
  else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
    // Base could already be %rip, particularly in the x32 ABI.
    Base = SDValue(CurDAG->getMachineNode(
                       TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                       CurDAG->getTargetConstant(0, DL, MVT::i64),
                       Base,
                       CurDAG->getTargetConstant(X86::sub_32bit, DL, MVT::i32)),
                   0);
  }

  RN = dyn_cast<RegisterSDNode>(Index);
  if (RN && RN->getReg() == 0)
    Index = CurDAG->getRegister(0, MVT::i64);
  else {
    assert(Index.getValueType() == MVT::i32 &&
           "Expect to be extending 32-bit registers for use in LEA");
    Index = SDValue(CurDAG->getMachineNode(
                        TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                        CurDAG->getTargetConstant(0, DL, MVT::i64),
                        Index,
                        CurDAG->getTargetConstant(X86::sub_32bit, DL,
                                                  MVT::i32)),
                    0);
  }

  return true;
}
1765 /// Calls SelectAddr and determines if the maximal addressing
1766 /// mode it matches can be cost effectively emitted as an LEA instruction.
1767 bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
1768 SDValue &Base, SDValue &Scale,
1769 SDValue &Index, SDValue &Disp,
1771 X86ISelAddressMode AM;
1773 // Save the DL and VT before calling matchAddress, it can invalidate N.
1775 MVT VT = N.getSimpleValueType();
1777 // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
1779 SDValue Copy = AM.Segment;
1780 SDValue T = CurDAG->getRegister(0, MVT::i32);
1782 if (matchAddress(N, AM))
1784 assert (T == AM.Segment);
1787 unsigned Complexity = 0;
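  // Each matched component (base register, index register, scale > 1, a
  // displacement combined with a register) bumps Complexity; the LEA is only
  // emitted when Complexity > 2, i.e. when it folds enough work to beat
  // separate shifts and adds. For example, "leal 4(%rdi,%rsi,2), %eax"
  // replaces a shift and two adds.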
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  } else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, always use LEA to materialize RIP-relative addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, DL, Base, Scale, Index, Disp, Segment);
  return true;
}

/// This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
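    // In 32-bit mode the TLS address is formed relative to the GOT, which the
    // general-dynamic sequence reaches through %ebx, i.e. sym(,%ebx,1).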
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
  return true;
}

bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
  if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
    Op = CurDAG->getTargetConstant(CN->getAPIntValue(), SDLoc(CN),
                                   N.getValueType());
    return true;
  }

  // Keep track of the original value type and whether this value was
  // truncated. If we see a truncation from pointer type to VT that truncates
  // bits that are known to be zero, we can use a narrow reference.
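  // For example, if @sym carries an !absolute_symbol range of [0, 0x10000),
  // then (i16 (trunc (iPTR @sym))) drops only known-zero bits, so @sym can be
  // referenced directly as a 16-bit immediate.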
  EVT VT = N.getValueType();
  bool WasTruncated = false;
  if (N.getOpcode() == ISD::TRUNCATE) {
    WasTruncated = true;
    N = N.getOperand(0);
  }

  if (N.getOpcode() != X86ISD::Wrapper)
    return false;

  // We can only use non-GlobalValues as immediates if they were not truncated,
  // as we do not have any range information. If we have a GlobalValue and the
  // address was not truncated, we can select it as an operand directly.
  unsigned Opc = N.getOperand(0)->getOpcode();
  if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
    Op = N.getOperand(0);
    // We can only select the operand directly if we didn't have to look past a
    // truncate.
    return !WasTruncated;
  }

  // Check that the global's range fits into VT.
  auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
  Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
  if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
    return false;

  // Okay, we can use a narrow reference.
  Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
                                      GA->getOffset(), GA->getTargetFlags());
  return true;
}

bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, Root) ||
      !IsLegalToFold(N, P, Root, OptLevel))
    return false;

  return selectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// Return an SDNode that returns the value of the global base register.
/// Output instructions required to initialize the global base register,
/// if necessary.
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  auto &DL = MF->getDataLayout();
  return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
}

bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
  if (N->getOpcode() == ISD::TRUNCATE)
    N = N->getOperand(0).getNode();
  if (N->getOpcode() != X86ISD::Wrapper)
    return false;

  auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
  if (!GA)
    return false;

  Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
  return CR && CR->getSignedMin().sge(-1ull << Width) &&
         CR->getSignedMax().slt(1ull << Width);
}

/// Test whether the given X86ISD::CMP node has any uses which require the SF
/// or OF bits to be accurate.
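/// For example, after "testb $0x80, %al" only signed predicates (SETL, JG,
/// CMOVS, ...) read a meaningful SF; the unsigned and equality users listed
/// below ignore SF/OF, which is what makes the mask narrowing in Select()
/// safe.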
static bool hasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      continue;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
        X86::EFLAGS)
      continue;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_1: case X86::JAE_1: case X86::JB_1: case X86::JBE_1:
      case X86::JE_1: case X86::JNE_1: case X86::JP_1: case X86::JNP_1:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        break;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}

/// Test whether the given node which sets flags has any uses which require the
/// CF flag to be accurate.
static bool hasNoCarryFlagUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); UI != UE;
       ++UI) {
    // Only check things that use the flags.
    if (UI.getUse().getResNo() != 1)
      continue;
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      continue;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
      continue;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
         FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1)
        continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode())
        return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // Comparisons which don't examine the CF flag.
      case X86::SETOr: case X86::SETNOr: case X86::SETEr: case X86::SETNEr:
      case X86::SETSr: case X86::SETNSr: case X86::SETPr: case X86::SETNPr:
      case X86::SETLr: case X86::SETGEr: case X86::SETLEr: case X86::SETGr:
      case X86::JO_1: case X86::JNO_1: case X86::JE_1: case X86::JNE_1:
      case X86::JS_1: case X86::JNS_1: case X86::JP_1: case X86::JNP_1:
      case X86::JL_1: case X86::JGE_1: case X86::JLE_1: case X86::JG_1:
      case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr:
      case X86::CMOVO16rm: case X86::CMOVO32rm: case X86::CMOVO64rm:
      case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr:
      case X86::CMOVNO16rm: case X86::CMOVNO32rm: case X86::CMOVNO64rm:
      case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr:
      case X86::CMOVE16rm: case X86::CMOVE32rm: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr:
      case X86::CMOVNE16rm: case X86::CMOVNE32rm: case X86::CMOVNE64rm:
      case X86::CMOVS16rr: case X86::CMOVS32rr: case X86::CMOVS64rr:
      case X86::CMOVS16rm: case X86::CMOVS32rm: case X86::CMOVS64rm:
      case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr:
      case X86::CMOVNS16rm: case X86::CMOVNS32rm: case X86::CMOVNS64rm:
      case X86::CMOVP16rr: case X86::CMOVP32rr: case X86::CMOVP64rr:
      case X86::CMOVP16rm: case X86::CMOVP32rm: case X86::CMOVP64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr:
      case X86::CMOVNP16rm: case X86::CMOVNP32rm: case X86::CMOVNP64rm:
      case X86::CMOVL16rr: case X86::CMOVL32rr: case X86::CMOVL64rr:
      case X86::CMOVL16rm: case X86::CMOVL32rm: case X86::CMOVL64rm:
      case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr:
      case X86::CMOVGE16rm: case X86::CMOVGE32rm: case X86::CMOVGE64rm:
      case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr:
      case X86::CMOVLE16rm: case X86::CMOVLE32rm: case X86::CMOVLE64rm:
      case X86::CMOVG16rr: case X86::CMOVG32rr: case X86::CMOVG64rr:
      case X86::CMOVG16rm: case X86::CMOVG32rm: case X86::CMOVG64rm:
        break;
      // Anything else: assume conservatively.
      default:
        return false;
      }
    }
  }
  return true;
}

/// Check whether or not the chain ending in StoreNode is suitable for doing
/// the {load; op; store} to modify transformation.
static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
                                        SDValue StoredVal, SelectionDAG *CurDAG,
                                        LoadSDNode *&LoadNode,
                                        SDValue &InputChain) {
  // Is the stored value result 0 of the load?
  if (StoredVal.getResNo() != 0) return false;

  // Are there other uses of the loaded value than the inc or dec?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(0);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode())) return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);

  // Is store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  // Check if the chain is produced by the load or is a TokenFactor with
  // the load output chain as an operand. Return InputChain by reference.
  SDValue Chain = StoreNode->getChain();

  bool ChainCheck = false;
  if (Chain == Load.getValue(1)) {
    ChainCheck = true;
    InputChain = LoadNode->getChain();
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    SmallVector<SDValue, 4> ChainOps;
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        ChainCheck = true;
        // Drop Load, but keep its chain. No cycle check necessary.
        ChainOps.push_back(Load.getOperand(0));
        continue;
      }

      // Make sure using Op as part of the chain would not cause a cycle here.
      // In theory, we could check whether the chain node is a predecessor of
      // the load. But that can be very expensive. Instead visit the uses and
      // make sure they all have smaller node id than the load.
      int LoadId = LoadNode->getNodeId();
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = UI->use_end(); UI != UE; ++UI) {
        if (UI.getUse().getResNo() != 0)
          continue;
        if (UI->getNodeId() > LoadId)
          return false;
      }
      ChainOps.push_back(Op);
    }

    // Make a new TokenFactor with all the other input chains except
    // for the load.
    InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
                                 MVT::Other, ChainOps);
  }
  if (!ChainCheck)
    return false;

  return true;
}

// Change a chain of {load; op; store} of the same value into a simple op
// through memory of that value, if the uses of the modified value and its
// address are suitable.
//
// The tablegen memory operand pattern is currently not able to match the
// case where the EFLAGS on the original operation are used.
//
// To move this to tablegen, we'll need to improve tablegen to allow flags to
// be transferred from a node in the pattern to the result node, probably with
// a new keyword. For example, we have this
//   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
//                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
//                    (implicit EFLAGS)]>;
// but maybe need something like this
//   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
//                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
//                    (transferrable EFLAGS)]>;
//
// Until then, we manually fold these and instruction select the operation
// manually.
bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
  StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
  SDValue StoredVal = StoreNode->getOperand(1);
  unsigned Opc = StoredVal->getOpcode();

  // Before we try to select anything, make sure this is memory operand size
  // and opcode we can handle. Note that this must match the code below that
  // actually lowers the opcodes.
  EVT MemVT = StoreNode->getMemoryVT();
  if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
      MemVT != MVT::i8)
    return false;
  switch (Opc) {
  default:
    return false;
  case X86ISD::INC: case X86ISD::DEC: case X86ISD::ADD: case X86ISD::SUB:
  case X86ISD::AND: case X86ISD::OR: case X86ISD::XOR:
    break;
  }

  LoadSDNode *LoadNode = nullptr;
  SDValue InputChain;
  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadNode,
                                   InputChain))
    return false;

  SDValue Base, Scale, Index, Disp, Segment;
  if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
                  Segment))
    return false;

  auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
                          unsigned Opc8) {
    switch (MemVT.getSimpleVT().SimpleTy) {
    case MVT::i64: return Opc64;
    case MVT::i32: return Opc32;
    case MVT::i16: return Opc16;
    case MVT::i8:  return Opc8;
    default:
      llvm_unreachable("Invalid size!");
    }
  };
  MachineSDNode *Result;
  switch (Opc) {
  case X86ISD::INC:
  case X86ISD::DEC: {
    unsigned NewOpc =
        Opc == X86ISD::INC
            ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
            : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
    const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
    Result =
        CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other, Ops);
    break;
  }
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::AND:
  case X86ISD::OR:
  case X86ISD::XOR: {
    auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
      switch (Opc) {
      case X86ISD::ADD:
        return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
                            X86::ADD8mr);
      case X86ISD::SUB:
        return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
                            X86::SUB8mr);
      case X86ISD::AND:
        return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
                            X86::AND8mr);
      case X86ISD::OR:
        return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
      case X86ISD::XOR:
        return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
                            X86::XOR8mr);
      default:
        llvm_unreachable("Invalid opcode!");
      }
    };
    auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) {
      switch (Opc) {
      case X86ISD::ADD:
        return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0);
      case X86ISD::SUB:
        return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0);
      case X86ISD::AND:
        return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0);
      case X86ISD::OR:
        return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0);
      case X86ISD::XOR:
        return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0);
      default:
        llvm_unreachable("Invalid opcode!");
      }
    };
    auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
      switch (Opc) {
      case X86ISD::ADD:
        return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
                            X86::ADD8mi);
      case X86ISD::SUB:
        return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
                            X86::SUB8mi);
      case X86ISD::AND:
        return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
                            X86::AND8mi);
      case X86ISD::OR:
        return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
                            X86::OR8mi);
      case X86ISD::XOR:
        return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
                            X86::XOR8mi);
      default:
        llvm_unreachable("Invalid opcode!");
      }
    };

    unsigned NewOpc = SelectRegOpcode(Opc);
    SDValue Operand = StoredVal->getOperand(1);

    // See if the operand is a constant that we can fold into an immediate
    // operand.
    if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
      auto OperandV = OperandC->getAPIntValue();

      // Check if we can shrink the operand enough to fit in an immediate (or
      // fit into a smaller immediate) by negating it and switching the
      // operation.
      if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
          ((MemVT != MVT::i8 && OperandV.getMinSignedBits() > 8 &&
            (-OperandV).getMinSignedBits() <= 8) ||
           (MemVT == MVT::i64 && OperandV.getMinSignedBits() > 32 &&
            (-OperandV).getMinSignedBits() <= 32)) &&
          hasNoCarryFlagUses(StoredVal.getNode())) {
        OperandV = -OperandV;
        Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
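        // For example, "addl $128, (%rdi)" needs a 4-byte immediate because
        // 128 is not a valid imm8 (range -128..127), while the equivalent
        // "subl $-128, (%rdi)" fits in a single byte.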
      }

      // First try to fit this into an Imm8 operand. If it doesn't fit, then try
      // the larger immediate operand.
      if (MemVT != MVT::i8 && OperandV.getMinSignedBits() <= 8) {
        Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
        NewOpc = SelectImm8Opcode(Opc);
      } else if (OperandV.getActiveBits() <= MemVT.getSizeInBits() &&
                 (MemVT != MVT::i64 || OperandV.getMinSignedBits() <= 32)) {
        Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
        NewOpc = SelectImmOpcode(Opc);
      }
    }

    const SDValue Ops[] = {Base, Scale, Index, Disp,
                           Segment, Operand, InputChain};
    Result =
        CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other, Ops);
    break;
  }
  default:
    llvm_unreachable("Invalid opcode!");
  }

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
  MemOp[0] = StoreNode->getMemOperand();
  MemOp[1] = LoadNode->getMemOperand();
  Result->setMemRefs(MemOp, MemOp + 2);

  ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
  ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
  CurDAG->RemoveDeadNode(Node);
  return true;
}

// See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
bool X86DAGToDAGISel::matchBEXTRFromAnd(SDNode *Node) {
  MVT NVT = Node->getSimpleValueType(0);
  SDLoc dl(Node);

  SDValue N0 = Node->getOperand(0);
  SDValue N1 = Node->getOperand(1);

  if (!Subtarget->hasBMI() && !Subtarget->hasTBM())
    return false;

  // Must have a shift right.
  if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
    return false;

  // Shift can't have additional users.
  if (!N0->hasOneUse())
    return false;

  // Only supported for 32 and 64 bits.
  if (NVT != MVT::i32 && NVT != MVT::i64)
    return false;

  // Shift amount and RHS of the and must be constant.
  ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1);
  ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  if (!MaskCst || !ShiftCst)
    return false;

  // And RHS must be a mask.
  uint64_t Mask = MaskCst->getZExtValue();
  if (!isMask_64(Mask))
    return false;

  uint64_t Shift = ShiftCst->getZExtValue();
  uint64_t MaskSize = countPopulation(Mask);

  // Don't interfere with something that can be handled by extracting AH.
  // TODO: If we are able to fold a load, BEXTR might still be better than AH.
  if (Shift == 8 && MaskSize == 8)
    return false;

  // Make sure we are only using bits that were in the original value, not
  // shifted in.
  if (Shift + MaskSize > NVT.getSizeInBits())
    return false;

  SDValue New = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
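  // The BEXTR control operand packs the start bit in bits 7:0 and the field
  // length in bits 15:8; e.g. (x >> 4) & 0xFFF becomes the control value
  // 0x0C04 (start = 4, length = 12).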
  unsigned ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
  unsigned MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;

  // BMI requires the immediate to be placed in a register.
  if (!Subtarget->hasTBM()) {
    ROpc = NVT == MVT::i64 ? X86::BEXTR64rr : X86::BEXTR32rr;
    MOpc = NVT == MVT::i64 ? X86::BEXTR64rm : X86::BEXTR32rm;
    New = SDValue(CurDAG->getMachineNode(X86::MOV32ri, dl, NVT, New), 0);
    if (NVT == MVT::i64) {
      New =
          SDValue(CurDAG->getMachineNode(
                      TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                      CurDAG->getTargetConstant(0, dl, MVT::i64), New,
                      CurDAG->getTargetConstant(X86::sub_32bit, dl, MVT::i32)),
                  0);
    }
  }

  MachineSDNode *NewNode;
  SDValue Input = N0->getOperand(0);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, New, Input.getOperand(0) };
    SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other);
    NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
    // Update the chain.
    ReplaceUses(Input.getValue(1), SDValue(NewNode, 1));
    // Record the mem-refs.
    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
    MemOp[0] = cast<LoadSDNode>(Input)->getMemOperand();
    NewNode->setMemRefs(MemOp, MemOp + 1);
  } else {
    NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, Input, New);
  }

  ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
  CurDAG->RemoveDeadNode(Node);
  return true;
}

void X86DAGToDAGISel::Select(SDNode *Node) {
  MVT NVT = Node->getSimpleValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  SDLoc dl(Node);

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
    Node->setNodeId(-1);
    return;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case ISD::BRIND: {
    if (Subtarget->isTargetNaCl())
      // NaCl has its own pass where jmp %r32 are converted to jmp %r64. We
      // leave the instruction alone.
      break;
    if (Subtarget->isTarget64BitILP32()) {
      // Converts a 32-bit register to a 64-bit, zero-extended version of
      // it. This is needed because x86-64 can do many things, but jmp %r32
      // ain't one of them.
      const SDValue &Target = Node->getOperand(1);
      assert(Target.getSimpleValueType() == llvm::MVT::i32);
      SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, EVT(MVT::i64));
      SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other,
                                      Node->getOperand(0), ZextTarget);
      ReplaceNode(Node, Brind.getNode());
      SelectCode(ZextTarget.getNode());
      SelectCode(Brind.getNode());
      return;
    }
    break;
  }
  case X86ISD::GlobalBaseReg:
    ReplaceNode(Node, getGlobalBaseReg());
    return;

  case X86ISD::SELECT:
  case X86ISD::SHRUNKBLEND: {
    // SHRUNKBLEND selects like a regular VSELECT. Same with X86ISD::SELECT.
    SDValue VSelect = CurDAG->getNode(
        ISD::VSELECT, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
        Node->getOperand(1), Node->getOperand(2));
    ReplaceNode(Node, VSelect.getNode());
    SelectCode(VSelect.getNode());
    // We already called ReplaceUses.
    return;
  }

  case ISD::AND:
    // Try to match BEXTR/BEXTRI instruction.
    if (matchBEXTRFromAnd(Node))
      return;

    LLVM_FALLTHROUGH;
  case ISD::OR:
  case ISD::XOR: {
    // For operations of the form (x << C1) op C2, check if we can use a smaller
    // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
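    // For example, (x << 8) | 0x1200 becomes ((x | 0x12) << 8): 0x12 fits in
    // a 1-byte immediate while 0x1200 does not.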
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;

    // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;

    int64_t Val = Cst->getSExtValue();
    uint64_t ShlVal = ShlCst->getZExtValue();

    // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR, AND is unaffected.
    uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
    if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
      break;

    unsigned ShlOp, AddOp, Op;
    MVT CstVT = NVT;

    // Check the minimum bitwidth for the new constant.
    // TODO: AND32ri is the same as AND64ri32 with zext imm.
    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;

    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;
      AddOp = X86::ADD32rr;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op =  X86::OR32ri8; break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;
      AddOp = X86::ADD64rr;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT==MVT::i8?  X86::OR64ri8 :  X86::OR64ri32; break;
      case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }

    // Emit the smaller op and the shift.
    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, dl, CstVT);
    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0), NewCst);
    if (ShlVal == 1)
      CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0),
                           SDValue(New, 0));
    else
      CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                           getI8Imm(ShlVal, dl));
    return;
  }
  case X86ISD::UMUL8:
  case X86ISD::SMUL8: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    Opc = (Opcode == X86ISD::SMUL8 ? X86::IMUL8r : X86::MUL8r);
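    // The one-operand 8-bit multiply reads AL implicitly and writes the full
    // 16-bit product to AX, so the first operand is glued in via a copy to AL.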
    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::AL,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);

    ReplaceNode(Node, CNode);
    return;
  }

  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned LoReg;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    // MVT::i8 is handled by X86ISD::UMUL8.
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);

    ReplaceNode(Node, CNode);
    return;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    bool hasBMI2 = Subtarget->hasBMI2();
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
                     MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
      case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
                     MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned SrcReg, LoReg, HiReg;
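    // Legacy MUL/IMUL take one source in *AX and write the double-width
    // product to *DX:*AX. BMI2's MULX instead reads its implicit source from
    // EDX/RDX and writes hi/lo to two explicit destinations without touching
    // EFLAGS, which is why the MULX cases below use SrcReg = EDX/RDX and
    // leave LoReg/HiReg at 0.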
2646 default: llvm_unreachable("Unknown MUL opcode!");
2649 SrcReg = LoReg = X86::AL; HiReg = X86::AH;
2653 SrcReg = LoReg = X86::AX; HiReg = X86::DX;
2657 SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
2661 SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
2664 SrcReg = X86::EDX; LoReg = HiReg = 0;
2667 SrcReg = X86::RDX; LoReg = HiReg = 0;
2671 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2672 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2673 // Multiply is commmutative.
2675 foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2680 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
2681 N0, SDValue()).getValue(1);
2682 SDValue ResHi, ResLo;
2686 MachineSDNode *CNode = nullptr;
2687 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
2689 if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
2690 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
2691 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
2692 ResHi = SDValue(CNode, 0);
2693 ResLo = SDValue(CNode, 1);
2694 Chain = SDValue(CNode, 2);
2695 InFlag = SDValue(CNode, 3);
2697 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
2698 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
2699 Chain = SDValue(CNode, 0);
2700 InFlag = SDValue(CNode, 1);
2703 // Update the chain.
2704 ReplaceUses(N1.getValue(1), Chain);
2705 // Record the mem-refs
2706 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2707 MemOp[0] = cast<LoadSDNode>(N1)->getMemOperand();
2708 CNode->setMemRefs(MemOp, MemOp + 1);
2710 SDValue Ops[] = { N1, InFlag };
2711 if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
2712 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
2713 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2714 ResHi = SDValue(CNode, 0);
2715 ResLo = SDValue(CNode, 1);
2716 InFlag = SDValue(CNode, 2);
2718 SDVTList VTs = CurDAG->getVTList(MVT::Glue);
2719 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2720 InFlag = SDValue(CNode, 0);
2724 // Prevent use of AH in a REX instruction by referencing AX instead.
2725 if (HiReg == X86::AH && Subtarget->is64Bit() &&
2726 !SDValue(Node, 1).use_empty()) {
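      // e.g. "movzbl %ah, %r8d" is unencodable: once a REX prefix is present,
      // register number 4 in a reg field means SPL, not AH.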
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
                    CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                              CurDAG->getTargetConstant(8, dl, MVT::i8)),
                       0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
                  CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      if (!ResLo.getNode()) {
        assert(LoReg && "Register for low half is not defined!");
        ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
                                       InFlag);
        InFlag = ResLo.getValue(2);
      }
      ReplaceUses(SDValue(Node, 0), ResLo);
      DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      if (!ResHi.getNode()) {
        assert(HiReg && "Register for high half is not defined!");
        ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
                                       InFlag);
        InFlag = ResHi.getValue(2);
      }
      ReplaceUses(SDValue(Node, 1), ResHi);
      DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM:
  case X86ISD::SDIVREM8_SEXT_HREG:
  case X86ISD::UDIVREM8_ZEXT_HREG: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = (Opcode == ISD::SDIVREM ||
                     Opcode == X86ISD::SDIVREM8_SEXT_HREG);
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned SExtOpcode;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);
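    // If the sign bit of the dividend is already known to be zero, signed and
    // unsigned division agree, so even for IDIV we can simply zero-extend the
    // dividend instead of emitting CBW/CWD/CDQ/CQO below.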
    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
        switch (NVT.SimpleTy) {
        case MVT::i16:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
                          CurDAG->getTargetConstant(X86::sub_16bit, dl,
                                                    MVT::i32)),
                      0);
          break;
        case MVT::i32:
          break;
        case MVT::i64:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                          CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
                          CurDAG->getTargetConstant(X86::sub_32bit, dl,
                                                    MVT::i32)),
                      0);
          break;
        default:
          llvm_unreachable("Unexpected division source");
        }

        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      MachineSDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
      // Record the mem-refs.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<LoadSDNode>(N1)->getMemOperand();
      CNode->setMemRefs(MemOp, MemOp + 1);
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by explicitly copying it to
    // an ABCD_L register.
    //
    // The current assumption of the register allocator is that isel
    // won't generate explicit references to the GR8_ABCD_H registers. If
    // the allocator and/or the backend get enhanced to be more robust in
    // that regard, this can be, and should be, removed.
    if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
      SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
      unsigned AHExtOpcode =
          isSigned ? X86::MOVSX32_NOREXrr8 : X86::MOVZX32_NOREXrr8;

      SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
                                             MVT::Glue, AHCopy, InFlag);
      SDValue Result(RNode, 0);
      InFlag = SDValue(RNode, 1);

      if (Opcode == X86ISD::UDIVREM8_ZEXT_HREG ||
          Opcode == X86ISD::SDIVREM8_SEXT_HREG) {
        assert(Node->getValueType(1) == MVT::i32 && "Unexpected result type!");
      } else {
        Result =
            CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
      }
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case X86ISD::CMP:
  case X86ISD::SUB: {
    // Sometimes a SUB is used to perform comparison.
    if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
      // This node is not a CMP.
      break;
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        hasNoSignedComparisonUses(Node))
      N0 = N0.getOperand(0);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    // Look past the truncate if CMP is the only use of it.
    if ((N0.getOpcode() == ISD::AND ||
         (N0.getResNo() == 0 && N0.getOpcode() == X86ISD::AND)) &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!C) break;
      uint64_t Mask = C->getZExtValue();

      // For example, convert "testl %eax, $8" to "testb %al, $8"
      if (isUInt<8>(Mask) &&
          (!(Mask & 0x80) || hasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(Mask, dl, MVT::i8);
        SDValue Reg = N0.getOperand(0);

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        CurDAG->RemoveDeadNode(Node);
        return;
      }

      // For example, "testl %eax, $2048" to "testb %ah, $8".
      if (isShiftedUInt<8, 8>(Mask) &&
          (!(Mask & 0x8000) || hasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(Mask >> 8, dl, MVT::i8);
        SDValue Reg = N0.getOperand(0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // forced.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
                                                 MVT::i32, Subreg, ShiftedImm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        CurDAG->RemoveDeadNode(Node);
        return;
      }

      // For example, "testl %eax, $32776" to "testw %ax, $32776".
      // NOTE: We only want to form TESTW instructions if optimizing for
      // min size. Otherwise we only save one byte and possibly get a length
      // changing prefix penalty in the decoders.
      if (OptForMinSize && isUInt<16>(Mask) && N0.getValueType() != MVT::i16 &&
          (!(Mask & 0x8000) || hasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(Mask, dl, MVT::i16);
        SDValue Reg = N0.getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        CurDAG->RemoveDeadNode(Node);
        return;
      }

      // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
      if (isUInt<32>(Mask) && N0.getValueType() == MVT::i64 &&
          (!(Mask & 0x80000000) || hasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(Mask, dl, MVT::i32);
        SDValue Reg = N0.getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        CurDAG->RemoveDeadNode(Node);
        return;
      }
    }
    break;
  }
  case ISD::STORE:
    if (foldLoadStoreIntoMemOperand(Node))
      return;
    break;
  }

  SelectCode(Node);
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
    // FIXME: It seems strange that 'i' is needed here since it's supposed to
    // be an immediate and not a memory constraint.
    LLVM_FALLTHROUGH;
  case InlineAsm::Constraint_o: // offsetable        ??
  case InlineAsm::Constraint_v: // not offsetable    ??
  case InlineAsm::Constraint_m: // memory
  case InlineAsm::Constraint_X:
    if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// This pass converts a legalized DAG into a X86-specific DAG,
/// ready for instruction scheduling.
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}