//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized DAG to an X86 DAG.
//
//===----------------------------------------------------------------------===//
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//
namespace {
  /// This corresponds to X86AddressMode, but uses SDValue's instead of register
  /// numbers for the leaves of the matched tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    MCSymbol *MCSym;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*
    X86ISelAddressMode()
        : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
          Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
          MCSym(nullptr), JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {}
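
    // For example, the memory operand of "movl 4(%ebx,%ecx,2), %eax" would be
    // matched with Base_Reg = %ebx, IndexReg = %ecx, Scale = 2, and Disp = 4.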
    bool hasSymbolicDisplacement() const {
      return GV != nullptr || CP != nullptr || ES != nullptr ||
             MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
    }

    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
    }

    /// Return true if this addressing mode is already RIP-relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }
    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode())
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale " << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode())
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n';
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
#endif
  };
} // end anonymous namespace
namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86-specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel final : public SelectionDAGISel {
    /// Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// If true, selector should try to optimize for code size instead of
    /// performance.
    bool OptForSize;

    /// If true, selector should try to optimize for minimum code size.
    bool OptForMinSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
        : SelectionDAGISel(tm, OptLevel), OptForSize(false),
          OptForMinSize(false) {}
    StringRef getPassName() const override {
      return "X86 DAG->DAG Instruction Selection";
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Reset the subtarget each time through.
      Subtarget = &MF.getSubtarget<X86Subtarget>();
      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }

    void EmitFunctionEntryCode() override;
    bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

    void PreprocessISelDAG() override;

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // True if the 64-bit immediate fits in a 32-bit sign-extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }

    // Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"
    void Select(SDNode *N) override;
    bool tryGather(SDNode *N, unsigned Opc);

    bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool matchAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchAdd(SDValue N, X86ISelAddressMode &AM, unsigned Depth);
    bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
                          SDValue &Scale, SDValue &Index, SDValue &Disp,
                          SDValue &Segment);
    bool selectMOV64Imm32(SDValue N, SDValue &Imm);
    bool selectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool selectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool selectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool selectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);
    bool selectRelocImm(SDValue N, SDValue &Op);

    bool tryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// Implement addressing mode selection for inline asm expressions.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      unsigned ConstraintID,
                                      std::vector<SDValue> &OutOps) override;

    void emitSpecialCodeForMain();
    inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
                                   SDValue &Base, SDValue &Scale,
                                   SDValue &Index, SDValue &Disp,
                                   SDValue &Segment) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
                 ? CurDAG->getTargetFrameIndex(
                       AM.Base_FrameIndex,
                       TLI->getPointerTy(CurDAG->getDataLayout()))
                 : AM.Base_Reg;
      Scale = getI8Imm(AM.Scale, DL);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP-relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.MCSym) {
        assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
        assert(AM.SymbolFlags == 0 && "Unexpected symbol flags with MCSym.");
        Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }
    // Utility function to determine whether we should avoid selecting
    // immediate forms of instructions for better code size or not.
    // At a high level, we'd like to avoid such instructions when
    // we have similar constants used within the same basic block
    // that can be kept in a register.
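    // For example, if the same 32-bit constant is stored to two different
    // addresses in a block, loading it into a register once and storing the
    // register twice can be smaller than encoding the 4-byte immediate in
    // both store instructions.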
    bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
      uint32_t UseCount = 0;

      // Do not want to hoist if we're not optimizing for size.
      // TODO: We'd like to remove this restriction.
      // See the comment in X86InstrInfo.td for more info.
      if (!OptForSize)
        return false;

      // Walk all the users of the immediate.
      for (SDNode::use_iterator UI = N->use_begin(),
           UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {
        SDNode *User = *UI;

        // This user is already selected. Count it as a legitimate use and
        // move on.
        if (User->isMachineOpcode()) {
          UseCount++;
          continue;
        }

        // We want to count stores of immediates as real uses.
        if (User->getOpcode() == ISD::STORE &&
            User->getOperand(1).getNode() == N) {
          UseCount++;
          continue;
        }

        // We don't currently match users that have > 2 operands (except
        // for stores, which are handled above).
        // Those instructions won't match in ISel, for now, and would
        // be counted incorrectly.
        // This may change in the future as we add additional instruction
        // types.
        if (User->getNumOperands() != 2)
          continue;

        // Immediates that are used for offsets as part of stack
        // manipulation should be left alone. These are typically
        // used to indicate SP offsets for argument passing and
        // will get pulled into stores/pushes (implicitly).
        if (User->getOpcode() == X86ISD::ADD ||
            User->getOpcode() == ISD::ADD    ||
            User->getOpcode() == X86ISD::SUB ||
            User->getOpcode() == ISD::SUB) {

          // Find the other operand of the add/sub.
          SDValue OtherOp = User->getOperand(0);
          if (OtherOp.getNode() == N)
            OtherOp = User->getOperand(1);

          // Don't count if the other operand is SP.
          RegisterSDNode *RegNode;
          if (OtherOp->getOpcode() == ISD::CopyFromReg &&
              (RegNode = dyn_cast_or_null<RegisterSDNode>(
                 OtherOp->getOperand(1).getNode())))
            if ((RegNode->getReg() == X86::ESP) ||
                (RegNode->getReg() == X86::RSP))
              continue;
        }

        // ... otherwise, count this and move on.
        UseCount++;
      }

      // If we have more than 1 use, then recommend for hoisting.
      return (UseCount > 1);
    }
    /// Return a target constant with the specified value of type i8.
    inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
    }

    /// Return a target constant with the specified value, of type i32.
    inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
    }

    /// Return an SDNode that returns the value of the global base register.
    /// Output instructions required to initialize the global base register,
    /// if necessary.
    SDNode *getGlobalBaseReg();

    /// Return a reference to the TargetMachine, casted to the target-specific
    /// type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// Return a reference to the TargetInstrInfo, casted to the target-specific
    /// type.
    const X86InstrInfo *getInstrInfo() const {
      return Subtarget->getInstrInfo();
    }

    /// \brief Address-mode matching performs shift-of-and to and-of-shift
    /// reassociation in order to expose more scaled addressing
    /// opportunities.
    bool ComplexPatternFuncMutatesDAG() const override {
      return true;
    }
  };
} // end anonymous namespace
bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD: case X86ISD::SUB: case X86ISD::AND:
    case X86ISD::XOR: case X86ISD::OR:
    case ISD::ADD: case ISD::ADDC: case ISD::ADDE:
    case ISD::AND: case ISD::OR: case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the immediate
      // instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In case where the increment is 1, then
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // addl    %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}
/// Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  Ops.append(Call->op_begin() + 1, Call->op_end());
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}
/// Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain, this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}
void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptFor[Min]Size are used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->optForSize();
  OptForMinSize = MF->getFunction()->optForMinSize();
  assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize");

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target doesn't favor register indirect
        // calls.
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->callRegIndirect()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          // Only do this if the load can be folded into TC_RETURN.
          (Subtarget->is64Bit() ||
           !getTargetMachine().isPositionIndependent())))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }
    // Lower fpround and fpextend nodes that target the FP stack to be store and
    // load to the stack. This is a gross hack. We would like to simply mark
    // these as being illegal, but when we do that, legalize produces these when
    // it expands calls, then expands these in the same legalize pass. We would
    // like dag combine to be able to hack on these between the call expansion
    // and the node legalization. As such this pass basically does "really
    // late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    MVT SrcVT = N->getOperand(0).getSimpleValueType();
    MVT DstVT = N->getSimpleValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    const X86TargetLowering *X86Lowering =
        static_cast<const X86TargetLowering *>(TLI);
    bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    SDLoc dl(N);

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store =
        CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, N->getOperand(0),
                              MemTmp, MachinePointerInfo(), MemVT);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(), MemVT);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havok on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
/// Emit any code that needs to be executed only in the main function.
void X86DAGToDAGISel::emitSpecialCodeForMain() {
  if (Subtarget->isTargetCygMing()) {
    TargetLowering::ArgListTy Args;
    auto &DL = CurDAG->getDataLayout();

    TargetLowering::CallLoweringInfo CLI(*CurDAG);
    CLI.setChain(CurDAG->getRoot())
        .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
                   CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
                   std::move(Args));
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    CurDAG->setRoot(Result.second);
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      emitSpecialCodeForMain();
}
static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
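  // For example, allowing a full 32-bit explicit displacement such as
  // 0x7fffffff would leave no headroom: adding even a small positive frame
  // offset at frame lowering time would overflow the field, whereas the sum
  // of two 31-bit signed values always fits in 32 bits.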
  return isInt<31>(Val);
}
bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  // Cannot combine ExternalSymbol displacements with integer offsets.
  if (Offset != 0 && (AM.ES || AM.MCSym))
    return true;

  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}
bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
        Subtarget->isTargetGlibc())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      // Address space 258 is not handled here, because it is not used to
      // address TLS areas.
      }

  return true;
}
/// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
/// mode. These wrap things that will resolve down into a symbol reference.
/// If no match is possible, this returns true, otherwise it returns false.
bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (foldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (foldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
      AM.MCSym = S->getMCSymbol();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.BlockAddr = BA->getBlockAddress();
      AM.SymbolFlags = BA->getTargetFlags();
      if (foldOffsetIntoAddress(BA->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else
      llvm_unreachable("Unhandled symbol reference node.");

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }
  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
      AM.MCSym = S->getMCSymbol();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      AM.BlockAddr = BA->getBlockAddress();
      AM.Disp += BA->getOffset();
      AM.SymbolFlags = BA->getTargetFlags();
    } else
      llvm_unreachable("Unhandled symbol reference node.");
    return false;
  }

  return true;
}
/// Add the specified node to the specified addressing mode, returning true if
/// it cannot be done. This just pattern matches for the addressing mode.
bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (matchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr &&
      AM.IndexReg.getNode() == nullptr &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}
bool X86DAGToDAGISel::matchAdd(SDValue N, X86ISelAddressMode &AM,
                               unsigned Depth) {
  // Add an artificial use to this node so that we can keep track of
  // it if it gets CSE'd with a different node.
  HandleSDNode Handle(N);

  X86ISelAddressMode Backup = AM;
  if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
    return false;
  AM = Backup;

  // Try again after commuting the operands.
  if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
    return false;
  AM = Backup;

  // If we couldn't fold both operands into the address at the same time,
  // see if we can just put each operand into a register and fold at least
  // the add.
  if (AM.BaseType == X86ISelAddressMode::RegBase &&
      !AM.Base_Reg.getNode() &&
      !AM.IndexReg.getNode()) {
    N = Handle.getValue();
    AM.Base_Reg = N.getOperand(0);
    AM.IndexReg = N.getOperand(1);
    return false;
  }
  N = Handle.getValue();
  return true;
}
// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode()->getIterator(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}
// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
// safe. This allows us to convert the shift and and into an h-register
// extract and a scaled index. Returns false if the simplification is
// performed.
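// For example, with C1 == 2 this rewrites ((x >> 6) & 0x3fc) as
// (((x >> 8) & 0xff) << 2): the "(x >> 8) & 0xff" becomes an h-register
// extract and the "<< 2" becomes a scale-by-4 index.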
static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, DL, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, Eight);
  insertDAGNode(DAG, N, Srl);
  insertDAGNode(DAG, N, NewMask);
  insertDAGNode(DAG, N, And);
  insertDAGNode(DAG, N, ShlCount);
  insertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}
// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
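// For example, with C1 == 2 and C2 == 0x3fc this turns ((x << 2) & 0x3fc)
// into ((x & 0xff) << 2), so the shift can be folded into a scale-by-4 index
// while the cheaper mask is applied to the unshifted value.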
static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, NewMask);
  insertDAGNode(DAG, N, NewAnd);
  insertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}
// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %rcx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = countLeadingZeros(Mask);
  unsigned MaskTZ = countTrailingZeros(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that mask is a continuous run of bits.
  if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
                          X.getOperand(0).getSimpleValueType().getSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits =
      APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.computeKnownBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;
  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  MVT VT = N.getSimpleValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
    insertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  SDLoc DL(N);
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, NewSRLAmt);
  insertDAGNode(DAG, N, NewSRL);
  insertDAGNode(DAG, N, NewSHLAmt);
  insertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}
bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  SDLoc dl(N);
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return matchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!(AM.ES || AM.MCSym) && AM.JT != -1)
      return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }
  switch (N.getOpcode()) {
  default: break;
  case ISD::LOCAL_RECOVER: {
    if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
      if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
        // Use the symbol and don't prefix it.
        AM.MCSym = ESNode->getMCSymbol();
        return false;
      }
    break;
  }
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!foldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!matchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;
  case ISD::SHL:
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!foldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;
  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    LLVM_FALLTHROUGH;
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (foldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;
  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field with the index field unused, use -B as the index.
    // This is a win if a has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction, however
    // it costs an additional mov if the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (matchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, dl, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    insertDAGNode(*CurDAG, N, Zero);
    insertDAGNode(*CurDAG, N, Neg);
    return false;
  }
  case ISD::ADD:
    if (!matchAdd(N, AM, Depth))
      return false;
    break;

  case ISD::OR:
    // We want to look through a transform in InstCombine and DAGCombiner that
    // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
    // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
    // An 'lea' can then be used to match the shift (multiply) and add:
    // and $1, %esi
    // lea (%rsi, %rdi, 8), %rax
    if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
        !matchAdd(N, AM, Depth))
      return false;
    break;
  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!foldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return matchAddressBase(N, AM);
}
/// Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (!AM.IndexReg.getNode()) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}
bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                       SDValue &Scale, SDValue &Index,
                                       SDValue &Disp, SDValue &Segment) {
  MaskedGatherScatterSDNode *Mgs = dyn_cast<MaskedGatherScatterSDNode>(Parent);
  if (!Mgs)
    return false;
  X86ISelAddressMode AM;
  unsigned AddrSpace = Mgs->getPointerInfo().getAddrSpace();
  // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
  if (AddrSpace == 256)
    AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
  if (AddrSpace == 257)
    AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  if (AddrSpace == 258)
    AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);

  SDLoc DL(N);
  Base = Mgs->getBasePtr();
  Index = Mgs->getIndex();
  unsigned ScalarSize = Mgs->getValue().getScalarValueSizeInBits();
  Scale = getI8Imm(ScalarSize/8, DL);

  // If Base is 0, the whole address is in index and the Scale is 1.
  if (isa<ConstantSDNode>(Base)) {
    assert(cast<ConstantSDNode>(Base)->isNullValue() &&
           "Unexpected base in gather/scatter");
    Scale = getI8Imm(1, DL);
    Base = CurDAG->getRegister(0, MVT::i32);
  }
  if (AM.Segment.getNode())
    Segment = AM.Segment;
  else
    Segment = CurDAG->getRegister(0, MVT::i32);
  Disp = CurDAG->getTargetConstant(0, DL, MVT::i32);
  return true;
}
/// Returns true if it is able to pattern match an addressing mode.
/// It returns the operands which make up the maximal addressing mode it can
/// match by reference.
///
/// Parent is the parent node of the addr operand that is being matched. It
/// is always a load, store, atomic node, or null. It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes are all the nodes that have an "addr:$ptr" operand
      // that are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
    if (AddrSpace == 258)
      AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
  }

  if (matchAddress(N, AM))
    return false;

  MVT VT = N.getSimpleValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
  return true;
}
/// Match a scalar SSE load. In particular, we want to match a load whose top
/// elements are either undef or zeros. The load flavor is derived from the
/// type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   output.
bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  // We can allow a full vector load here since narrowing a load is ok.
  if (ISD::isNON_EXTLoad(N.getNode())) {
    PatternNodeWithChain = N;
    if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, *N->use_begin(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // We can also match the special zero extended load opcode.
  if (N.getOpcode() == X86ISD::VZEXT_LOAD) {
    PatternNodeWithChain = N;
    if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, *N->use_begin(), Root, OptLevel)) {
      auto *MI = cast<MemIntrinsicSDNode>(PatternNodeWithChain);
      return selectAddr(MI, MI->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // Need to make sure that the SCALAR_TO_VECTOR and load are both only used
  // once. Otherwise the load might get duplicated and the chain output of the
  // duplicate load will not be observed by all dependencies.
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR && N.getNode()->hasOneUse()) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse()) {
    PatternNodeWithChain = N.getOperand(0).getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel)) {
      // Okay, this is a zero extending load. Fold it.
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  return false;
}
bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
  if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    uint64_t ImmVal = CN->getZExtValue();
    if ((uint32_t)ImmVal != (uint64_t)ImmVal)
      return false;

    Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i64);
    return true;
  }

  // In static codegen with small code model, we can get the address of a label
  // into a register with 'movl'. TableGen has already made sure we're looking
  // at a label of some kind.
  assert(N->getOpcode() == X86ISD::Wrapper &&
         "Unexpected node type for MOV32ri64");
  N = N.getOperand(0);

  // At least GNU as does not accept 'movl' for TPOFF relocations.
  // FIXME: We could use 'movl' when we know we are targeting MC.
  if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
    return false;

  Imm = N;
  if (N->getOpcode() != ISD::TargetGlobalAddress)
    return TM.getCodeModel() == CodeModel::Small;

  Optional<ConstantRange> CR =
      cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
  if (!CR)
    return TM.getCodeModel() == CodeModel::Small;

  return CR->getUnsignedMax().ult(1ull << 32);
}
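
// For example, under the small code model a 64-bit symbol address can be
// materialized as "movl $foo, %eax", since the image is linked below 4 GiB
// and the 32-bit immediate is zero-extended to 64 bits.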
bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
                                         SDValue &Scale, SDValue &Index,
                                         SDValue &Disp, SDValue &Segment) {
  // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
  SDLoc DL(N);

  if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
    return false;

  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
  if (RN && RN->getReg() == 0)
    Base = CurDAG->getRegister(0, MVT::i64);
  else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
    // Base could already be %rip, particularly in the x32 ABI.
    Base = SDValue(CurDAG->getMachineNode(
                       TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                       CurDAG->getTargetConstant(0, DL, MVT::i64),
                       Base,
                       CurDAG->getTargetConstant(X86::sub_32bit, DL, MVT::i32)),
                   0);
  }

  RN = dyn_cast<RegisterSDNode>(Index);
  if (RN && RN->getReg() == 0)
    Index = CurDAG->getRegister(0, MVT::i64);
  else {
    assert(Index.getValueType() == MVT::i32 &&
           "Expect to be extending 32-bit registers for use in LEA");
    Index = SDValue(CurDAG->getMachineNode(
                        TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                        CurDAG->getTargetConstant(0, DL, MVT::i64),
                        Index,
                        CurDAG->getTargetConstant(X86::sub_32bit, DL,
                                                  MVT::i32)),
                    0);
  }

  return true;
}
/// Calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Save the DL and VT before calling matchAddress, it can invalidate N.
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (matchAddress(N, AM))
    return false;
  assert(T == AM.Segment);
  AM.Segment = Copy;

  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  } else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, always use LEA to materialize RIP-relative addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, DL, Base, Scale, Index, Disp, Segment);
  return true;
}
/// This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
  return true;
}
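
// For example, in 32-bit PIC code the operands matched above feed the TLS
// pseudo that is printed as "leal sym@TLSGD(,%ebx,1), %eax" followed by a
// call to ___tls_get_addr; that is why EBX is forced in as the index
// register with scale 1.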
1737 bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
1738 if (auto *CN = dyn_cast<ConstantSDNode>(N)) {
1739 Op = CurDAG->getTargetConstant(CN->getAPIntValue(), SDLoc(CN),
1744 // Keep track of the original value type and whether this value was
1745 // truncated. If we see a truncation from pointer type to VT that truncates
1746 // bits that are known to be zero, we can use a narrow reference.
1747 EVT VT = N.getValueType();
1748 bool WasTruncated = false;
1749 if (N.getOpcode() == ISD::TRUNCATE) {
1750 WasTruncated = true;
1751 N = N.getOperand(0);
1752 }
1754 if (N.getOpcode() != X86ISD::Wrapper)
1755 return false;
1757 // We can only use non-GlobalValues as immediates if they were not truncated,
1758 // as we do not have any range information. If we have a GlobalValue and the
1759 // address was not truncated, we can select it as an operand directly.
1760 unsigned Opc = N.getOperand(0)->getOpcode();
1761 if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
1762 Op = N.getOperand(0);
1763 // We can only select the operand directly if we didn't have to look past a
1764 // truncate.
1765 return !WasTruncated;
1766 }
1768 // Check that the global's range fits into VT.
1769 auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
1770 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
1771 if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
1772 return false;
1774 // Okay, we can use a narrow reference.
1775 Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
1776 GA->getOffset(), GA->getTargetFlags());
1777 return true;
1778 }
1780 bool X86DAGToDAGISel::tryFoldLoad(SDNode *P, SDValue N,
1781 SDValue &Base, SDValue &Scale,
1782 SDValue &Index, SDValue &Disp,
1783 SDValue &Segment) {
1784 if (!ISD::isNON_EXTLoad(N.getNode()) ||
1785 !IsProfitableToFold(N, P, P) ||
1786 !IsLegalToFold(N, P, P, OptLevel))
1787 return false;
1789 return selectAddr(N.getNode(),
1790 N.getOperand(1), Base, Scale, Index, Disp, Segment);
1791 }
1793 /// Return an SDNode that returns the value of the global base register.
1794 /// Output instructions required to initialize the global base register,
1795 /// if necessary.
1796 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
1797 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
1798 auto &DL = MF->getDataLayout();
1799 return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
1800 }
1802 /// Test whether the given X86ISD::CMP node has any uses which require the SF
1803 /// or OF bits to be accurate.
1804 static bool hasNoSignedComparisonUses(SDNode *N) {
1805 // Examine each user of the node.
1806 for (SDNode::use_iterator UI = N->use_begin(),
1807 UE = N->use_end(); UI != UE; ++UI) {
1808 // Only examine CopyToReg uses.
1809 if (UI->getOpcode() != ISD::CopyToReg)
1810 return false;
1811 // Only examine CopyToReg uses that copy to EFLAGS.
1812 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
1813 X86::EFLAGS)
1814 return false;
1815 // Examine each user of the CopyToReg use.
1816 for (SDNode::use_iterator FlagUI = UI->use_begin(),
1817 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
1818 // Only examine the Flag result.
1819 if (FlagUI.getUse().getResNo() != 1) continue;
1820 // Anything unusual: assume conservatively.
1821 if (!FlagUI->isMachineOpcode()) return false;
1822 // Examine the opcode of the user.
1823 switch (FlagUI->getMachineOpcode()) {
1824 // These comparisons don't treat the most significant bit specially.
1825 case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
1826 case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
1827 case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
1828 case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
1829 case X86::JA_1: case X86::JAE_1: case X86::JB_1: case X86::JBE_1:
1830 case X86::JE_1: case X86::JNE_1: case X86::JP_1: case X86::JNP_1:
1831 case X86::CMOVA16rr: case X86::CMOVA16rm:
1832 case X86::CMOVA32rr: case X86::CMOVA32rm:
1833 case X86::CMOVA64rr: case X86::CMOVA64rm:
1834 case X86::CMOVAE16rr: case X86::CMOVAE16rm:
1835 case X86::CMOVAE32rr: case X86::CMOVAE32rm:
1836 case X86::CMOVAE64rr: case X86::CMOVAE64rm:
1837 case X86::CMOVB16rr: case X86::CMOVB16rm:
1838 case X86::CMOVB32rr: case X86::CMOVB32rm:
1839 case X86::CMOVB64rr: case X86::CMOVB64rm:
1840 case X86::CMOVBE16rr: case X86::CMOVBE16rm:
1841 case X86::CMOVBE32rr: case X86::CMOVBE32rm:
1842 case X86::CMOVBE64rr: case X86::CMOVBE64rm:
1843 case X86::CMOVE16rr: case X86::CMOVE16rm:
1844 case X86::CMOVE32rr: case X86::CMOVE32rm:
1845 case X86::CMOVE64rr: case X86::CMOVE64rm:
1846 case X86::CMOVNE16rr: case X86::CMOVNE16rm:
1847 case X86::CMOVNE32rr: case X86::CMOVNE32rm:
1848 case X86::CMOVNE64rr: case X86::CMOVNE64rm:
1849 case X86::CMOVNP16rr: case X86::CMOVNP16rm:
1850 case X86::CMOVNP32rr: case X86::CMOVNP32rm:
1851 case X86::CMOVNP64rr: case X86::CMOVNP64rm:
1852 case X86::CMOVP16rr: case X86::CMOVP16rm:
1853 case X86::CMOVP32rr: case X86::CMOVP32rm:
1854 case X86::CMOVP64rr: case X86::CMOVP64rm:
1855 continue;
1856 // Anything else: assume conservatively.
1857 default: return false;
1858 }
1859 }
1860 }
1861 return true;
1862 }
1864 /// Check whether or not the chain ending in StoreNode is suitable for doing
1865 /// the {load; increment or decrement; store} to modify transformation.
1866 static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
1867 SDValue StoredVal, SelectionDAG *CurDAG,
1868 LoadSDNode* &LoadNode, SDValue &InputChain) {
1870 // Is the value stored the result of a DEC or INC?
1871 if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;
1873 // Is the stored value result 0 of the DEC or INC?
1874 if (StoredVal.getResNo() != 0) return false;
1876 // Is the store the only use of the DEC or INC result?
1877 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
1879 // Is the store non-extending and non-indexed?
1880 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
1881 return false;
1883 SDValue Load = StoredVal->getOperand(0);
1884 // Is the stored value a non-extending and non-indexed load?
1885 if (!ISD::isNormalLoad(Load.getNode())) return false;
1887 // Return LoadNode by reference.
1888 LoadNode = cast<LoadSDNode>(Load);
1889 // Is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
1890 EVT LdVT = LoadNode->getMemoryVT();
1891 if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
1892 LdVT != MVT::i8)
1893 return false;
1895 // Is the store the only read of the loaded value?
1896 if (!Load.hasOneUse())
1897 return false;
1899 // Is the address of the store the same as the load?
1900 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
1901 LoadNode->getOffset() != StoreNode->getOffset())
1902 return false;
1904 // Check if the chain is produced by the load or is a TokenFactor with
1905 // the load output chain as an operand. Return InputChain by reference.
1906 SDValue Chain = StoreNode->getChain();
1908 bool ChainCheck = false;
1909 if (Chain == Load.getValue(1)) {
1910 ChainCheck = true;
1911 InputChain = LoadNode->getChain();
1912 } else if (Chain.getOpcode() == ISD::TokenFactor) {
1913 SmallVector<SDValue, 4> ChainOps;
1914 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
1915 SDValue Op = Chain.getOperand(i);
1916 if (Op == Load.getValue(1)) {
1917 ChainCheck = true;
1918 ChainOps.push_back(Load.getOperand(0)); // Drop Load but keep its chain.
1919 continue;
1920 }
1921 // Make sure using Op as part of the chain would not cause a cycle here.
1922 // In theory, we could check whether the chain node is a predecessor of
1923 // the load. But that can be very expensive. Instead visit the uses and
1924 // make sure they all have smaller node id than the load.
1925 int LoadId = LoadNode->getNodeId();
1926 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
1927 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
1928 if (UI.getUse().getResNo() != 0)
1929 continue;
1930 if (UI->getNodeId() > LoadId)
1931 return false;
1932 }
1934 ChainOps.push_back(Op);
1935 }
1937 if (ChainCheck)
1938 // Make a new TokenFactor with all the other input chains except
1939 // the load.
1940 InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
1941 MVT::Other, ChainOps);
1942 }
1943 if (!ChainCheck)
1944 return false;
1946 return true;
1947 }
1949 /// Get the appropriate X86 opcode for an in-memory increment or decrement.
1950 /// Opc should be X86ISD::DEC or X86ISD::INC.
1951 static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
1952 if (Opc == X86ISD::DEC) {
1953 if (LdVT == MVT::i64) return X86::DEC64m;
1954 if (LdVT == MVT::i32) return X86::DEC32m;
1955 if (LdVT == MVT::i16) return X86::DEC16m;
1956 if (LdVT == MVT::i8) return X86::DEC8m;
1957 }
1958 assert(Opc == X86ISD::INC && "unrecognized opcode");
1959 if (LdVT == MVT::i64) return X86::INC64m;
1960 if (LdVT == MVT::i32) return X86::INC32m;
1961 if (LdVT == MVT::i16) return X86::INC16m;
1962 if (LdVT == MVT::i8) return X86::INC8m;
1964 llvm_unreachable("unrecognized size for LdVT");
1965 }
1967 /// Customized ISel for GATHER operations.
1968 bool X86DAGToDAGISel::tryGather(SDNode *Node, unsigned Opc) {
1969 // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
1970 SDValue Chain = Node->getOperand(0);
1971 SDValue VSrc = Node->getOperand(2);
1972 SDValue Base = Node->getOperand(3);
1973 SDValue VIdx = Node->getOperand(4);
1974 SDValue VMask = Node->getOperand(5);
1975 ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
1976 if (!Scale)
1977 return false;
1979 SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
1980 MVT::Other);
1982 SDLoc DL(Node);
1984 // Memory Operands: Base, Scale, Index, Disp, Segment
1985 SDValue Disp = CurDAG->getTargetConstant(0, DL, MVT::i32);
1986 SDValue Segment = CurDAG->getRegister(0, MVT::i32);
1987 const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue(), DL), VIdx,
1988 Disp, Segment, VMask, Chain};
1989 SDNode *ResNode = CurDAG->getMachineNode(Opc, DL, VTs, Ops);
1990 // Node has 2 outputs: VDst and MVT::Other.
1991 // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
1992 // We replace VDst of Node with VDst of ResNode, and Other of Node with Other
1993 // of ResNode.
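// Result 1 of ResNode, the written-back mask (VMask_wb), has no counterpart
// on the intrinsic node and is deliberately left without users.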
1994 ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
1995 ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
1996 CurDAG->RemoveDeadNode(Node);
1997 return true;
1998 }
2000 void X86DAGToDAGISel::Select(SDNode *Node) {
2001 MVT NVT = Node->getSimpleValueType(0);
2002 unsigned Opc, MOpc;
2003 unsigned Opcode = Node->getOpcode();
2004 SDLoc dl(Node);
2006 DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
2008 if (Node->isMachineOpcode()) {
2009 DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
2010 Node->setNodeId(-1);
2011 return; // Already selected.
2012 }
2014 switch (Opcode) {
2015 default: break;
2016 case ISD::BRIND: {
2017 if (Subtarget->isTargetNaCl())
2018 // NaCl has its own pass where jmp %r32 instructions are converted to
2019 // jmp %r64. We leave the instruction alone.
2020 break;
2021 if (Subtarget->isTarget64BitILP32()) {
2022 // Converts a 32-bit register to a 64-bit, zero-extended version of
2023 // it. This is needed because x86-64 can do many things, but jmp %r32
2024 // ain't one of them.
2025 const SDValue &Target = Node->getOperand(1);
2026 assert(Target.getSimpleValueType() == llvm::MVT::i32);
2027 SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, EVT(MVT::i64));
2028 SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other,
2029 Node->getOperand(0), ZextTarget);
2030 ReplaceNode(Node, Brind.getNode());
2031 SelectCode(ZextTarget.getNode());
2032 SelectCode(Brind.getNode());
2033 return;
2034 }
2035 break;
2036 }
2037 case ISD::INTRINSIC_W_CHAIN: {
2038 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2039 switch (IntNo) {
2040 default: break;
2041 case Intrinsic::x86_avx2_gather_d_pd:
2042 case Intrinsic::x86_avx2_gather_d_pd_256:
2043 case Intrinsic::x86_avx2_gather_q_pd:
2044 case Intrinsic::x86_avx2_gather_q_pd_256:
2045 case Intrinsic::x86_avx2_gather_d_ps:
2046 case Intrinsic::x86_avx2_gather_d_ps_256:
2047 case Intrinsic::x86_avx2_gather_q_ps:
2048 case Intrinsic::x86_avx2_gather_q_ps_256:
2049 case Intrinsic::x86_avx2_gather_d_q:
2050 case Intrinsic::x86_avx2_gather_d_q_256:
2051 case Intrinsic::x86_avx2_gather_q_q:
2052 case Intrinsic::x86_avx2_gather_q_q_256:
2053 case Intrinsic::x86_avx2_gather_d_d:
2054 case Intrinsic::x86_avx2_gather_d_d_256:
2055 case Intrinsic::x86_avx2_gather_q_d:
2056 case Intrinsic::x86_avx2_gather_q_d_256: {
2057 if (!Subtarget->hasAVX2())
2058 break;
2060 switch (IntNo) {
2061 default: llvm_unreachable("Impossible intrinsic");
2062 case Intrinsic::x86_avx2_gather_d_pd: Opc = X86::VGATHERDPDrm; break;
2063 case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
2064 case Intrinsic::x86_avx2_gather_q_pd: Opc = X86::VGATHERQPDrm; break;
2065 case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
2066 case Intrinsic::x86_avx2_gather_d_ps: Opc = X86::VGATHERDPSrm; break;
2067 case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
2068 case Intrinsic::x86_avx2_gather_q_ps: Opc = X86::VGATHERQPSrm; break;
2069 case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
2070 case Intrinsic::x86_avx2_gather_d_q: Opc = X86::VPGATHERDQrm; break;
2071 case Intrinsic::x86_avx2_gather_d_q_256: Opc = X86::VPGATHERDQYrm; break;
2072 case Intrinsic::x86_avx2_gather_q_q: Opc = X86::VPGATHERQQrm; break;
2073 case Intrinsic::x86_avx2_gather_q_q_256: Opc = X86::VPGATHERQQYrm; break;
2074 case Intrinsic::x86_avx2_gather_d_d: Opc = X86::VPGATHERDDrm; break;
2075 case Intrinsic::x86_avx2_gather_d_d_256: Opc = X86::VPGATHERDDYrm; break;
2076 case Intrinsic::x86_avx2_gather_q_d: Opc = X86::VPGATHERQDrm; break;
2077 case Intrinsic::x86_avx2_gather_q_d_256: Opc = X86::VPGATHERQDYrm; break;
2079 if (tryGather(Node, Opc))
2080 return;
2081 break;
2082 }
2083 }
2084 break;
2085 }
2086 case X86ISD::GlobalBaseReg:
2087 ReplaceNode(Node, getGlobalBaseReg());
2088 return;
2090 case X86ISD::SHRUNKBLEND: {
2091 // SHRUNKBLEND selects like a regular VSELECT.
2092 SDValue VSelect = CurDAG->getNode(
2093 ISD::VSELECT, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
2094 Node->getOperand(1), Node->getOperand(2));
2095 ReplaceUses(SDValue(Node, 0), VSelect);
2096 SelectCode(VSelect.getNode());
2097 // We already called ReplaceUses.
2098 return;
2099 }
2101 case ISD::AND:
2102 case ISD::OR:
2103 case ISD::XOR: {
2104 // For operations of the form (x << C1) op C2, check if we can use a smaller
2105 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
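// For example, (x << 8) | 0x4500 becomes ((x | 0x45) << 8); the 16-bit
// immediate 0x4500 shrinks to the 8-bit immediate 0x45.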
2106 SDValue N0 = Node->getOperand(0);
2107 SDValue N1 = Node->getOperand(1);
2109 if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
2110 break;
2112 // i8 is unshrinkable, i16 should be promoted to i32.
2113 if (NVT != MVT::i32 && NVT != MVT::i64)
2114 break;
2116 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
2117 ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
2118 if (!Cst || !ShlCst)
2119 break;
2121 int64_t Val = Cst->getSExtValue();
2122 uint64_t ShlVal = ShlCst->getZExtValue();
2124 // Make sure that we don't change the operation by removing bits.
2125 // This only matters for OR and XOR, AND is unaffected.
2126 uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
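// For example, (x << 4) ^ 0xF5 must not become ((x ^ 0xF) << 4), since
// the low four bits of the constant would be silently dropped.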
2127 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
2128 break;
2130 unsigned ShlOp, AddOp, Op;
2131 MVT CstVT = NVT;
2133 // Check the minimum bitwidth for the new constant.
2134 // TODO: AND32ri is the same as AND64ri32 with zext imm.
2135 // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
2136 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
2137 if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
2138 CstVT = MVT::i8;
2139 else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
2140 CstVT = MVT::i32;
2142 // Bail if there is no smaller encoding.
2143 if (NVT == CstVT)
2144 break;
2146 switch (NVT.SimpleTy) {
2147 default: llvm_unreachable("Unsupported VT!");
2148 case MVT::i32:
2149 assert(CstVT == MVT::i8);
2150 ShlOp = X86::SHL32ri;
2151 AddOp = X86::ADD32rr;
2153 switch (Opcode) {
2154 default: llvm_unreachable("Impossible opcode");
2155 case ISD::AND: Op = X86::AND32ri8; break;
2156 case ISD::OR: Op = X86::OR32ri8; break;
2157 case ISD::XOR: Op = X86::XOR32ri8; break;
2158 }
2159 break;
2160 case MVT::i64:
2161 assert(CstVT == MVT::i8 || CstVT == MVT::i32);
2162 ShlOp = X86::SHL64ri;
2163 AddOp = X86::ADD64rr;
2165 switch (Opcode) {
2166 default: llvm_unreachable("Impossible opcode");
2167 case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
2168 case ISD::OR: Op = CstVT==MVT::i8? X86::OR64ri8 : X86::OR64ri32; break;
2169 case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
2170 }
2171 break;
2172 }
2174 // Emit the smaller op and the shift.
2175 SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, dl, CstVT);
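// When the shift amount is 1, prefer ADD reg,reg over SHL reg,1; the ADD
// form is generally at least as cheap to execute.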
2176 SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0), NewCst);
2177 if (ShlVal == 1)
2178 CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0),
2179 SDValue(New, 0));
2180 else
2181 CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
2182 getI8Imm(ShlVal, dl));
2183 return;
2184 }
2185 case X86ISD::UMUL8:
2186 case X86ISD::SMUL8: {
2187 SDValue N0 = Node->getOperand(0);
2188 SDValue N1 = Node->getOperand(1);
2190 Opc = (Opcode == X86ISD::SMUL8 ? X86::IMUL8r : X86::MUL8r);
2192 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::AL,
2193 N0, SDValue()).getValue(1);
2195 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32);
2196 SDValue Ops[] = {N1, InFlag};
2197 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2199 ReplaceNode(Node, CNode);
2200 return;
2201 }
2203 case X86ISD::UMUL: {
2204 SDValue N0 = Node->getOperand(0);
2205 SDValue N1 = Node->getOperand(1);
2207 unsigned LoReg;
2208 switch (NVT.SimpleTy) {
2209 default: llvm_unreachable("Unsupported VT!");
2210 case MVT::i8: LoReg = X86::AL; Opc = X86::MUL8r; break;
2211 case MVT::i16: LoReg = X86::AX; Opc = X86::MUL16r; break;
2212 case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
2213 case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
2214 }
2216 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
2217 N0, SDValue()).getValue(1);
2219 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
2220 SDValue Ops[] = {N1, InFlag};
2221 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2223 ReplaceNode(Node, CNode);
2224 return;
2225 }
2227 case ISD::SMUL_LOHI:
2228 case ISD::UMUL_LOHI: {
2229 SDValue N0 = Node->getOperand(0);
2230 SDValue N1 = Node->getOperand(1);
2232 bool isSigned = Opcode == ISD::SMUL_LOHI;
2233 bool hasBMI2 = Subtarget->hasBMI2();
2234 if (!isSigned) {
2235 switch (NVT.SimpleTy) {
2236 default: llvm_unreachable("Unsupported VT!");
2237 case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
2238 case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
2239 case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
2240 MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
2241 case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
2242 MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
2243 }
2244 } else {
2245 switch (NVT.SimpleTy) {
2246 default: llvm_unreachable("Unsupported VT!");
2247 case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
2248 case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
2249 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
2250 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
2251 }
2252 }
2254 unsigned SrcReg, LoReg, HiReg;
2255 switch (Opc) {
2256 default: llvm_unreachable("Unknown MUL opcode!");
2257 case X86::IMUL8r:
2258 case X86::MUL8r:
2259 SrcReg = LoReg = X86::AL; HiReg = X86::AH;
2260 break;
2261 case X86::IMUL16r:
2262 case X86::MUL16r:
2263 SrcReg = LoReg = X86::AX; HiReg = X86::DX;
2264 break;
2265 case X86::IMUL32r:
2266 case X86::MUL32r:
2267 SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
2268 break;
2269 case X86::IMUL64r:
2270 case X86::MUL64r:
2271 SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
2272 break;
2273 case X86::MULX32rr:
2274 SrcReg = X86::EDX; LoReg = HiReg = 0;
2275 break;
2276 case X86::MULX64rr:
2277 SrcReg = X86::RDX; LoReg = HiReg = 0;
2278 break;
2279 }
2281 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2282 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2283 // Multiply is commutative.
2284 if (!foldedLoad) {
2285 foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2286 if (foldedLoad)
2287 std::swap(N0, N1);
2288 }
2290 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
2291 N0, SDValue()).getValue(1);
2292 SDValue ResHi, ResLo;
2294 if (foldedLoad) {
2295 SDValue Chain;
2296 MachineSDNode *CNode = nullptr;
2297 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
2298 InFlag };
2299 if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
2300 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
2301 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
2302 ResHi = SDValue(CNode, 0);
2303 ResLo = SDValue(CNode, 1);
2304 Chain = SDValue(CNode, 2);
2305 InFlag = SDValue(CNode, 3);
2306 } else {
2307 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
2308 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
2309 Chain = SDValue(CNode, 0);
2310 InFlag = SDValue(CNode, 1);
2311 }
2313 // Update the chain.
2314 ReplaceUses(N1.getValue(1), Chain);
2315 // Record the mem-refs
2316 LoadSDNode *LoadNode = cast<LoadSDNode>(N1);
2317 if (LoadNode) {
2318 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2319 MemOp[0] = LoadNode->getMemOperand();
2320 CNode->setMemRefs(MemOp, MemOp + 1);
2321 }
2322 } else {
2323 SDValue Ops[] = { N1, InFlag };
2324 if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
2325 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
2326 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2327 ResHi = SDValue(CNode, 0);
2328 ResLo = SDValue(CNode, 1);
2329 InFlag = SDValue(CNode, 2);
2330 } else {
2331 SDVTList VTs = CurDAG->getVTList(MVT::Glue);
2332 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2333 InFlag = SDValue(CNode, 0);
2334 }
2335 }
2337 // Prevent use of AH in a REX instruction by referencing AX instead.
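// (AH, BH, CH and DH cannot be encoded in any instruction carrying a REX
// prefix, which is required whenever R8-R15 or SPL/BPL/SIL/DIL appear.)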
2338 if (HiReg == X86::AH && Subtarget->is64Bit() &&
2339 !SDValue(Node, 1).use_empty()) {
2340 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2341 X86::AX, MVT::i16, InFlag);
2342 InFlag = Result.getValue(2);
2343 // Get the low part if needed. Don't use getCopyFromReg for aliasing
2344 // registers.
2345 if (!SDValue(Node, 0).use_empty())
2346 ReplaceUses(SDValue(Node, 0),
2347 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2349 // Shift AX down 8 bits.
2350 Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
2351 Result,
2352 CurDAG->getTargetConstant(8, dl, MVT::i8)),
2353 0);
2354 // Then truncate it down to i8.
2355 ReplaceUses(SDValue(Node, 1),
2356 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2357 }
2358 // Copy the low half of the result, if it is needed.
2359 if (!SDValue(Node, 0).use_empty()) {
2360 if (!ResLo.getNode()) {
2361 assert(LoReg && "Register for low half is not defined!");
2362 ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
2363 InFlag);
2364 InFlag = ResLo.getValue(2);
2365 }
2366 ReplaceUses(SDValue(Node, 0), ResLo);
2367 DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
2368 }
2369 // Copy the high half of the result, if it is needed.
2370 if (!SDValue(Node, 1).use_empty()) {
2371 if (!ResHi.getNode()) {
2372 assert(HiReg && "Register for high half is not defined!");
2373 ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
2374 InFlag);
2375 InFlag = ResHi.getValue(2);
2376 }
2377 ReplaceUses(SDValue(Node, 1), ResHi);
2378 DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
2379 }
2381 return;
2382 }
2384 case ISD::SDIVREM:
2385 case ISD::UDIVREM:
2386 case X86ISD::SDIVREM8_SEXT_HREG:
2387 case X86ISD::UDIVREM8_ZEXT_HREG: {
2388 SDValue N0 = Node->getOperand(0);
2389 SDValue N1 = Node->getOperand(1);
2391 bool isSigned = (Opcode == ISD::SDIVREM ||
2392 Opcode == X86ISD::SDIVREM8_SEXT_HREG);
2393 if (!isSigned) {
2394 switch (NVT.SimpleTy) {
2395 default: llvm_unreachable("Unsupported VT!");
2396 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
2397 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
2398 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
2399 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
2400 }
2401 } else {
2402 switch (NVT.SimpleTy) {
2403 default: llvm_unreachable("Unsupported VT!");
2404 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
2405 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
2406 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
2407 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
2408 }
2409 }
2411 unsigned LoReg, HiReg, ClrReg;
2412 unsigned SExtOpcode;
2413 switch (NVT.SimpleTy) {
2414 default: llvm_unreachable("Unsupported VT!");
2415 case MVT::i8:
2416 LoReg = X86::AL; ClrReg = HiReg = X86::AH;
2417 SExtOpcode = X86::CBW;
2418 break;
2419 case MVT::i16:
2420 LoReg = X86::AX; HiReg = X86::DX;
2421 ClrReg = X86::DX;
2422 SExtOpcode = X86::CWD;
2423 break;
2424 case MVT::i32:
2425 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
2426 SExtOpcode = X86::CDQ;
2427 break;
2428 case MVT::i64:
2429 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
2430 SExtOpcode = X86::CQO;
2431 break;
2432 }
2434 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2435 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2436 bool signBitIsZero = CurDAG->SignBitIsZero(N0);
2438 SDValue InFlag;
2439 if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
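// If the sign bit of the dividend is known zero, signed and unsigned
// division give the same result, so the cheaper unsigned setup suffices.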
2440 // Special case for div8, just use a move with zero extension to AX to
2441 // clear the upper 8 bits (AH).
2442 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
2443 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
2444 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
2445 Move =
2446 SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
2447 MVT::Other, Ops), 0);
2448 Chain = Move.getValue(1);
2449 ReplaceUses(N0.getValue(1), Chain);
2450 } else {
2451 Move =
2452 SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0), 0);
2453 Chain = CurDAG->getEntryNode();
2454 }
2455 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
2456 InFlag = Chain.getValue(1);
2457 } else {
2458 InFlag =
2459 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
2460 LoReg, N0, SDValue()).getValue(1);
2461 if (isSigned && !signBitIsZero) {
2462 // Sign extend the low part into the high part.
2463 InFlag =
2464 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag), 0);
2465 } else {
2466 // Zero out the high part, effectively zero extending the input.
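// MOV32r0 works for every width here: writing a 32-bit register
// implicitly clears bits 63:32, and the pseudo expands to xor reg,reg.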
2467 SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, MVT::i32), 0);
2468 switch (NVT.SimpleTy) {
2469 case MVT::i16:
2470 ClrNode =
2471 SDValue(CurDAG->getMachineNode(
2472 TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
2473 CurDAG->getTargetConstant(X86::sub_16bit, dl,
2474 MVT::i32)),
2475 0);
2476 break;
2477 case MVT::i32:
2478 break;
2479 case MVT::i64:
2480 ClrNode =
2481 SDValue(CurDAG->getMachineNode(
2482 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
2483 CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
2484 CurDAG->getTargetConstant(X86::sub_32bit, dl,
2485 MVT::i32)),
2486 0);
2487 break;
2488 default:
2489 llvm_unreachable("Unexpected division source");
2490 }
2492 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
2493 ClrNode, InFlag).getValue(1);
2494 }
2495 }
2497 if (foldedLoad) {
2498 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
2499 InFlag };
2500 SDNode *CNode =
2501 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
2502 InFlag = SDValue(CNode, 1);
2503 // Update the chain.
2504 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
2505 } else {
2506 InFlag =
2507 SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
2508 }
2510 // Prevent use of AH in a REX instruction by explicitly copying it to
2511 // an ABCD_L register.
2512 //
2513 // The current assumption of the register allocator is that isel
2514 // won't generate explicit references to the GR8_ABCD_H registers. If
2515 // the allocator and/or the backend get enhanced to be more robust in
2516 // that regard, this can be, and should be, removed.
2517 if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
2518 SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
2519 unsigned AHExtOpcode =
2520 isSigned ? X86::MOVSX32_NOREXrr8 : X86::MOVZX32_NOREXrr8;
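// The _NOREX forms restrict register allocation so the extension can
// never be paired with an operand that would demand a REX prefix.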
2522 SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
2523 MVT::Glue, AHCopy, InFlag);
2524 SDValue Result(RNode, 0);
2525 InFlag = SDValue(RNode, 1);
2527 if (Opcode == X86ISD::UDIVREM8_ZEXT_HREG ||
2528 Opcode == X86ISD::SDIVREM8_SEXT_HREG) {
2529 if (Node->getValueType(1) == MVT::i64) {
2530 // It's not possible to directly movsx AH to a 64bit register, because
2531 // the latter needs the REX prefix, but the former can't have it.
2532 assert(Opcode != X86ISD::SDIVREM8_SEXT_HREG &&
2533 "Unexpected i64 sext of h-register");
2534 Result =
2535 SDValue(CurDAG->getMachineNode(
2536 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
2537 CurDAG->getTargetConstant(0, dl, MVT::i64), Result,
2538 CurDAG->getTargetConstant(X86::sub_32bit, dl,
2539 MVT::i32)),
2540 0);
2541 }
2542 } else {
2543 Result =
2544 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
2545 }
2546 ReplaceUses(SDValue(Node, 1), Result);
2547 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
2548 }
2549 // Copy the division (low) result, if it is needed.
2550 if (!SDValue(Node, 0).use_empty()) {
2551 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2552 LoReg, NVT, InFlag);
2553 InFlag = Result.getValue(2);
2554 ReplaceUses(SDValue(Node, 0), Result);
2555 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
2557 // Copy the remainder (high) result, if it is needed.
2558 if (!SDValue(Node, 1).use_empty()) {
2559 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2560 HiReg, NVT, InFlag);
2561 InFlag = Result.getValue(2);
2562 ReplaceUses(SDValue(Node, 1), Result);
2563 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
2564 }
2565 return;
2566 }
2568 case X86ISD::CMP:
2569 case X86ISD::SUB: {
2570 // Sometimes a SUB is used to perform comparison.
2571 if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
2572 // This node is not a CMP.
2573 break;
2574 SDValue N0 = Node->getOperand(0);
2575 SDValue N1 = Node->getOperand(1);
2577 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
2578 hasNoSignedComparisonUses(Node))
2579 N0 = N0.getOperand(0);
2581 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
2582 // use a smaller encoding.
2583 // Look past the truncate if CMP is the only use of it.
2584 if ((N0.getNode()->getOpcode() == ISD::AND ||
2585 (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
2586 N0.getNode()->hasOneUse() &&
2587 N0.getValueType() != MVT::i8 &&
2588 X86::isZeroNode(N1)) {
2589 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
2590 if (!C) break;
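// Narrowing a test moves which bit ends up in SF, so each transform below
// requires either that the narrowed sign bit is not set in the immediate
// or that no user of EFLAGS inspects SF/OF.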
2592 // For example, convert "testl %eax, $8" to "testb %al, $8"
2593 if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
2594 (!(C->getZExtValue() & 0x80) ||
2595 hasNoSignedComparisonUses(Node))) {
2596 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl, MVT::i8);
2597 SDValue Reg = N0.getNode()->getOperand(0);
2599 // On x86-32, only the ABCD registers have 8-bit subregisters.
2600 if (!Subtarget->is64Bit()) {
2601 const TargetRegisterClass *TRC;
2602 switch (N0.getSimpleValueType().SimpleTy) {
2603 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2604 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
2605 default: llvm_unreachable("Unsupported TEST operand type!");
2606 }
2607 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
2608 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
2609 Reg.getValueType(), Reg, RC), 0);
2610 }
2612 // Extract the l-register.
2613 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
2614 MVT::i8, Reg);
2616 // Emit a testb.
2617 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
2618 Subreg, Imm);
2619 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2620 // one; do not call ReplaceAllUsesWith.
2621 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2622 SDValue(NewNode, 0));
2623 return;
2624 }
2626 // For example, "testl %eax, $2048" to "testb %ah, $8".
2627 if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
2628 (!(C->getZExtValue() & 0x8000) ||
2629 hasNoSignedComparisonUses(Node))) {
2630 // Shift the immediate right by 8 bits.
2631 SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
2632 dl, MVT::i8);
2633 SDValue Reg = N0.getNode()->getOperand(0);
2635 // Put the value in an ABCD register.
2636 const TargetRegisterClass *TRC;
2637 switch (N0.getSimpleValueType().SimpleTy) {
2638 case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
2639 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2640 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
2641 default: llvm_unreachable("Unsupported TEST operand type!");
2642 }
2643 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
2644 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
2645 Reg.getValueType(), Reg, RC), 0);
2647 // Extract the h-register.
2648 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
2649 MVT::i8, Reg);
2651 // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
2652 // target GR8_NOREX registers, so make sure the register class is
2653 // forced.
2654 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
2655 MVT::i32, Subreg, ShiftedImm);
2656 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2657 // one; do not call ReplaceAllUsesWith.
2658 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2659 SDValue(NewNode, 0));
2660 return;
2661 }
2663 // For example, "testl %eax, $32776" to "testw %ax, $32776".
2664 if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
2665 N0.getValueType() != MVT::i16 &&
2666 (!(C->getZExtValue() & 0x8000) ||
2667 hasNoSignedComparisonUses(Node))) {
2668 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl,
2669 MVT::i16);
2670 SDValue Reg = N0.getNode()->getOperand(0);
2672 // Extract the 16-bit subregister.
2673 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
2674 MVT::i16, Reg);
2676 // Emit a testw.
2677 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
2678 Subreg, Imm);
2679 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2680 // one; do not call ReplaceAllUsesWith.
2681 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2682 SDValue(NewNode, 0));
2683 return;
2684 }
2686 // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
2687 if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
2688 N0.getValueType() == MVT::i64 &&
2689 (!(C->getZExtValue() & 0x80000000) ||
2690 hasNoSignedComparisonUses(Node))) {
2691 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl,
2692 MVT::i32);
2693 SDValue Reg = N0.getNode()->getOperand(0);
2695 // Extract the 32-bit subregister.
2696 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
2697 MVT::i32, Reg);
2699 // Emit a testl.
2700 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
2701 Subreg, Imm);
2702 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2703 // one; do not call ReplaceAllUsesWith.
2704 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2705 SDValue(NewNode, 0));
2706 return;
2707 }
2708 }
2709 break;
2710 }
2711 case ISD::STORE: {
2712 // Change a chain of {load; incr or dec; store} of the same value into
2713 // a simple increment or decrement through memory of that value, if the
2714 // uses of the modified value and its address are suitable.
2715 // The DEC64m tablegen pattern is currently not able to match the case where
2716 // the EFLAGS on the original DEC are used. (This also applies to
2717 // {INC,DEC}X{64,32,16,8}.)
2718 // We'll need to improve tablegen to allow flags to be transferred from a
2719 // node in the pattern to the result node, probably with a new keyword.
2720 // For example, we currently have this:
2721 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2722 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2723 // (implicit EFLAGS)]>;
2724 // but may need something like this:
2725 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2726 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2727 // (transferrable EFLAGS)]>;
2729 StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
2730 SDValue StoredVal = StoreNode->getOperand(1);
2731 unsigned Opc = StoredVal->getOpcode();
2733 LoadSDNode *LoadNode = nullptr;
2734 SDValue InputChain;
2735 if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
2736 LoadNode, InputChain))
2737 break;
2739 SDValue Base, Scale, Index, Disp, Segment;
2740 if (!selectAddr(LoadNode, LoadNode->getBasePtr(),
2741 Base, Scale, Index, Disp, Segment))
2742 break;
2744 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
2745 MemOp[0] = StoreNode->getMemOperand();
2746 MemOp[1] = LoadNode->getMemOperand();
2747 const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
2748 EVT LdVT = LoadNode->getMemoryVT();
2749 unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
2750 MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
2751 SDLoc(Node),
2752 MVT::i32, MVT::Other, Ops);
2753 Result->setMemRefs(MemOp, MemOp + 2);
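// Result 0 of the fused node is EFLAGS, taking over the INC/DEC flag
// output; result 1 is the chain, taking over the store's chain output.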
2755 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
2756 ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
2757 CurDAG->RemoveDeadNode(Node);
2758 return;
2759 }
2760 }
2762 SelectCode(Node);
2763 }
2765 bool X86DAGToDAGISel::
2766 SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
2767 std::vector<SDValue> &OutOps) {
2768 SDValue Op0, Op1, Op2, Op3, Op4;
2769 switch (ConstraintID) {
2770 default:
2771 llvm_unreachable("Unexpected asm memory constraint");
2772 case InlineAsm::Constraint_i:
2773 // FIXME: It seems strange that 'i' is needed here since it's supposed to
2774 // be an immediate and not a memory constraint.
2775 LLVM_FALLTHROUGH;
2776 case InlineAsm::Constraint_o: // offsetable ??
2777 case InlineAsm::Constraint_v: // not offsetable ??
2778 case InlineAsm::Constraint_m: // memory
2779 case InlineAsm::Constraint_X:
2780 if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
2781 return true;
2782 break;
2783 }
2785 OutOps.push_back(Op0);
2786 OutOps.push_back(Op1);
2787 OutOps.push_back(Op2);
2788 OutOps.push_back(Op3);
2789 OutOps.push_back(Op4);
2790 return false;
2791 }
2793 /// This pass converts a legalized DAG into a X86-specific DAG,
2794 /// ready for instruction scheduling.
2795 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
2796 CodeGenOpt::Level OptLevel) {
2797 return new X86DAGToDAGISel(TM, OptLevel);
2798 }