1 //===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements the SelectionDAG class.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/CodeGen/SelectionDAG.h"
15 #include "SDNodeDbgValue.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/ADT/SetVector.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/CodeGen/MachineBasicBlock.h"
24 #include "llvm/CodeGen/MachineConstantPool.h"
25 #include "llvm/CodeGen/MachineFrameInfo.h"
26 #include "llvm/CodeGen/MachineModuleInfo.h"
27 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/DebugInfo.h"
32 #include "llvm/IR/DerivedTypes.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/GlobalAlias.h"
35 #include "llvm/IR/GlobalVariable.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/ManagedStatic.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Support/Mutex.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Target/TargetInstrInfo.h"
44 #include "llvm/Target/TargetIntrinsicInfo.h"
45 #include "llvm/Target/TargetLowering.h"
46 #include "llvm/Target/TargetMachine.h"
47 #include "llvm/Target/TargetOptions.h"
48 #include "llvm/Target/TargetRegisterInfo.h"
49 #include "llvm/Target/TargetSubtargetInfo.h"
56 /// makeVTList - Return an instance of the SDVTList struct initialized with the
57 /// specified members.
58 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
59 SDVTList Res = {VTs, NumVTs};
60 return Res;
63 // Default null implementations of the callbacks.
64 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
65 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
67 //===----------------------------------------------------------------------===//
68 // ConstantFPSDNode Class
69 //===----------------------------------------------------------------------===//
71 /// isExactlyValue - We don't rely on operator== working on double values, as
72 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
73 /// As such, this method can be used to do an exact bit-for-bit comparison of
74 /// two floating point values.
75 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
76 return getValueAPF().bitwiseIsEqual(V);
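// Illustrative example (added; not part of the original file): operator== on
// doubles treats 0.0 and -0.0 as equal even though their bit patterns differ,
// which is exactly what bitwiseIsEqual avoids:
//   APFloat PosZero(0.0), NegZero(-0.0);
//   PosZero.compare(NegZero) == APFloat::cmpEqual;  // numerically "equal"
//   PosZero.bitwiseIsEqual(NegZero);                // false: sign bits differ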
79 bool ConstantFPSDNode::isValueValidForType(EVT VT,
80 const APFloat& Val) {
81 assert(VT.isFloatingPoint() && "Can only convert between FP types");
83 // convert modifies in place, so make a copy.
84 APFloat Val2 = APFloat(Val);
86 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
87 APFloat::rmNearestTiesToEven,
92 //===----------------------------------------------------------------------===//
94 //===----------------------------------------------------------------------===//
96 bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
97 auto *BV = dyn_cast<BuildVectorSDNode>(N);
102 unsigned SplatBitSize;
104 EVT EltVT = N->getValueType(0).getVectorElementType();
105 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs) &&
106 EltVT.getSizeInBits() >= SplatBitSize;
109 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
110 // specializations of the more general isConstantSplatVector()?
112 bool ISD::isBuildVectorAllOnes(const SDNode *N) {
113 // Look through a bit convert.
114 while (N->getOpcode() == ISD::BITCAST)
115 N = N->getOperand(0).getNode();
117 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
119 unsigned i = 0, e = N->getNumOperands();
121 // Skip over all of the undef values.
122 while (i != e && N->getOperand(i).isUndef())
125 // Do not accept an all-undef vector.
126 if (i == e) return false;
128 // Do not accept build_vectors that aren't all constants or which have non-~0
129 // elements. We have to be a bit careful here, as the type of the constant
130 // may not be the same as the type of the vector elements due to type
131 // legalization (the elements are promoted to a legal type for the target and
132 // a vector of a type may be legal when the base element type is not).
133 // We only want to check enough bits to cover the vector elements, because
134 // we care if the resultant vector is all ones, not whether the individual constants are.
136 SDValue NotZero = N->getOperand(i);
137 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
138 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
139 if (CN->getAPIntValue().countTrailingOnes() < EltSize)
141 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
142 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
147 // Okay, we have at least one ~0 value, check to see if the rest match or are
148 // undefs. Even with the above element type twiddling, this should be OK, as
149 // the same type legalization should have applied to all the elements.
150 for (++i; i != e; ++i)
151 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
156 bool ISD::isBuildVectorAllZeros(const SDNode *N) {
157 // Look through a bit convert.
158 while (N->getOpcode() == ISD::BITCAST)
159 N = N->getOperand(0).getNode();
161 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
163 bool IsAllUndef = true;
164 for (const SDValue &Op : N->op_values()) {
168 // Do not accept build_vectors that aren't all constants or which have non-0
169 // elements. We have to be a bit careful here, as the type of the constant
170 // may not be the same as the type of the vector elements due to type
171 // legalization (the elements are promoted to a legal type for the target
172 // and a vector of a type may be legal when the base element type is not).
173 // We only want to check enough bits to cover the vector elements, because
174 // we care if the resultant vector is all zeros, not whether the individual constants are.
176 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
177 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
178 if (CN->getAPIntValue().countTrailingZeros() < EltSize)
180 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
181 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
187 // Do not accept an all-undef vector.
193 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
194 if (N->getOpcode() != ISD::BUILD_VECTOR)
197 for (const SDValue &Op : N->op_values()) {
200 if (!isa<ConstantSDNode>(Op))
206 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
207 if (N->getOpcode() != ISD::BUILD_VECTOR)
210 for (const SDValue &Op : N->op_values()) {
213 if (!isa<ConstantFPSDNode>(Op))
219 bool ISD::allOperandsUndef(const SDNode *N) {
220 // Return false if the node has no operands.
221 // This is "logically inconsistent" with the definition of "all" but
222 // is probably the desired behavior.
223 if (N->getNumOperands() == 0)
226 for (const SDValue &Op : N->op_values())
233 ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
236 return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
238 return ISD::SIGN_EXTEND;
240 return ISD::ZERO_EXTEND;
245 llvm_unreachable("Invalid LoadExtType");
248 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
249 // To perform this operation, we just need to swap the L and G bits of the operation.
251 unsigned OldL = (Operation >> 2) & 1;
252 unsigned OldG = (Operation >> 1) & 1;
253 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
254 (OldL << 1) | // New G bit
255 (OldG << 2)); // New L bit.
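// Worked example (illustrative): SETLT encodes L=1, G=0, so swapping the L and
// G bits yields SETGT; setcc(A, B, SETLT) is therefore equivalent to
// setcc(B, A, SETGT) with the operands exchanged.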
258 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
259 unsigned Operation = Op;
261 Operation ^= 7; // Flip L, G, E bits, but not U.
263 Operation ^= 15; // Flip all of the condition bits.
265 if (Operation > ISD::SETTRUE2)
266 Operation &= ~8; // Don't let N and U bits get set.
268 return ISD::CondCode(Operation);
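// Worked example (illustrative): for an integer compare, SETLT ^ 7 flips the
// L, G and E bits and gives SETGE, matching !(A < B) == (A >= B). For an FP
// compare all four condition bits are flipped, so SETOLT becomes SETUGE: the
// inverse must also hold on unordered inputs such as NaN.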
272 /// For an integer comparison, return 1 if the comparison is a signed operation
273 /// and 2 if the result is an unsigned comparison. Return zero if the operation
274 /// does not depend on the sign of the input (setne and seteq).
275 static int isSignedOp(ISD::CondCode Opcode) {
277 default: llvm_unreachable("Illegal integer setcc operation!");
279 case ISD::SETNE: return 0;
283 case ISD::SETGE: return 1;
287 case ISD::SETUGE: return 2;
291 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
293 if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
294 // Cannot fold a signed integer setcc with an unsigned integer setcc.
295 return ISD::SETCC_INVALID;
297 unsigned Op = Op1 | Op2; // Combine all of the condition bits.
299 // If the N and U bits get set then the resultant comparison DOES suddenly
300 // care about orderedness, and is true when ordered.
301 if (Op > ISD::SETTRUE2)
302 Op &= ~16; // Clear the U bit if the N bit is set.
304 // Canonicalize illegal integer setcc's.
305 if (isInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
308 return ISD::CondCode(Op);
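// Worked example (illustrative): OR-ing the condition bits of the integer
// codes SETGT and SETLT gives SETNE, matching (A > B) || (A < B) == (A != B).
// Similarly SETUGT | SETULT yields SETUNE, which the canonicalization above
// turns into the legal integer form SETNE.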
311 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
313 if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
314 // Cannot fold a signed setcc with an unsigned setcc.
315 return ISD::SETCC_INVALID;
317 // Combine all of the condition bits.
318 ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
320 // Canonicalize illegal integer setcc's.
324 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT
325 case ISD::SETOEQ: // SETEQ & SETU[LG]E
326 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE
327 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE
328 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE
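// Worked example (illustrative): AND-ing the condition bits of the integer
// codes SETLE and SETGE leaves only the E bit set, giving SETEQ, which matches
// (A <= B) && (A >= B) == (A == B).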
335 //===----------------------------------------------------------------------===//
336 // SDNode Profile Support
337 //===----------------------------------------------------------------------===//
339 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
341 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
345 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
346 /// solely with their pointer.
347 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
348 ID.AddPointer(VTList.VTs);
351 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
353 static void AddNodeIDOperands(FoldingSetNodeID &ID,
354 ArrayRef<SDValue> Ops) {
355 for (auto& Op : Ops) {
356 ID.AddPointer(Op.getNode());
357 ID.AddInteger(Op.getResNo());
361 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
363 static void AddNodeIDOperands(FoldingSetNodeID &ID,
364 ArrayRef<SDUse> Ops) {
365 for (auto& Op : Ops) {
366 ID.AddPointer(Op.getNode());
367 ID.AddInteger(Op.getResNo());
371 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
372 SDVTList VTList, ArrayRef<SDValue> OpList) {
373 AddNodeIDOpcode(ID, OpC);
374 AddNodeIDValueTypes(ID, VTList);
375 AddNodeIDOperands(ID, OpList);
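// Usage sketch (illustrative; N1, N2, VT and DL are placeholder names): the
// member functions later in this file build an ID with these helpers and probe
// the CSE map before allocating a new node, roughly:
//   FoldingSetNodeID ID;
//   AddNodeIDNode(ID, ISD::ADD, getVTList(VT), {N1, N2});
//   void *IP = nullptr;
//   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
//     return SDValue(E, 0);  // reuse the structurally identical node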
378 /// If this is an SDNode with special info, add this info to the NodeID data.
379 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
380 switch (N->getOpcode()) {
381 case ISD::TargetExternalSymbol:
382 case ISD::ExternalSymbol:
384 llvm_unreachable("Should only be used on nodes with operands");
385 default: break; // Normal nodes don't need extra info.
386 case ISD::TargetConstant:
387 case ISD::Constant: {
388 const ConstantSDNode *C = cast<ConstantSDNode>(N);
389 ID.AddPointer(C->getConstantIntValue());
390 ID.AddBoolean(C->isOpaque());
393 case ISD::TargetConstantFP:
394 case ISD::ConstantFP: {
395 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
398 case ISD::TargetGlobalAddress:
399 case ISD::GlobalAddress:
400 case ISD::TargetGlobalTLSAddress:
401 case ISD::GlobalTLSAddress: {
402 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
403 ID.AddPointer(GA->getGlobal());
404 ID.AddInteger(GA->getOffset());
405 ID.AddInteger(GA->getTargetFlags());
408 case ISD::BasicBlock:
409 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
412 ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
414 case ISD::RegisterMask:
415 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
418 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
420 case ISD::FrameIndex:
421 case ISD::TargetFrameIndex:
422 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
425 case ISD::TargetJumpTable:
426 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
427 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
429 case ISD::ConstantPool:
430 case ISD::TargetConstantPool: {
431 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
432 ID.AddInteger(CP->getAlignment());
433 ID.AddInteger(CP->getOffset());
434 if (CP->isMachineConstantPoolEntry())
435 CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
437 ID.AddPointer(CP->getConstVal());
438 ID.AddInteger(CP->getTargetFlags());
441 case ISD::TargetIndex: {
442 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
443 ID.AddInteger(TI->getIndex());
444 ID.AddInteger(TI->getOffset());
445 ID.AddInteger(TI->getTargetFlags());
449 const LoadSDNode *LD = cast<LoadSDNode>(N);
450 ID.AddInteger(LD->getMemoryVT().getRawBits());
451 ID.AddInteger(LD->getRawSubclassData());
452 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
456 const StoreSDNode *ST = cast<StoreSDNode>(N);
457 ID.AddInteger(ST->getMemoryVT().getRawBits());
458 ID.AddInteger(ST->getRawSubclassData());
459 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
462 case ISD::ATOMIC_CMP_SWAP:
463 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
464 case ISD::ATOMIC_SWAP:
465 case ISD::ATOMIC_LOAD_ADD:
466 case ISD::ATOMIC_LOAD_SUB:
467 case ISD::ATOMIC_LOAD_AND:
468 case ISD::ATOMIC_LOAD_OR:
469 case ISD::ATOMIC_LOAD_XOR:
470 case ISD::ATOMIC_LOAD_NAND:
471 case ISD::ATOMIC_LOAD_MIN:
472 case ISD::ATOMIC_LOAD_MAX:
473 case ISD::ATOMIC_LOAD_UMIN:
474 case ISD::ATOMIC_LOAD_UMAX:
475 case ISD::ATOMIC_LOAD:
476 case ISD::ATOMIC_STORE: {
477 const AtomicSDNode *AT = cast<AtomicSDNode>(N);
478 ID.AddInteger(AT->getMemoryVT().getRawBits());
479 ID.AddInteger(AT->getRawSubclassData());
480 ID.AddInteger(AT->getPointerInfo().getAddrSpace());
483 case ISD::PREFETCH: {
484 const MemSDNode *PF = cast<MemSDNode>(N);
485 ID.AddInteger(PF->getPointerInfo().getAddrSpace());
488 case ISD::VECTOR_SHUFFLE: {
489 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
490 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
492 ID.AddInteger(SVN->getMaskElt(i));
495 case ISD::TargetBlockAddress:
496 case ISD::BlockAddress: {
497 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
498 ID.AddPointer(BA->getBlockAddress());
499 ID.AddInteger(BA->getOffset());
500 ID.AddInteger(BA->getTargetFlags());
503 } // end switch (N->getOpcode())
505 // Target specific memory nodes could also have address spaces to check.
506 if (N->isTargetMemoryOpcode())
507 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
510 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
512 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
513 AddNodeIDOpcode(ID, N->getOpcode());
514 // Add the return value info.
515 AddNodeIDValueTypes(ID, N->getVTList());
516 // Add the operand info.
517 AddNodeIDOperands(ID, N->ops());
519 // Handle SDNode leaves with special info.
520 AddNodeIDCustom(ID, N);
523 //===----------------------------------------------------------------------===//
524 // SelectionDAG Class
525 //===----------------------------------------------------------------------===//
527 /// doNotCSE - Return true if CSE should not be performed for this node.
528 static bool doNotCSE(SDNode *N) {
529 if (N->getValueType(0) == MVT::Glue)
530 return true; // Never CSE anything that produces a flag.
532 switch (N->getOpcode()) {
534 case ISD::HANDLENODE:
536 return true; // Never CSE these nodes.
539 // Check that remaining values produced are not flags.
540 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
541 if (N->getValueType(i) == MVT::Glue)
542 return true; // Never CSE anything that produces a flag.
547 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
549 void SelectionDAG::RemoveDeadNodes() {
550 // Create a dummy node (which is not added to allnodes), that adds a reference
551 // to the root node, preventing it from being deleted.
552 HandleSDNode Dummy(getRoot());
554 SmallVector<SDNode*, 128> DeadNodes;
556 // Add all obviously-dead nodes to the DeadNodes worklist.
557 for (SDNode &Node : allnodes())
558 if (Node.use_empty())
559 DeadNodes.push_back(&Node);
561 RemoveDeadNodes(DeadNodes);
563 // If the root changed (e.g. it was a dead load), update the root.
564 setRoot(Dummy.getValue());
567 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
568 /// given list, and any nodes that become unreachable as a result.
569 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
571 // Process the worklist, deleting the nodes and adding their uses to the
573 while (!DeadNodes.empty()) {
574 SDNode *N = DeadNodes.pop_back_val();
576 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
577 DUL->NodeDeleted(N, nullptr);
579 // Take the node out of the appropriate CSE map.
580 RemoveNodeFromCSEMaps(N);
582 // Next, brutally remove the operand list. This is safe to do, as there are
583 // no cycles in the graph.
584 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
586 SDNode *Operand = Use.getNode();
589 // Now that we removed this operand, see if there are no uses of it left.
590 if (Operand->use_empty())
591 DeadNodes.push_back(Operand);
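// Illustrative cascade (added): deleting a dead node can make its operands
// dead as well, e.g. removing t2 below leaves t1 use-free, so t1 is pushed
// onto DeadNodes and deleted on a later iteration.
//   t1: i32 = mul a, b     (only user is t2)
//   t2: i32 = add t1, c    (dead: no uses)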
598 void SelectionDAG::RemoveDeadNode(SDNode *N){
599 SmallVector<SDNode*, 16> DeadNodes(1, N);
601 // Create a dummy node that adds a reference to the root node, preventing
602 // it from being deleted. (This matters if the root is an operand of the
604 HandleSDNode Dummy(getRoot());
606 RemoveDeadNodes(DeadNodes);
609 void SelectionDAG::DeleteNode(SDNode *N) {
610 // First take this out of the appropriate CSE map.
611 RemoveNodeFromCSEMaps(N);
613 // Finally, remove uses due to operands of this node, remove from the
614 // AllNodes list, and delete the node.
615 DeleteNodeNotInCSEMaps(N);
618 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
619 assert(N->getIterator() != AllNodes.begin() &&
620 "Cannot delete the entry node!");
621 assert(N->use_empty() && "Cannot delete a node that is not dead!");
623 // Drop all of the operands and decrement used node's use counts.
629 void SDDbgInfo::erase(const SDNode *Node) {
630 DbgValMapType::iterator I = DbgValMap.find(Node);
631 if (I == DbgValMap.end())
633 for (auto &Val: I->second)
634 Val->setIsInvalidated();
638 void SelectionDAG::DeallocateNode(SDNode *N) {
639 // If we have operands, deallocate them.
642 // Set the opcode to DELETED_NODE to help catch bugs when node
643 // memory is reallocated.
644 N->NodeType = ISD::DELETED_NODE;
646 NodeAllocator.Deallocate(AllNodes.remove(N));
648 // If any of the SDDbgValue nodes refer to this SDNode, invalidate
649 // them and forget about that node.
654 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
655 static void VerifySDNode(SDNode *N) {
656 switch (N->getOpcode()) {
659 case ISD::BUILD_PAIR: {
660 EVT VT = N->getValueType(0);
661 assert(N->getNumValues() == 1 && "Too many results!");
662 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
663 "Wrong return type!");
664 assert(N->getNumOperands() == 2 && "Wrong number of operands!");
665 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
666 "Mismatched operand types!");
667 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
668 "Wrong operand type!");
669 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
670 "Wrong return type size");
673 case ISD::BUILD_VECTOR: {
674 assert(N->getNumValues() == 1 && "Too many results!");
675 assert(N->getValueType(0).isVector() && "Wrong return type!");
676 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
677 "Wrong number of operands!");
678 EVT EltVT = N->getValueType(0).getVectorElementType();
679 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
680 assert((I->getValueType() == EltVT ||
681 (EltVT.isInteger() && I->getValueType().isInteger() &&
682 EltVT.bitsLE(I->getValueType()))) &&
683 "Wrong operand type!");
684 assert(I->getValueType() == N->getOperand(0).getValueType() &&
685 "Operands must all have the same type");
693 /// \brief Insert a newly allocated node into the DAG.
695 /// Handles insertion into the all nodes list and CSE map, as well as
696 /// verification and other common operations when a new node is allocated.
697 void SelectionDAG::InsertNode(SDNode *N) {
698 AllNodes.push_back(N);
700 N->PersistentId = NextPersistentId++;
705 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
706 /// correspond to it. This is useful when we're about to delete or repurpose
707 /// the node. We don't want future requests for structurally identical nodes
708 /// to return N anymore.
709 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
711 switch (N->getOpcode()) {
712 case ISD::HANDLENODE: return false; // noop.
714 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
715 "Cond code doesn't exist!");
716 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
717 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
719 case ISD::ExternalSymbol:
720 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
722 case ISD::TargetExternalSymbol: {
723 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
724 Erased = TargetExternalSymbols.erase(
725 std::pair<std::string,unsigned char>(ESN->getSymbol(),
726 ESN->getTargetFlags()));
729 case ISD::MCSymbol: {
730 auto *MCSN = cast<MCSymbolSDNode>(N);
731 Erased = MCSymbols.erase(MCSN->getMCSymbol());
734 case ISD::VALUETYPE: {
735 EVT VT = cast<VTSDNode>(N)->getVT();
736 if (VT.isExtended()) {
737 Erased = ExtendedValueTypeNodes.erase(VT);
739 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
740 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
745 // Remove it from the CSE Map.
746 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
747 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
748 Erased = CSEMap.RemoveNode(N);
752 // Verify that the node was actually in one of the CSE maps, unless it has a
753 // flag result (which cannot be CSE'd) or is one of the special cases that are
754 // not subject to CSE.
755 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
756 !N->isMachineOpcode() && !doNotCSE(N)) {
759 llvm_unreachable("Node is not in map!");
765 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
766 /// maps and modified in place. Add it back to the CSE maps, unless an identical
767 /// node already exists, in which case transfer all its users to the existing
768 /// node. This transfer can potentially trigger recursive merging.
771 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
772 // For node types that aren't CSE'd, just act as if no identical node
775 SDNode *Existing = CSEMap.GetOrInsertNode(N);
777 // If there was already an existing matching node, use ReplaceAllUsesWith
778 // to replace the dead one with the existing one. This can cause
779 // recursive merging of other unrelated nodes down the line.
780 ReplaceAllUsesWith(N, Existing);
782 // N is now dead. Inform the listeners and delete it.
783 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
784 DUL->NodeDeleted(N, Existing);
785 DeleteNodeNotInCSEMaps(N);
790 // If the node doesn't already exist, we updated it. Inform listeners.
791 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
795 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
796 /// were replaced with those specified. If this node is never memoized,
797 /// return null, otherwise return a pointer to the slot it would take. If a
798 /// node already exists with these operands, the slot will be non-null.
799 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
804 SDValue Ops[] = { Op };
806 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
807 AddNodeIDCustom(ID, N);
808 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
810 if (const SDNodeFlags *Flags = N->getFlags())
811 Node->intersectFlagsWith(Flags);
815 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
816 /// were replaced with those specified. If this node is never memoized,
817 /// return null, otherwise return a pointer to the slot it would take. If a
818 /// node already exists with these operands, the slot will be non-null.
819 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
820 SDValue Op1, SDValue Op2,
825 SDValue Ops[] = { Op1, Op2 };
827 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
828 AddNodeIDCustom(ID, N);
829 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
831 if (const SDNodeFlags *Flags = N->getFlags())
832 Node->intersectFlagsWith(Flags);
837 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
838 /// were replaced with those specified. If this node is never memoized,
839 /// return null, otherwise return a pointer to the slot it would take. If a
840 /// node already exists with these operands, the slot will be non-null.
841 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
847 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
848 AddNodeIDCustom(ID, N);
849 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
851 if (const SDNodeFlags *Flags = N->getFlags())
852 Node->intersectFlagsWith(Flags);
856 unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
857 Type *Ty = VT == MVT::iPTR ?
858 PointerType::get(Type::getInt8Ty(*getContext()), 0) :
859 VT.getTypeForEVT(*getContext());
861 return getDataLayout().getABITypeAlignment(Ty);
864 // EntryNode could meaningfully have debug info if we can find it...
865 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
866 : TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
867 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
868 Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
869 UpdateListeners(nullptr) {
870 InsertNode(&EntryNode);
871 DbgInfo = new SDDbgInfo();
874 void SelectionDAG::init(MachineFunction &mf) {
876 TLI = getSubtarget().getTargetLowering();
877 TSI = getSubtarget().getSelectionDAGInfo();
878 Context = &mf.getFunction()->getContext();
881 SelectionDAG::~SelectionDAG() {
882 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
884 OperandRecycler.clear(OperandAllocator);
888 void SelectionDAG::allnodes_clear() {
889 assert(&*AllNodes.begin() == &EntryNode);
890 AllNodes.remove(AllNodes.begin());
891 while (!AllNodes.empty())
892 DeallocateNode(&AllNodes.front());
894 NextPersistentId = 0;
898 SDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, const SDLoc &DL,
899 SDVTList VTs, SDValue N1, SDValue N2,
900 const SDNodeFlags *Flags) {
901 SDValue Ops[] = {N1, N2};
903 if (isBinOpWithFlags(Opcode)) {
904 // If no flags were passed in, use a default flags object.
906 if (Flags == nullptr)
909 auto *FN = newSDNode<BinaryWithFlagsSDNode>(Opcode, DL.getIROrder(),
910 DL.getDebugLoc(), VTs, *Flags);
911 createOperands(FN, Ops);
916 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
917 createOperands(N, Ops);
921 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
923 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
925 switch (N->getOpcode()) {
928 case ISD::ConstantFP:
929 llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
930 "debug location. Use another overload.");
936 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
937 const SDLoc &DL, void *&InsertPos) {
938 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
940 switch (N->getOpcode()) {
942 case ISD::ConstantFP:
943 // Erase debug location from the node if the node is used at several
944 // different places. Do not propagate one location to all uses as it
945 // will cause a worse single stepping debugging experience.
946 if (N->getDebugLoc() != DL.getDebugLoc())
947 N->setDebugLoc(DebugLoc());
950 // When the node's point of use is located earlier in the instruction
951 // sequence than its prior point of use, update its debug info to the earlier location.
953 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
954 N->setDebugLoc(DL.getDebugLoc());
961 void SelectionDAG::clear() {
963 OperandRecycler.clear(OperandAllocator);
964 OperandAllocator.Reset();
967 ExtendedValueTypeNodes.clear();
968 ExternalSymbols.clear();
969 TargetExternalSymbols.clear();
971 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
972 static_cast<CondCodeSDNode*>(nullptr));
973 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
974 static_cast<SDNode*>(nullptr));
976 EntryNode.UseList = nullptr;
977 InsertNode(&EntryNode);
978 Root = getEntryNode();
982 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
983 return VT.bitsGT(Op.getValueType()) ?
984 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
985 getNode(ISD::TRUNCATE, DL, VT, Op);
988 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
989 return VT.bitsGT(Op.getValueType()) ?
990 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
991 getNode(ISD::TRUNCATE, DL, VT, Op);
994 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
995 return VT.bitsGT(Op.getValueType()) ?
996 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
997 getNode(ISD::TRUNCATE, DL, VT, Op);
1000 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
1002 if (VT.bitsLE(Op.getValueType()))
1003 return getNode(ISD::TRUNCATE, SL, VT, Op);
1005 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1006 return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1009 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1010 assert(!VT.isVector() &&
1011 "getZeroExtendInReg should use the vector element type instead of "
1012 "the vector type!");
1013 if (Op.getValueType() == VT) return Op;
1014 unsigned BitWidth = Op.getScalarValueSizeInBits();
1015 APInt Imm = APInt::getLowBitsSet(BitWidth,
1016 VT.getSizeInBits());
1017 return getNode(ISD::AND, DL, Op.getValueType(), Op,
1018 getConstant(Imm, DL, Op.getValueType()));
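// Worked example (illustrative; DAG, X and DL are placeholder names):
// zero-extending the low 8 bits of an i32 value in-register builds
// (and X, 0xFF), since getLowBitsSet(32, 8) == 0x000000FF.
//   SDValue Masked = DAG.getZeroExtendInReg(X, DL, MVT::i8);  // X is i32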
1021 SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
1023 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1024 assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1025 "The sizes of the input and result must match in order to perform the "
1026 "extend in-register.");
1027 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1028 "The destination vector type must have fewer lanes than the input.");
1029 return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
1032 SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
1034 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1035 assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1036 "The sizes of the input and result must match in order to perform the "
1037 "extend in-register.");
1038 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1039 "The destination vector type must have fewer lanes than the input.");
1040 return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
1043 SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
1045 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1046 assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1047 "The sizes of the input and result must match in order to perform the "
1048 "extend in-register.");
1049 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1050 "The destination vector type must have fewer lanes than the input.");
1051 return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
1054 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1056 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1057 EVT EltVT = VT.getScalarType();
1059 getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
1060 return getNode(ISD::XOR, DL, VT, Val, NegOne);
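// Usage sketch (illustrative; DAG, V and DL are placeholder names): for an i32
// value this emits (xor V, -1), because an all-ones constant of the value's
// width is the bitwise-NOT mask.
//   SDValue NotV = DAG.getNOT(DL, V, MVT::i32);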
1063 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1064 EVT EltVT = VT.getScalarType();
1066 switch (TLI->getBooleanContents(VT)) {
1067 case TargetLowering::ZeroOrOneBooleanContent:
1068 case TargetLowering::UndefinedBooleanContent:
1069 TrueValue = getConstant(1, DL, VT);
1071 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1072 TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
1076 return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1079 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1080 bool isT, bool isO) {
1081 EVT EltVT = VT.getScalarType();
1082 assert((EltVT.getSizeInBits() >= 64 ||
1083 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1084 "getConstant with a uint64_t value that doesn't fit in the type!");
1085 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1088 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
1089 bool isT, bool isO) {
1090 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1093 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
1094 EVT VT, bool isT, bool isO) {
1095 assert(VT.isInteger() && "Cannot create FP integer constant!");
1097 EVT EltVT = VT.getScalarType();
1098 const ConstantInt *Elt = &Val;
1100 // In some cases the vector type is legal but the element type is illegal and
1101 // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1102 // inserted value (the type does not need to match the vector element type).
1103 // Any extra bits introduced will be truncated away.
1104 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1105 TargetLowering::TypePromoteInteger) {
1106 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1107 APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
1108 Elt = ConstantInt::get(*getContext(), NewVal);
1110 // In other cases the element type is illegal and needs to be expanded, for
1111 // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1112 // the value into n parts and use a vector type with n-times the elements.
1113 // Then bitcast to the type requested.
1114 // Legalizing constants too early makes the DAGCombiner's job harder so we
1115 // only legalize if the DAG tells us we must produce legal types.
1116 else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1117 TLI->getTypeAction(*getContext(), EltVT) ==
1118 TargetLowering::TypeExpandInteger) {
1119 const APInt &NewVal = Elt->getValue();
1120 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1121 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1122 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1123 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1125 // Check the temporary vector is the correct size. If this fails then
1126 // getTypeToTransformTo() probably returned a type whose size (in bits)
1127 // isn't a power-of-2 factor of the requested type size.
1128 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1130 SmallVector<SDValue, 2> EltParts;
1131 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
1132 EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
1133 .trunc(ViaEltSizeInBits), DL,
1134 ViaEltVT, isT, isO));
1137 // EltParts is currently in little endian order. If we actually want
1138 // big-endian order then reverse it now.
1139 if (getDataLayout().isBigEndian())
1140 std::reverse(EltParts.begin(), EltParts.end());
1142 // The elements must be reversed when the element order is different
1143 // to the endianness of the elements (because the BITCAST is itself a
1144 // vector shuffle in this situation). However, we do not need any code to
1145 // perform this reversal because getConstant() is producing a vector splat.
1147 // This situation occurs in MIPS MSA.
1149 SmallVector<SDValue, 8> Ops;
1150 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1151 Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
1152 return getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1155 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1156 "APInt size does not match type size!");
1157 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1158 FoldingSetNodeID ID;
1159 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1163 SDNode *N = nullptr;
1164 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1166 return SDValue(N, 0);
1169 N = newSDNode<ConstantSDNode>(isT, isO, Elt, DL.getDebugLoc(), EltVT);
1170 CSEMap.InsertNode(N, IP);
1174 SDValue Result(N, 0);
1176 Result = getSplatBuildVector(VT, DL, Result);
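// Worked example (illustrative): once NewNodesMustHaveLegalTypes is set on a
// 32-bit target such as MIPS32 where i64 is expanded, a v2i64 splat of
// 0x0000000100000002 is built from i32 parts as the v4i32 BUILD_VECTOR
// <2, 1, 2, 1> (little-endian part order; reversed on big-endian targets) and
// then bitcast back to v2i64.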
1180 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1182 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1185 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1187 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1190 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1191 EVT VT, bool isTarget) {
1192 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1194 EVT EltVT = VT.getScalarType();
1196 // Do the map lookup using the actual bit pattern for the floating point
1197 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1198 // we don't have issues with SNANs.
1199 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1200 FoldingSetNodeID ID;
1201 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1204 SDNode *N = nullptr;
1205 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1207 return SDValue(N, 0);
1210 N = newSDNode<ConstantFPSDNode>(isTarget, &V, DL.getDebugLoc(), EltVT);
1211 CSEMap.InsertNode(N, IP);
1215 SDValue Result(N, 0);
1217 Result = getSplatBuildVector(VT, DL, Result);
1221 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1223 EVT EltVT = VT.getScalarType();
1224 if (EltVT == MVT::f32)
1225 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1226 else if (EltVT == MVT::f64)
1227 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1228 else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1229 EltVT == MVT::f16) {
1231 APFloat APF = APFloat(Val);
1232 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1234 return getConstantFP(APF, DL, VT, isTarget);
1236 llvm_unreachable("Unsupported type in getConstantFP");
1239 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1240 EVT VT, int64_t Offset, bool isTargetGA,
1241 unsigned char TargetFlags) {
1242 assert((TargetFlags == 0 || isTargetGA) &&
1243 "Cannot set target flags on target-independent globals");
1245 // Truncate (with sign-extension) the offset value to the pointer size.
1246 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1248 Offset = SignExtend64(Offset, BitWidth);
1251 if (GV->isThreadLocal())
1252 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1254 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1256 FoldingSetNodeID ID;
1257 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1259 ID.AddInteger(Offset);
1260 ID.AddInteger(TargetFlags);
1262 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1263 return SDValue(E, 0);
1265 auto *N = newSDNode<GlobalAddressSDNode>(
1266 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1267 CSEMap.InsertNode(N, IP);
1269 return SDValue(N, 0);
1272 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1273 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1274 FoldingSetNodeID ID;
1275 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1278 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1279 return SDValue(E, 0);
1281 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1282 CSEMap.InsertNode(N, IP);
1284 return SDValue(N, 0);
1287 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1288 unsigned char TargetFlags) {
1289 assert((TargetFlags == 0 || isTarget) &&
1290 "Cannot set target flags on target-independent jump tables");
1291 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1292 FoldingSetNodeID ID;
1293 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1295 ID.AddInteger(TargetFlags);
1297 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1298 return SDValue(E, 0);
1300 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1301 CSEMap.InsertNode(N, IP);
1303 return SDValue(N, 0);
1306 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1307 unsigned Alignment, int Offset,
1309 unsigned char TargetFlags) {
1310 assert((TargetFlags == 0 || isTarget) &&
1311 "Cannot set target flags on target-independent globals");
1313 Alignment = MF->getFunction()->optForSize()
1314 ? getDataLayout().getABITypeAlignment(C->getType())
1315 : getDataLayout().getPrefTypeAlignment(C->getType());
1316 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1317 FoldingSetNodeID ID;
1318 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1319 ID.AddInteger(Alignment);
1320 ID.AddInteger(Offset);
1322 ID.AddInteger(TargetFlags);
1324 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1325 return SDValue(E, 0);
1327 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1329 CSEMap.InsertNode(N, IP);
1331 return SDValue(N, 0);
1335 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1336 unsigned Alignment, int Offset,
1338 unsigned char TargetFlags) {
1339 assert((TargetFlags == 0 || isTarget) &&
1340 "Cannot set target flags on target-independent globals");
1342 Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
1343 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1344 FoldingSetNodeID ID;
1345 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1346 ID.AddInteger(Alignment);
1347 ID.AddInteger(Offset);
1348 C->addSelectionDAGCSEId(ID);
1349 ID.AddInteger(TargetFlags);
1351 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1352 return SDValue(E, 0);
1354 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1356 CSEMap.InsertNode(N, IP);
1358 return SDValue(N, 0);
1361 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1362 unsigned char TargetFlags) {
1363 FoldingSetNodeID ID;
1364 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1365 ID.AddInteger(Index);
1366 ID.AddInteger(Offset);
1367 ID.AddInteger(TargetFlags);
1369 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1370 return SDValue(E, 0);
1372 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1373 CSEMap.InsertNode(N, IP);
1375 return SDValue(N, 0);
1378 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1379 FoldingSetNodeID ID;
1380 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1383 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1384 return SDValue(E, 0);
1386 auto *N = newSDNode<BasicBlockSDNode>(MBB);
1387 CSEMap.InsertNode(N, IP);
1389 return SDValue(N, 0);
1392 SDValue SelectionDAG::getValueType(EVT VT) {
1393 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1394 ValueTypeNodes.size())
1395 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1397 SDNode *&N = VT.isExtended() ?
1398 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1400 if (N) return SDValue(N, 0);
1401 N = newSDNode<VTSDNode>(VT);
1403 return SDValue(N, 0);
1406 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1407 SDNode *&N = ExternalSymbols[Sym];
1408 if (N) return SDValue(N, 0);
1409 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1411 return SDValue(N, 0);
1414 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1415 SDNode *&N = MCSymbols[Sym];
1417 return SDValue(N, 0);
1418 N = newSDNode<MCSymbolSDNode>(Sym, VT);
1420 return SDValue(N, 0);
1423 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1424 unsigned char TargetFlags) {
1426 TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
1428 if (N) return SDValue(N, 0);
1429 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1431 return SDValue(N, 0);
1434 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1435 if ((unsigned)Cond >= CondCodeNodes.size())
1436 CondCodeNodes.resize(Cond+1);
1438 if (!CondCodeNodes[Cond]) {
1439 auto *N = newSDNode<CondCodeSDNode>(Cond);
1440 CondCodeNodes[Cond] = N;
1444 return SDValue(CondCodeNodes[Cond], 0);
1447 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1448 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
1449 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1451 ShuffleVectorSDNode::commuteMask(M);
1454 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1455 SDValue N2, ArrayRef<int> Mask) {
1456 assert(VT.getVectorNumElements() == Mask.size() &&
1457 "Must have the same number of vector elements as mask elements!");
1458 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1459 "Invalid VECTOR_SHUFFLE");
1461 // Canonicalize shuffle undef, undef -> undef
1462 if (N1.isUndef() && N2.isUndef())
1463 return getUNDEF(VT);
1465 // Validate that all indices in Mask are within the range of the elements
1466 // input to the shuffle.
1467 int NElts = Mask.size();
1468 assert(all_of(Mask, [&](int M) { return M < (NElts * 2); }) &&
1469 "Index out of range");
1471 // Copy the mask so we can do any needed cleanup.
1472 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1474 // Canonicalize shuffle v, v -> v, undef
1477 for (int i = 0; i != NElts; ++i)
1478 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1481 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1483 commuteShuffle(N1, N2, MaskVec);
1485 // If shuffling a splat, try to blend the splat instead. We do this here so
1486 // that even when this arises during lowering we don't have to re-handle it.
1487 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1488 BitVector UndefElements;
1489 SDValue Splat = BV->getSplatValue(&UndefElements);
1493 for (int i = 0; i < NElts; ++i) {
1494 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1497 // If this input comes from undef, mark it as such.
1498 if (UndefElements[MaskVec[i] - Offset]) {
1503 // If we can blend a non-undef lane, use that instead.
1504 if (!UndefElements[i])
1505 MaskVec[i] = i + Offset;
1508 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1509 BlendSplat(N1BV, 0);
1510 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1511 BlendSplat(N2BV, NElts);
1513 // Canonicalize all indices into lhs, -> shuffle lhs, undef
1514 // Canonicalize all indices into rhs, -> shuffle rhs, undef
1515 bool AllLHS = true, AllRHS = true;
1516 bool N2Undef = N2.isUndef();
1517 for (int i = 0; i != NElts; ++i) {
1518 if (MaskVec[i] >= NElts) {
1523 } else if (MaskVec[i] >= 0) {
1527 if (AllLHS && AllRHS)
1528 return getUNDEF(VT);
1529 if (AllLHS && !N2Undef)
1533 commuteShuffle(N1, N2, MaskVec);
1535 // Reset our undef status after accounting for the mask.
1536 N2Undef = N2.isUndef();
1537 // Re-check whether both sides ended up undef.
1538 if (N1.isUndef() && N2Undef)
1539 return getUNDEF(VT);
1541 // If this is an identity shuffle, return that node.
1542 bool Identity = true, AllSame = true;
1543 for (int i = 0; i != NElts; ++i) {
1544 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1545 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1547 if (Identity && NElts)
1550 // Shuffling a constant splat doesn't change the result.
1554 // Look through any bitcasts. We check that these don't change the number
1555 // (and size) of elements and just change their types.
1556 while (V.getOpcode() == ISD::BITCAST)
1557 V = V->getOperand(0);
1559 // A splat should always show up as a build vector node.
1560 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1561 BitVector UndefElements;
1562 SDValue Splat = BV->getSplatValue(&UndefElements);
1563 // If this is a splat of an undef, shuffling it is also undef.
1564 if (Splat && Splat.isUndef())
1565 return getUNDEF(VT);
1568 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1570 // We only have a splat which can skip shuffles if there is a splatted
1571 // value and no undef lanes rearranged by the shuffle.
1572 if (Splat && UndefElements.none()) {
1573 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1574 // number of elements match or the value splatted is a zero constant.
1577 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1578 if (C->isNullValue())
1582 // If the shuffle itself creates a splat, build the vector directly.
1583 if (AllSame && SameNumElts) {
1584 EVT BuildVT = BV->getValueType(0);
1585 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1586 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1588 // We may have jumped through bitcasts, so the type of the
1589 // BUILD_VECTOR may not match the type of the shuffle.
1591 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1597 FoldingSetNodeID ID;
1598 SDValue Ops[2] = { N1, N2 };
1599 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1600 for (int i = 0; i != NElts; ++i)
1601 ID.AddInteger(MaskVec[i]);
1604 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1605 return SDValue(E, 0);
1607 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1608 // SDNode doesn't have access to it. This memory will be "leaked" when
1609 // the node is deallocated, but recovered when the NodeAllocator is released.
1610 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1611 std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc);
1613 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1614 dl.getDebugLoc(), MaskAlloc);
1615 createOperands(N, Ops);
1617 CSEMap.InsertNode(N, IP);
1619 return SDValue(N, 0);
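// Worked example (illustrative): for <4 x i32> operands A and B, a request
// such as getVectorShuffle(VT, dl, A, B, {4, 5, 6, 7}) references only the
// second operand, so the mask is commuted to {0, 1, 2, 3} with operands
// (B, undef), and the identity check earlier in the function then returns B
// directly.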
1622 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1623 MVT VT = SV.getSimpleValueType(0);
1624 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1625 ShuffleVectorSDNode::commuteMask(MaskVec);
1627 SDValue Op0 = SV.getOperand(0);
1628 SDValue Op1 = SV.getOperand(1);
1629 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1632 SDValue SelectionDAG::getConvertRndSat(EVT VT, const SDLoc &dl, SDValue Val,
1633 SDValue DTy, SDValue STy, SDValue Rnd,
1634 SDValue Sat, ISD::CvtCode Code) {
1635 // If the src and dest types are the same and the conversion is between
1636 // integer types of the same sign or two floats, no conversion is necessary.
1638 (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
1641 FoldingSetNodeID ID;
1642 SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
1643 AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), Ops);
1645 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1646 return SDValue(E, 0);
1649 newSDNode<CvtRndSatSDNode>(VT, dl.getIROrder(), dl.getDebugLoc(), Code);
1650 createOperands(N, Ops);
1652 CSEMap.InsertNode(N, IP);
1654 return SDValue(N, 0);
1657 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1658 FoldingSetNodeID ID;
1659 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1660 ID.AddInteger(RegNo);
1662 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1663 return SDValue(E, 0);
1665 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1666 CSEMap.InsertNode(N, IP);
1668 return SDValue(N, 0);
1671 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1672 FoldingSetNodeID ID;
1673 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1674 ID.AddPointer(RegMask);
1676 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1677 return SDValue(E, 0);
1679 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1680 CSEMap.InsertNode(N, IP);
1682 return SDValue(N, 0);
1685 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
1687 FoldingSetNodeID ID;
1688 SDValue Ops[] = { Root };
1689 AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops);
1690 ID.AddPointer(Label);
1692 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1693 return SDValue(E, 0);
1695 auto *N = newSDNode<EHLabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label);
1696 createOperands(N, Ops);
1698 CSEMap.InsertNode(N, IP);
1700 return SDValue(N, 0);
1703 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1706 unsigned char TargetFlags) {
1707 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1709 FoldingSetNodeID ID;
1710 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1712 ID.AddInteger(Offset);
1713 ID.AddInteger(TargetFlags);
1715 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1716 return SDValue(E, 0);
1718 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
1719 CSEMap.InsertNode(N, IP);
1721 return SDValue(N, 0);
1724 SDValue SelectionDAG::getSrcValue(const Value *V) {
1725 assert((!V || V->getType()->isPointerTy()) &&
1726 "SrcValue is not a pointer?");
1728 FoldingSetNodeID ID;
1729 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1733 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1734 return SDValue(E, 0);
1736 auto *N = newSDNode<SrcValueSDNode>(V);
1737 CSEMap.InsertNode(N, IP);
1739 return SDValue(N, 0);
1742 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1743 FoldingSetNodeID ID;
1744 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
1748 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1749 return SDValue(E, 0);
1751 auto *N = newSDNode<MDNodeSDNode>(MD);
1752 CSEMap.InsertNode(N, IP);
1754 return SDValue(N, 0);
1757 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
1758 if (VT == V.getValueType())
1761 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
1764 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
1765 unsigned SrcAS, unsigned DestAS) {
1766 SDValue Ops[] = {Ptr};
1767 FoldingSetNodeID ID;
1768 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
1769 ID.AddInteger(SrcAS);
1770 ID.AddInteger(DestAS);
1773 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1774 return SDValue(E, 0);
1776 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
1778 createOperands(N, Ops);
1780 CSEMap.InsertNode(N, IP);
1782 return SDValue(N, 0);
1785 /// getShiftAmountOperand - Return the specified value casted to
1786 /// the target's desired shift amount type.
1787 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1788 EVT OpTy = Op.getValueType();
1789 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
1790 if (OpTy == ShTy || OpTy.isVector()) return Op;
1792 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
1795 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
1797 const TargetLowering &TLI = getTargetLoweringInfo();
1798 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1799 EVT VT = Node->getValueType(0);
1800 SDValue Tmp1 = Node->getOperand(0);
1801 SDValue Tmp2 = Node->getOperand(1);
1802 unsigned Align = Node->getConstantOperandVal(3);
1804 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
1805 Tmp2, MachinePointerInfo(V));
1806 SDValue VAList = VAListLoad;
1808 if (Align > TLI.getMinStackArgumentAlignment()) {
1809 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
1811 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1812 getConstant(Align - 1, dl, VAList.getValueType()));
1814 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList,
1815 getConstant(-(int64_t)Align, dl, VAList.getValueType()));
1816 }
1818 // Increment the pointer, VAList, to the next vaarg
1819 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1820 getConstant(getDataLayout().getTypeAllocSize(
1821 VT.getTypeForEVT(*getContext())),
1822 dl, VAList.getValueType()));
1823 // Store the incremented VAList to the legalized pointer
1824 Tmp1 =
1825 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
1826 // Load the actual argument out of the pointer VAList
1827 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
1828 }
1830 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
1831 SDLoc dl(Node);
1832 const TargetLowering &TLI = getTargetLoweringInfo();
1833 // This defaults to loading a pointer from the input and storing it to the
1834 // output, returning the chain.
1835 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
1836 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
1837 SDValue Tmp1 =
1838 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
1839 Node->getOperand(2), MachinePointerInfo(VS));
1840 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
1841 MachinePointerInfo(VD));
1842 }
1844 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1845 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1846 unsigned ByteSize = VT.getStoreSize();
1847 Type *Ty = VT.getTypeForEVT(*getContext());
1848 unsigned StackAlign =
1849 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
1851 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
1852 return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
1853 }
1855 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1856 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
1857 Type *Ty1 = VT1.getTypeForEVT(*getContext());
1858 Type *Ty2 = VT2.getTypeForEVT(*getContext());
1859 const DataLayout &DL = getDataLayout();
1860 unsigned Align =
1861 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
1863 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1864 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
1865 return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
1866 }
1868 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
1869 ISD::CondCode Cond, const SDLoc &dl) {
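// Folding proceeds in three steps: predicates that are always true or false
// fold to constants immediately, two constant operands are compared directly
// (APInt for integers, APFloat for FP, where an unordered result lets the
// NaN-agnostic predicates fold to UNDEF), and a lone constant LHS is swapped
// to the RHS when the swapped condition code is legal.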
1870 // These setcc operations always fold.
1871 switch (Cond) {
1872 default: break;
1873 case ISD::SETFALSE:
1874 case ISD::SETFALSE2: return getConstant(0, dl, VT);
1875 case ISD::SETTRUE:
1876 case ISD::SETTRUE2: {
1877 TargetLowering::BooleanContent Cnt =
1878 TLI->getBooleanContents(N1->getValueType(0));
1879 return getConstant(
1880 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
1881 VT);
1882 }
1884 case ISD::SETOEQ:
1885 case ISD::SETOGT:
1886 case ISD::SETOGE:
1887 case ISD::SETOLT:
1888 case ISD::SETOLE:
1889 case ISD::SETONE:
1890 case ISD::SETO:
1891 case ISD::SETUO:
1892 case ISD::SETUEQ:
1893 case ISD::SETUNE:
1894 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1895 break;
1896 }
1898 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
1899 const APInt &C2 = N2C->getAPIntValue();
1900 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
1901 const APInt &C1 = N1C->getAPIntValue();
1903 switch (Cond) {
1904 default: llvm_unreachable("Unknown integer setcc!");
1905 case ISD::SETEQ: return getConstant(C1 == C2, dl, VT);
1906 case ISD::SETNE: return getConstant(C1 != C2, dl, VT);
1907 case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT);
1908 case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT);
1909 case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT);
1910 case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT);
1911 case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT);
1912 case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT);
1913 case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT);
1914 case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT);
1915 }
1916 }
1917 }
1918 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
1919 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
1920 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
1921 switch (Cond) {
1922 default: break;
1923 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
1924 return getUNDEF(VT);
1926 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT);
1927 case ISD::SETNE: if (R==APFloat::cmpUnordered)
1928 return getUNDEF(VT);
1930 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
1931 R==APFloat::cmpLessThan, dl, VT);
1932 case ISD::SETLT: if (R==APFloat::cmpUnordered)
1933 return getUNDEF(VT);
1935 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT);
1936 case ISD::SETGT: if (R==APFloat::cmpUnordered)
1937 return getUNDEF(VT);
1939 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT);
1940 case ISD::SETLE: if (R==APFloat::cmpUnordered)
1941 return getUNDEF(VT);
1943 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
1944 R==APFloat::cmpEqual, dl, VT);
1945 case ISD::SETGE: if (R==APFloat::cmpUnordered)
1946 return getUNDEF(VT);
1948 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
1949 R==APFloat::cmpEqual, dl, VT);
1950 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT);
1951 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT);
1952 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
1953 R==APFloat::cmpEqual, dl, VT);
1954 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT);
1955 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
1956 R==APFloat::cmpLessThan, dl, VT);
1957 case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
1958 R==APFloat::cmpUnordered, dl, VT);
1959 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT);
1960 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT);
1961 }
1962 } else {
1963 // Ensure that the constant occurs on the RHS.
1964 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
1965 MVT CompVT = N1.getValueType().getSimpleVT();
1966 if (!TLI->isCondCodeLegal(SwappedCond, CompVT))
1967 return SDValue();
1969 return getSetCC(dl, VT, N2, N1, SwappedCond);
1970 }
1971 }
1973 // Could not fold it.
1974 return SDValue();
1975 }
1977 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
1978 /// use this predicate to simplify operations downstream.
1979 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
1980 unsigned BitWidth = Op.getScalarValueSizeInBits();
1981 return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
1984 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
1985 /// this predicate to simplify operations downstream. Mask is known to be zero
1986 /// for bits that V cannot have.
1987 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
1988 unsigned Depth) const {
1989 APInt KnownZero, KnownOne;
1990 computeKnownBits(Op, KnownZero, KnownOne, Depth);
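// Every bit set in Mask must have been proven zero in Op by
// computeKnownBits for this to return true.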
1991 return (KnownZero & Mask) == Mask;
1994 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
1995 /// is less than the element bit-width of the shift node, return it.
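// For example, (srl v4i32 X, splat(5)) yields the APInt 5, while a splat
// shift amount of 37 is rejected because it is not less than 32.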
1996 static const APInt *getValidShiftAmountConstant(SDValue V) {
1997 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) {
1998 // Shifting more than the bitwidth is not valid.
1999 const APInt &ShAmt = SA->getAPIntValue();
2000 if (ShAmt.ult(V.getScalarValueSizeInBits()))
2001 return &ShAmt;
2002 }
2003 return nullptr;
2004 }
2006 /// Determine which bits of Op are known to be either zero or one and return
2007 /// them in the KnownZero/KnownOne bitsets. For vectors, the known bits are
2008 /// those that are shared by every vector element.
2009 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
2010 APInt &KnownOne, unsigned Depth) const {
2011 EVT VT = Op.getValueType();
2012 APInt DemandedElts = VT.isVector()
2013 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2014 : APInt(1, 1);
2015 computeKnownBits(Op, KnownZero, KnownOne, DemandedElts, Depth);
2016 }
2018 /// Determine which bits of Op are known to be either zero or one and return
2019 /// them in the KnownZero/KnownOne bitsets. The DemandedElts argument allows
2020 /// us to only collect the known bits that are shared by the requested vector
2021 /// elements.
2022 /// TODO: We only support DemandedElts on a few opcodes so far, the remainder
2023 /// should be added when they become necessary.
2024 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
2025 APInt &KnownOne, const APInt &DemandedElts,
2026 unsigned Depth) const {
2027 unsigned BitWidth = Op.getScalarValueSizeInBits();
2029 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
2030 if (Depth == 6)
2031 return; // Limit search depth.
2033 APInt KnownZero2, KnownOne2;
2034 unsigned NumElts = DemandedElts.getBitWidth();
2036 if (!DemandedElts)
2037 return; // No demanded elts, better to assume we don't know anything.
2039 unsigned Opcode = Op.getOpcode();
2040 switch (Opcode) {
2041 case ISD::Constant:
2042 // We know all of the bits for a constant!
2043 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
2044 KnownZero = ~KnownOne;
2045 break;
2046 case ISD::BUILD_VECTOR:
2047 // Collect the known bits that are shared by every demanded vector element.
2048 assert(NumElts == Op.getValueType().getVectorNumElements() &&
2049 "Unexpected vector size");
2050 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
2051 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2052 if (!DemandedElts[i])
2053 continue;
2055 SDValue SrcOp = Op.getOperand(i);
2056 computeKnownBits(SrcOp, KnownZero2, KnownOne2, Depth + 1);
2058 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
2059 if (SrcOp.getValueSizeInBits() != BitWidth) {
2060 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2061 "Expected BUILD_VECTOR implicit truncation");
2062 KnownOne2 = KnownOne2.trunc(BitWidth);
2063 KnownZero2 = KnownZero2.trunc(BitWidth);
2066 // Known bits are the values that are shared by every demanded element.
2067 KnownOne &= KnownOne2;
2068 KnownZero &= KnownZero2;
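// For example, if the demanded elements are the constants 0x0F and 0x07,
// the shared known-one bits are 0x07 and every bit above bit 3 is known
// zero; bit 3 differs between the elements and so stays unknown.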
2070 // If we don't know any bits, early out.
2071 if (!KnownOne && !KnownZero)
2072 break;
2073 }
2074 break;
2075 case ISD::VECTOR_SHUFFLE: {
2076 // Collect the known bits that are shared by every vector element referenced
2077 // by the shuffle.
2078 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2079 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
2080 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2081 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2082 for (unsigned i = 0; i != NumElts; ++i) {
2083 if (!DemandedElts[i])
2084 continue;
2086 int M = SVN->getMaskElt(i);
2087 if (M < 0) {
2088 // For UNDEF elements, we don't know anything about the common state of
2089 // the shuffle result.
2090 KnownOne.clearAllBits();
2091 KnownZero.clearAllBits();
2092 DemandedLHS.clearAllBits();
2093 DemandedRHS.clearAllBits();
2094 break;
2095 }
2097 if ((unsigned)M < NumElts)
2098 DemandedLHS.setBit((unsigned)M % NumElts);
2099 else
2100 DemandedRHS.setBit((unsigned)M % NumElts);
2102 // Known bits are the values that are shared by every demanded element.
2103 if (!!DemandedLHS) {
2104 SDValue LHS = Op.getOperand(0);
2105 computeKnownBits(LHS, KnownZero2, KnownOne2, DemandedLHS, Depth + 1);
2106 KnownOne &= KnownOne2;
2107 KnownZero &= KnownZero2;
2109 // If we don't know any bits, early out.
2110 if (!KnownOne && !KnownZero)
2112 if (!!DemandedRHS) {
2113 SDValue RHS = Op.getOperand(1);
2114 computeKnownBits(RHS, KnownZero2, KnownOne2, DemandedRHS, Depth + 1);
2115 KnownOne &= KnownOne2;
2116 KnownZero &= KnownZero2;
2120 case ISD::CONCAT_VECTORS: {
2121 // Split DemandedElts and test each of the demanded subvectors.
2122 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
2123 EVT SubVectorVT = Op.getOperand(0).getValueType();
2124 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2125 unsigned NumSubVectors = Op.getNumOperands();
2126 for (unsigned i = 0; i != NumSubVectors; ++i) {
2127 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2128 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2129 if (!!DemandedSub) {
2130 SDValue Sub = Op.getOperand(i);
2131 computeKnownBits(Sub, KnownZero2, KnownOne2, DemandedSub, Depth + 1);
2132 KnownOne &= KnownOne2;
2133 KnownZero &= KnownZero2;
2135 // If we don't know any bits, early out.
2136 if (!KnownOne && !KnownZero)
2141 case ISD::EXTRACT_SUBVECTOR: {
2142 // If we know the element index, just demand that subvector elements,
2143 // otherwise demand them all.
2144 SDValue Src = Op.getOperand(0);
2145 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2146 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2147 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2148 // Offset the demanded elts by the subvector index.
2149 uint64_t Idx = SubIdx->getZExtValue();
2150 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
2151 computeKnownBits(Src, KnownZero, KnownOne, DemandedSrc, Depth + 1);
2153 computeKnownBits(Src, KnownZero, KnownOne, Depth + 1);
2157 case ISD::BITCAST: {
2158 SDValue N0 = Op.getOperand(0);
2159 unsigned SubBitWidth = N0.getScalarValueSizeInBits();
2161 // Ignore bitcasts from floating point.
2162 if (!N0.getValueType().isInteger())
2165 // Fast handling of 'identity' bitcasts.
2166 if (BitWidth == SubBitWidth) {
2167 computeKnownBits(N0, KnownZero, KnownOne, DemandedElts, Depth + 1);
2171 // Support big-endian targets when it becomes useful.
2172 bool IsLE = getDataLayout().isLittleEndian();
2173 if (!IsLE)
2174 break;
2176 // Bitcast 'small element' vector to 'large element' scalar/vector.
2177 if ((BitWidth % SubBitWidth) == 0) {
2178 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2180 // Collect known bits for the (larger) output by collecting the known
2181 // bits from each set of sub elements and shift these into place.
2182 // We need to separately call computeKnownBits for each set of
2183 // sub elements as the knownbits for each is likely to be different.
2184 unsigned SubScale = BitWidth / SubBitWidth;
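// For example, a little-endian v2i16 -> i32 bitcast has SubScale == 2: the
// known bits of element 0 land in result bits [15:0] and those of element 1
// in bits [31:16].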
2185 APInt SubDemandedElts(NumElts * SubScale, 0);
2186 for (unsigned i = 0; i != NumElts; ++i)
2187 if (DemandedElts[i])
2188 SubDemandedElts.setBit(i * SubScale);
2190 for (unsigned i = 0; i != SubScale; ++i) {
2191 computeKnownBits(N0, KnownZero2, KnownOne2, SubDemandedElts.shl(i),
2193 KnownOne |= KnownOne2.zext(BitWidth).shl(SubBitWidth * i);
2194 KnownZero |= KnownZero2.zext(BitWidth).shl(SubBitWidth * i);
2198 // Bitcast 'large element' scalar/vector to 'small element' vector.
2199 if ((SubBitWidth % BitWidth) == 0) {
2200 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2202 // Collect known bits for the (smaller) output by collecting the known
2203 // bits from the overlapping larger input elements and extracting the
2204 // sub sections we actually care about.
2205 unsigned SubScale = SubBitWidth / BitWidth;
2206 APInt SubDemandedElts(NumElts / SubScale, 0);
2207 for (unsigned i = 0; i != NumElts; ++i)
2208 if (DemandedElts[i])
2209 SubDemandedElts.setBit(i / SubScale);
2211 computeKnownBits(N0, KnownZero2, KnownOne2, SubDemandedElts, Depth + 1);
2213 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
2214 for (unsigned i = 0; i != NumElts; ++i)
2215 if (DemandedElts[i]) {
2216 unsigned Offset = (i % SubScale) * BitWidth;
2217 KnownOne &= KnownOne2.lshr(Offset).trunc(BitWidth);
2218 KnownZero &= KnownZero2.lshr(Offset).trunc(BitWidth);
2219 // If we don't know any bits, early out.
2220 if (!KnownOne && !KnownZero)
2227 // If either the LHS or the RHS are Zero, the result is zero.
2228 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
2230 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2233 // Output known-1 bits are only known if set in both the LHS & RHS.
2234 KnownOne &= KnownOne2;
2235 // Output known-0 are known to be clear if zero in either the LHS | RHS.
2236 KnownZero |= KnownZero2;
2239 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
2241 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2244 // Output known-0 bits are only known if clear in both the LHS & RHS.
2245 KnownZero &= KnownZero2;
2246 // Output known-1 are known to be set if set in either the LHS | RHS.
2247 KnownOne |= KnownOne2;
2250 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
2252 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2255 // Output known-0 bits are known if clear or set in both the LHS & RHS.
2256 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
2257 // Output known-1 are known to be set if set in only one of the LHS, RHS.
2258 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
2259 KnownZero = KnownZeroOut;
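// For XOR, a result bit is known zero where the inputs are known to agree
// (both zero or both one) and known one where they are known to differ.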
2260 break;
2261 }
2262 case ISD::MUL: {
2263 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
2265 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2268 // If low bits are zero in either operand, output low known-0 bits.
2269 // Also compute a conservative estimate for high known-0 bits.
2270 // More trickiness is possible, but this is sufficient for the
2271 // interesting case of alignment computation.
2272 KnownOne.clearAllBits();
2273 unsigned TrailZ = KnownZero.countTrailingOnes() +
2274 KnownZero2.countTrailingOnes();
2275 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
2276 KnownZero2.countLeadingOnes(),
2277 BitWidth) - BitWidth;
2279 TrailZ = std::min(TrailZ, BitWidth);
2280 LeadZ = std::min(LeadZ, BitWidth);
2281 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
2282 APInt::getHighBitsSet(BitWidth, LeadZ);
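// For example, if both operands are known multiples of 4 (two trailing zero
// bits each), the product is a known multiple of 16, so at least four
// trailing bits of the result are known zero.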
2283 break;
2284 }
2285 case ISD::UDIV: {
2286 // For the purposes of computing leading zeros we can conservatively
2287 // treat a udiv as a logical right shift by the power of 2 known to
2288 // be less than the denominator.
2289 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2291 unsigned LeadZ = KnownZero2.countLeadingOnes();
2293 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2295 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
2296 if (RHSUnknownLeadingOnes != BitWidth)
2297 LeadZ = std::min(BitWidth,
2298 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
2300 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
2301 break;
2302 }
2303 case ISD::SELECT:
2304 computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
2305 // If we don't know any bits, early out.
2306 if (!KnownOne && !KnownZero)
2308 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2310 // Only known if known in both the LHS and RHS.
2311 KnownOne &= KnownOne2;
2312 KnownZero &= KnownZero2;
2314 case ISD::SELECT_CC:
2315 computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
2316 // If we don't know any bits, early out.
2317 if (!KnownOne && !KnownZero)
2319 computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
2321 // Only known if known in both the LHS and RHS.
2322 KnownOne &= KnownOne2;
2323 KnownZero &= KnownZero2;
2331 if (Op.getResNo() != 1)
2333 // The boolean result conforms to getBooleanContents.
2334 // If we know the result of a setcc has the top bits zero, use this info.
2335 // We know that we have an integer-based boolean since these operations
2336 // are only available for integer.
2337 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2338 TargetLowering::ZeroOrOneBooleanContent &&
2340 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2343 // If we know the result of a setcc has the top bits zero, use this info.
2344 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2345 TargetLowering::ZeroOrOneBooleanContent &&
2347 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2350 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2351 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2353 KnownZero = KnownZero << *ShAmt;
2354 KnownOne = KnownOne << *ShAmt;
2355 // Low bits are known zero.
2356 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt->getZExtValue());
2360 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2361 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2363 KnownZero = KnownZero.lshr(*ShAmt);
2364 KnownOne = KnownOne.lshr(*ShAmt);
2365 // High bits are known zero.
2366 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt->getZExtValue());
2367 KnownZero |= HighBits;
2371 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2372 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2374 KnownZero = KnownZero.lshr(*ShAmt);
2375 KnownOne = KnownOne.lshr(*ShAmt);
2376 // If we know the value of the sign bit, then we know it is copied across
2377 // the high bits by the shift amount.
2378 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt->getZExtValue());
2379 APInt SignBit = APInt::getSignBit(BitWidth);
2380 SignBit = SignBit.lshr(*ShAmt); // Adjust to where it is now in the mask.
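// For example, if the input's sign bit is known zero, (sra X, 3) has its
// top three bits known zero; if it is known one, they are known one.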
2381 if (KnownZero.intersects(SignBit)) {
2382 KnownZero |= HighBits; // New bits are known zero.
2383 } else if (KnownOne.intersects(SignBit)) {
2384 KnownOne |= HighBits; // New bits are known one.
2385 }
2386 }
2387 break;
2388 case ISD::SIGN_EXTEND_INREG: {
2389 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2390 unsigned EBits = EVT.getScalarSizeInBits();
2392 // Sign extension. Compute the demanded bits in the result that are not
2393 // present in the input.
2394 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2396 APInt InSignBit = APInt::getSignBit(EBits);
2397 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2399 // If the sign extended bits are demanded, we know that the sign
2401 InSignBit = InSignBit.zext(BitWidth);
2402 if (NewBits.getBoolValue())
2403 InputDemandedBits |= InSignBit;
2405 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2407 KnownOne &= InputDemandedBits;
2408 KnownZero &= InputDemandedBits;
2410 // If the sign bit of the input is known set or clear, then we know the
2411 // top bits of the result.
2412 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear
2413 KnownZero |= NewBits;
2414 KnownOne &= ~NewBits;
2415 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
2416 KnownOne |= NewBits;
2417 KnownZero &= ~NewBits;
2418 } else { // Input sign bit unknown
2419 KnownZero &= ~NewBits;
2420 KnownOne &= ~NewBits;
2425 case ISD::CTTZ_ZERO_UNDEF:
2427 case ISD::CTLZ_ZERO_UNDEF:
2429 unsigned LowBits = Log2_32(BitWidth)+1;
2430 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
2431 KnownOne.clearAllBits();
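// For example, on an i32 the count is at most 32, which fits in
// Log2_32(32) + 1 = 6 bits, so the upper 26 bits are known zero.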
2432 break;
2433 }
2434 case ISD::LOAD: {
2435 LoadSDNode *LD = cast<LoadSDNode>(Op);
2436 // If this is a ZEXTLoad and we are looking at the loaded value.
2437 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2438 EVT VT = LD->getMemoryVT();
2439 unsigned MemBits = VT.getScalarSizeInBits();
2440 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
2441 } else if (const MDNode *Ranges = LD->getRanges()) {
2442 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
2443 computeKnownBitsFromRangeMetadata(*Ranges, KnownZero, KnownOne);
2447 case ISD::ZERO_EXTEND: {
2448 EVT InVT = Op.getOperand(0).getValueType();
2449 unsigned InBits = InVT.getScalarSizeInBits();
2450 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2451 KnownZero = KnownZero.trunc(InBits);
2452 KnownOne = KnownOne.trunc(InBits);
2453 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2455 KnownZero = KnownZero.zext(BitWidth);
2456 KnownOne = KnownOne.zext(BitWidth);
2457 KnownZero |= NewBits;
2460 case ISD::SIGN_EXTEND: {
2461 EVT InVT = Op.getOperand(0).getValueType();
2462 unsigned InBits = InVT.getScalarSizeInBits();
2464 KnownZero = KnownZero.trunc(InBits);
2465 KnownOne = KnownOne.trunc(InBits);
2466 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2469 // If the sign bit is known to be zero or one, then sext will extend
2470 // it to the top bits, else it will just zext.
2471 KnownZero = KnownZero.sext(BitWidth);
2472 KnownOne = KnownOne.sext(BitWidth);
2475 case ISD::ANY_EXTEND: {
2476 EVT InVT = Op.getOperand(0).getValueType();
2477 unsigned InBits = InVT.getScalarSizeInBits();
2478 KnownZero = KnownZero.trunc(InBits);
2479 KnownOne = KnownOne.trunc(InBits);
2480 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2481 KnownZero = KnownZero.zext(BitWidth);
2482 KnownOne = KnownOne.zext(BitWidth);
2485 case ISD::TRUNCATE: {
2486 EVT InVT = Op.getOperand(0).getValueType();
2487 unsigned InBits = InVT.getScalarSizeInBits();
2488 KnownZero = KnownZero.zext(InBits);
2489 KnownOne = KnownOne.zext(InBits);
2490 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2492 KnownZero = KnownZero.trunc(BitWidth);
2493 KnownOne = KnownOne.trunc(BitWidth);
2496 case ISD::AssertZext: {
2497 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2498 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2499 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2500 KnownZero |= (~InMask);
2501 KnownOne &= (~KnownZero);
2505 // All bits are zero except the low bit.
2506 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2510 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) {
2511 // We know that the top bits of C-X are clear if X contains less bits
2512 // than C (i.e. no wrap-around can happen). For example, 20-X is
2513 // positive if we can prove that X is >= 0 and < 16.
2514 if (CLHS->getAPIntValue().isNonNegative()) {
2515 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2516 // NLZ can't be BitWidth with no sign bit
2517 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2518 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2521 // If all of the MaskV bits are known to be zero, then we know the
2522 // output top bits are zero, because we now know that the output is
2524 if ((KnownZero2 & MaskV) == MaskV) {
2525 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2526 // Top bits known zero.
2527 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
2535 // Output known-0 bits are known if clear or set in both the low clear bits
2536 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
2537 // low 3 bits clear.
2538 // Output known-0 bits are also known if the top bits of each input are
2539 // known to be clear. For example, if one input has the top 10 bits clear
2540 // and the other has the top 8 bits clear, we know the top 7 bits of the
2541 // output must be clear.
2542 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2544 unsigned KnownZeroHigh = KnownZero2.countLeadingOnes();
2545 unsigned KnownZeroLow = KnownZero2.countTrailingOnes();
2547 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2549 KnownZeroHigh = std::min(KnownZeroHigh,
2550 KnownZero2.countLeadingOnes());
2551 KnownZeroLow = std::min(KnownZeroLow,
2552 KnownZero2.countTrailingOnes());
2554 if (Opcode == ISD::ADD) {
2555 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroLow);
2556 if (KnownZeroHigh > 1)
2557 KnownZero |= APInt::getHighBitsSet(BitWidth, KnownZeroHigh - 1);
2561 // With ADDE, a carry bit may be added in, so we can only use this
2562 // information if we know (at least) that the low two bits are clear. We
2563 // then return to the caller that the low bit is unknown but that other bits
2565 if (KnownZeroLow >= 2) // ADDE
2566 KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroLow);
2570 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2571 const APInt &RA = Rem->getAPIntValue().abs();
2572 if (RA.isPowerOf2()) {
2573 APInt LowBits = RA - 1;
2574 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2577 // The low bits of the first operand are unchanged by the srem.
2578 KnownZero = KnownZero2 & LowBits;
2579 KnownOne = KnownOne2 & LowBits;
2581 // If the first operand is non-negative or has all low bits zero, then
2582 // the upper bits are all zero.
2583 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2584 KnownZero |= ~LowBits;
2586 // If the first operand is negative and not all low bits are zero, then
2587 // the upper bits are all one.
2588 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2589 KnownOne |= ~LowBits;
2590 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2595 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2596 const APInt &RA = Rem->getAPIntValue();
2597 if (RA.isPowerOf2()) {
2598 APInt LowBits = (RA - 1);
2599 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2602 // The upper bits are all zero, the lower ones are unchanged.
2603 KnownZero = KnownZero2 | ~LowBits;
2604 KnownOne = KnownOne2 & LowBits;
2609 // Since the result is less than or equal to either operand, any leading
2610 // zero bits in either operand must also exist in the result.
2611 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2613 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2616 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2617 KnownZero2.countLeadingOnes());
2618 KnownOne.clearAllBits();
2619 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2622 case ISD::EXTRACT_ELEMENT: {
2623 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2624 const unsigned Index = Op.getConstantOperandVal(1);
2625 const unsigned BitWidth = Op.getValueSizeInBits();
2627 // Remove low part of known bits mask
2628 KnownZero = KnownZero.getHiBits(KnownZero.getBitWidth() - Index * BitWidth);
2629 KnownOne = KnownOne.getHiBits(KnownOne.getBitWidth() - Index * BitWidth);
2631 // Remove high part of known bit mask
2632 KnownZero = KnownZero.trunc(BitWidth);
2633 KnownOne = KnownOne.trunc(BitWidth);
2634 break;
2635 }
2636 case ISD::EXTRACT_VECTOR_ELT: {
2637 SDValue InVec = Op.getOperand(0);
2638 SDValue EltNo = Op.getOperand(1);
2639 EVT VecVT = InVec.getValueType();
2640 const unsigned BitWidth = Op.getValueSizeInBits();
2641 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
2642 const unsigned NumSrcElts = VecVT.getVectorNumElements();
2643 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
2644 // anything about the extended bits.
2645 if (BitWidth > EltBitWidth) {
2646 KnownZero = KnownZero.trunc(EltBitWidth);
2647 KnownOne = KnownOne.trunc(EltBitWidth);
2649 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
2650 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
2651 // If we know the element index, just demand that vector element.
2652 unsigned Idx = ConstEltNo->getZExtValue();
2653 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
2654 computeKnownBits(InVec, KnownZero, KnownOne, DemandedElt, Depth + 1);
2656 // Unknown element index, so ignore DemandedElts and demand them all.
2657 computeKnownBits(InVec, KnownZero, KnownOne, Depth + 1);
2659 if (BitWidth > EltBitWidth) {
2660 KnownZero = KnownZero.zext(BitWidth);
2661 KnownOne = KnownOne.zext(BitWidth);
2665 case ISD::INSERT_VECTOR_ELT: {
2666 SDValue InVec = Op.getOperand(0);
2667 SDValue InVal = Op.getOperand(1);
2668 SDValue EltNo = Op.getOperand(2);
2670 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
2671 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
2672 // If we know the element index, split the demand between the
2673 // source vector and the inserted element.
2674 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
2675 unsigned EltIdx = CEltNo->getZExtValue();
2677 // If we demand the inserted element then add its common known bits.
2678 if (DemandedElts[EltIdx]) {
2679 computeKnownBits(InVal, KnownZero2, KnownOne2, Depth + 1);
2680 KnownOne &= KnownOne2.zextOrTrunc(KnownOne.getBitWidth());
2681 KnownZero &= KnownZero2.zextOrTrunc(KnownZero.getBitWidth());
2684 // If we demand the source vector then add its common known bits, ensuring
2685 // that we don't demand the inserted element.
2686 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
2688 computeKnownBits(InVec, KnownZero2, KnownOne2, VectorElts, Depth + 1);
2689 KnownOne &= KnownOne2;
2690 KnownZero &= KnownZero2;
2693 // Unknown element index, so ignore DemandedElts and demand them all.
2694 computeKnownBits(InVec, KnownZero, KnownOne, Depth + 1);
2695 computeKnownBits(InVal, KnownZero2, KnownOne2, Depth + 1);
2696 KnownOne &= KnownOne2.zextOrTrunc(KnownOne.getBitWidth());
2697 KnownZero &= KnownZero2.zextOrTrunc(KnownZero.getBitWidth());
2702 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2704 KnownZero = KnownZero2.byteSwap();
2705 KnownOne = KnownOne2.byteSwap();
2712 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2714 // If we don't know any bits, early out.
2715 if (!KnownOne && !KnownZero)
2717 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2719 KnownZero &= KnownZero2;
2720 KnownOne &= KnownOne2;
2723 case ISD::FrameIndex:
2724 case ISD::TargetFrameIndex:
2725 if (unsigned Align = InferPtrAlignment(Op)) {
2726 // The low bits are known zero if the pointer is aligned.
2727 KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
2733 if (Opcode < ISD::BUILTIN_OP_END)
2736 case ISD::INTRINSIC_WO_CHAIN:
2737 case ISD::INTRINSIC_W_CHAIN:
2738 case ISD::INTRINSIC_VOID:
2739 // Allow the target to implement this method for its nodes.
2740 TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
2744 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2747 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
2748 EVT OpVT = Val.getValueType();
2749 unsigned BitWidth = OpVT.getScalarSizeInBits();
2751 // Is the constant a known power of 2?
2752 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
2753 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
2755 // A left-shift of a constant one will have exactly one bit set because
2756 // shifting the bit off the end is undefined.
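// For example, (shl 1, X) computes 1 << X, which has exactly one bit set
// for any in-range shift amount.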
2757 if (Val.getOpcode() == ISD::SHL) {
2758 auto *C = dyn_cast<ConstantSDNode>(Val.getOperand(0));
2759 if (C && C->getAPIntValue() == 1)
2760 return true;
2761 }
2763 // Similarly, a logical right-shift of a constant sign-bit will have exactly
2764 // one bit set.
2765 if (Val.getOpcode() == ISD::SRL) {
2766 auto *C = dyn_cast<ConstantSDNode>(Val.getOperand(0));
2767 if (C && C->getAPIntValue().isSignBit())
2768 return true;
2769 }
2771 // Are all operands of a build vector constant powers of two?
2772 if (Val.getOpcode() == ISD::BUILD_VECTOR)
2773 if (llvm::all_of(Val->ops(), [this, BitWidth](SDValue E) {
2774 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
2775 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
2776 return false;
2777 }))
2778 return true;
2780 // More could be done here, though the above checks are enough
2781 // to handle some common cases.
2783 // Fall back to computeKnownBits to catch other known cases.
2784 APInt KnownZero, KnownOne;
2785 computeKnownBits(Val, KnownZero, KnownOne);
2786 return (KnownZero.countPopulation() == BitWidth - 1) &&
2787 (KnownOne.countPopulation() == 1);
2788 }
2790 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
2791 EVT VT = Op.getValueType();
2792 assert(VT.isInteger() && "Invalid VT!");
2793 unsigned VTBits = VT.getScalarSizeInBits();
2794 unsigned Tmp, Tmp2;
2795 unsigned FirstAnswer = 1;
2797 if (Depth == 6)
2798 return 1; // Limit search depth.
2800 switch (Op.getOpcode()) {
2801 default: break;
2802 case ISD::AssertSext:
2803 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2804 return VTBits-Tmp+1;
2805 case ISD::AssertZext:
2806 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2809 case ISD::Constant: {
2810 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2811 return Val.getNumSignBits();
2814 case ISD::SIGN_EXTEND:
2815 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
2816 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2818 case ISD::SIGN_EXTEND_INREG:
2819 // Max of the input and what this extends.
2820 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
2823 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2824 return std::max(Tmp, Tmp2);
2827 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2828 // SRA X, C -> adds C sign bits.
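// For example, if X is known to have 3 sign bits, (sra X, 4) has at least
// 7, capped at the bit width below.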
2829 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
2830 APInt ShiftVal = C->getAPIntValue();
2831 ShiftVal += Tmp;
2832 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
2833 }
2834 return Tmp;
2835 case ISD::SHL:
2836 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
2837 // shl destroys sign bits.
2838 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2839 if (C->getAPIntValue().uge(VTBits) || // Bad shift.
2840 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out.
2841 return Tmp - C->getZExtValue();
2842 }
2843 break;
2844 case ISD::AND:
2845 case ISD::OR:
2846 case ISD::XOR: // NOT is handled here.
2847 // Logical binary ops preserve the number of sign bits at the worst.
2848 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2850 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2851 FirstAnswer = std::min(Tmp, Tmp2);
2852 // We computed what we know about the sign bits as our first
2853 // answer. Now proceed to the generic code that uses
2854 // computeKnownBits, and pick whichever answer is better.
2859 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2860 if (Tmp == 1) return 1; // Early out.
2861 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2862 return std::min(Tmp, Tmp2);
2863 case ISD::SELECT_CC:
2864 Tmp = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2865 if (Tmp == 1) return 1; // Early out.
2866 Tmp2 = ComputeNumSignBits(Op.getOperand(3), Depth+1);
2867 return std::min(Tmp, Tmp2);
2868 case ISD::SMIN:
2869 case ISD::SMAX:
2870 case ISD::UMIN:
2871 case ISD::UMAX:
2872 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2873 if (Tmp == 1)
2874 return 1; // Early out.
2875 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
2876 return std::min(Tmp, Tmp2);
2877 case ISD::SADDO:
2878 case ISD::UADDO:
2879 case ISD::SSUBO:
2880 case ISD::USUBO:
2881 case ISD::SMULO:
2882 case ISD::UMULO:
2883 if (Op.getResNo() != 1)
2884 break;
2885 // The boolean result conforms to getBooleanContents. Fall through.
2886 // If setcc returns 0/-1, all bits are sign bits.
2887 // We know that we have an integer-based boolean since these operations
2888 // are only available for integer.
2889 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2890 TargetLowering::ZeroOrNegativeOneBooleanContent)
2891 return VTBits;
2892 break;
2893 case ISD::SETCC:
2894 // If setcc returns 0/-1, all bits are sign bits.
2895 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2896 TargetLowering::ZeroOrNegativeOneBooleanContent)
2897 return VTBits;
2898 break;
2899 case ISD::ROTL:
2900 case ISD::ROTR:
2901 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2902 unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2904 // Handle rotate right by N like a rotate left by 32-N.
2905 if (Op.getOpcode() == ISD::ROTR)
2906 RotAmt = (VTBits-RotAmt) & (VTBits-1);
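// For example, on an i32 a rotate right by 8 is handled as a rotate left
// by 24 for the purposes of this computation.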
2908 // If we aren't rotating out all of the known-in sign bits, return the
2909 // number that are left. This handles rotl(sext(x), 1) for example.
2910 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2911 if (Tmp > RotAmt+1) return Tmp-RotAmt;
2912 }
2913 break;
2914 case ISD::ADD:
2915 // Add can have at most one carry bit. Thus we know that the output
2916 // is, at worst, one more bit than the inputs.
2917 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2918 if (Tmp == 1) return 1; // Early out.
2920 // Special case decrementing a value (ADD X, -1):
2921 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2922 if (CRHS->isAllOnesValue()) {
2923 APInt KnownZero, KnownOne;
2924 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2926 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2928 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2931 // If we are subtracting one from a positive number, there is no carry
2932 // out of the result.
2933 if (KnownZero.isNegative())
2937 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2938 if (Tmp2 == 1) return 1;
2939 return std::min(Tmp, Tmp2)-1;
2942 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2943 if (Tmp2 == 1) return 1;
2946 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
2947 if (CLHS->isNullValue()) {
2948 APInt KnownZero, KnownOne;
2949 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2950 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2952 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2955 // If the input is known to be positive (the sign bit is known clear),
2956 // the output of the NEG has the same number of sign bits as the input.
2957 if (KnownZero.isNegative())
2960 // Otherwise, we treat this like a SUB.
2963 // Sub can have at most one carry bit. Thus we know that the output
2964 // is, at worst, one more bit than the inputs.
2965 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2966 if (Tmp == 1) return 1; // Early out.
2967 return std::min(Tmp, Tmp2)-1;
2968 case ISD::TRUNCATE: {
2969 // Check if the sign bits of source go down as far as the truncated value.
2970 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
2971 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2972 if (NumSrcSignBits > (NumSrcBits - VTBits))
2973 return NumSrcSignBits - (NumSrcBits - VTBits);
2974 break;
2975 }
2976 case ISD::EXTRACT_ELEMENT: {
2977 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2978 const int BitWidth = Op.getValueSizeInBits();
2979 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
2981 // Get reverse index (starting from 1), Op1 value indexes elements from
2982 // little end. Sign starts at big end.
2983 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
2985 // If the sign portion ends in our element the subtraction gives correct
2986 // result. Otherwise it gives either negative or > bitwidth result
2987 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
2988 }
2989 case ISD::EXTRACT_VECTOR_ELT: {
2990 // At the moment we keep this simple and skip tracking the specific
2991 // element. This way we get the lowest common denominator for all elements
2993 // TODO: get information for given vector element
2994 const unsigned BitWidth = Op.getValueSizeInBits();
2995 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
2996 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
2997 // anything about sign bits. But if the sizes match we can derive knowledge
2998 // about sign bits from the vector operand.
2999 if (BitWidth == EltBitWidth)
3000 return ComputeNumSignBits(Op.getOperand(0), Depth+1);
3001 break;
3002 }
3003 case ISD::EXTRACT_SUBVECTOR:
3004 return ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3005 case ISD::CONCAT_VECTORS:
3006 // Determine the minimum number of sign bits across all input vectors.
3007 // Early out if the result is already 1.
3008 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3009 for (unsigned i = 1, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i)
3010 Tmp = std::min(Tmp, ComputeNumSignBits(Op.getOperand(i), Depth + 1));
3011 return Tmp;
3012 }
3014 // If we are looking at the loaded value of the SDNode.
3015 if (Op.getResNo() == 0) {
3016 // Handle LOADX separately here. EXTLOAD case will fallthrough.
3017 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3018 unsigned ExtType = LD->getExtensionType();
3019 switch (ExtType) {
3020 default: break;
3021 case ISD::SEXTLOAD: // '17' bits known
3022 Tmp = LD->getMemoryVT().getScalarSizeInBits();
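// For example, a sextload of i16 into i32 has Tmp == 16, giving
// 32 - 16 + 1 = 17 sign bits: the 16 extended copies plus the original
// sign bit.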
3023 return VTBits-Tmp+1;
3024 case ISD::ZEXTLOAD: // '16' bits known
3025 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3026 return VTBits-Tmp;
3027 }
3028 }
3029 }
3031 // Allow the target to implement this method for its nodes.
3032 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3033 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3034 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3035 Op.getOpcode() == ISD::INTRINSIC_VOID) {
3036 unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
3037 if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
3038 }
3040 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3041 // use this information.
3042 APInt KnownZero, KnownOne;
3043 computeKnownBits(Op, KnownZero, KnownOne, Depth);
3045 APInt Mask;
3046 if (KnownZero.isNegative()) { // sign bit is 0
3047 Mask = KnownZero;
3048 } else if (KnownOne.isNegative()) { // sign bit is 1;
3049 Mask = KnownOne;
3050 } else {
3051 // Nothing known.
3052 return FirstAnswer;
3053 }
3055 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3056 // the number of identical bits in the top of the input value.
3057 Mask = ~Mask;
3058 Mask <<= Mask.getBitWidth()-VTBits;
3059 // Return # leading zeros. We use 'min' here in case Val was zero before
3060 // shifting. We don't want to return '64' as for an i32 "0".
3061 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3062 }
3064 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3065 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3066 !isa<ConstantSDNode>(Op.getOperand(1)))
3067 return false;
3069 if (Op.getOpcode() == ISD::OR &&
3070 !MaskedValueIsZero(Op.getOperand(0),
3071 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3072 return false;
3074 return true;
3075 }
3077 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
3078 // If we're told that NaNs won't happen, assume they won't.
3079 if (getTarget().Options.NoNaNsFPMath)
3082 // If the value is a constant, we can obviously see if it is a NaN or not.
3083 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3084 return !C->getValueAPF().isNaN();
3086 // TODO: Recognize more cases here.
3088 return false;
3089 }
3091 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3092 // If the value is a constant, we can obviously see if it is a zero or not.
3093 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3094 return !C->isZero();
3096 // TODO: Recognize more cases here.
3097 switch (Op.getOpcode()) {
3098 default: break;
3099 case ISD::OR:
3100 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3101 return !C->isNullValue();
3102 break;
3103 }
3105 return false;
3106 }
3108 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3109 // Check the obvious case.
3110 if (A == B) return true;
3112 // Check for negative and positive zero.
3113 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3114 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3115 if (CA->isZero() && CB->isZero()) return true;
3117 // Otherwise they may not be equal.
3118 return false;
3119 }
3121 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
3122 assert(A.getValueType() == B.getValueType() &&
3123 "Values must have the same type");
3124 APInt AZero, AOne;
3125 APInt BZero, BOne;
3126 computeKnownBits(A, AZero, AOne);
3127 computeKnownBits(B, BZero, BOne);
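// A and B share no set bits iff every bit position is known zero in at
// least one of them, i.e. the union of their known-zero masks covers every
// bit.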
3128 return (AZero | BZero).isAllOnesValue();
3129 }
3131 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
3132 ArrayRef<SDValue> Ops,
3133 llvm::SelectionDAG &DAG) {
3134 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
3135 assert(llvm::all_of(Ops,
3136 [Ops](SDValue Op) {
3137 return Ops[0].getValueType() == Op.getValueType();
3138 }) &&
3139 "Concatenation of vectors with inconsistent value types!");
3140 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
3141 VT.getVectorNumElements() &&
3142 "Incorrect element count in vector concatenation!");
3144 if (Ops.size() == 1)
3145 return Ops[0];
3147 // Concat of UNDEFs is UNDEF.
3148 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
3149 return DAG.getUNDEF(VT);
3151 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
3152 // simplified to one big BUILD_VECTOR.
3153 // FIXME: Add support for SCALAR_TO_VECTOR as well.
3154 EVT SVT = VT.getScalarType();
3155 SmallVector<SDValue, 16> Elts;
3156 for (SDValue Op : Ops) {
3157 EVT OpVT = Op.getValueType();
3158 if (Op.isUndef())
3159 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
3160 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
3161 Elts.append(Op->op_begin(), Op->op_end());
3162 else
3163 return SDValue();
3164 }
3166 // BUILD_VECTOR requires all inputs to be of the same type, find the
3167 // maximum type and extend them all.
3168 for (SDValue Op : Elts)
3169 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
3171 if (SVT.bitsGT(VT.getScalarType()))
3172 for (SDValue &Op : Elts)
3173 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
3174 ? DAG.getZExtOrTrunc(Op, DL, SVT)
3175 : DAG.getSExtOrTrunc(Op, DL, SVT);
3177 return DAG.getBuildVector(VT, DL, Elts);
3180 /// Gets or creates the specified node.
3181 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
3182 FoldingSetNodeID ID;
3183 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
3184 void *IP = nullptr;
3185 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3186 return SDValue(E, 0);
3188 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
3189 getVTList(VT));
3190 CSEMap.InsertNode(N, IP);
3192 InsertNode(N);
3193 return SDValue(N, 0);
3194 }
3196 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3197 SDValue Operand) {
3198 // Constant fold unary operations with an integer constant operand. Even
3199 // opaque constant will be folded, because the folding of unary operations
3200 // doesn't create new constants with different values. Nevertheless, the
3201 // opaque flag is preserved during folding to prevent future folding with
3203 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
3204 const APInt &Val = C->getAPIntValue();
3207 case ISD::SIGN_EXTEND:
3208 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
3209 C->isTargetOpcode(), C->isOpaque());
3210 case ISD::ANY_EXTEND:
3211 case ISD::ZERO_EXTEND:
3213 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
3214 C->isTargetOpcode(), C->isOpaque());
3215 case ISD::UINT_TO_FP:
3216 case ISD::SINT_TO_FP: {
3217 APFloat apf(EVTToAPFloatSemantics(VT),
3218 APInt::getNullValue(VT.getSizeInBits()));
3219 (void)apf.convertFromAPInt(Val,
3220 Opcode==ISD::SINT_TO_FP,
3221 APFloat::rmNearestTiesToEven);
3222 return getConstantFP(apf, DL, VT);
3225 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
3226 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
3227 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
3228 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
3229 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
3230 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
3231 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
3232 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
3235 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
3238 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
3241 case ISD::CTLZ_ZERO_UNDEF:
3242 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
3245 case ISD::CTTZ_ZERO_UNDEF:
3246 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
3251 // Constant fold unary operations with a floating point constant operand.
3252 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
3253 APFloat V = C->getValueAPF(); // make copy
3257 return getConstantFP(V, DL, VT);
3260 return getConstantFP(V, DL, VT);
3262 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
3263 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3264 return getConstantFP(V, DL, VT);
3268 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
3269 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3270 return getConstantFP(V, DL, VT);
3274 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
3275 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3276 return getConstantFP(V, DL, VT);
3279 case ISD::FP_EXTEND: {
3281 // This can return overflow, underflow, or inexact; we don't care.
3282 // FIXME need to be more flexible about rounding mode.
3283 (void)V.convert(EVTToAPFloatSemantics(VT),
3284 APFloat::rmNearestTiesToEven, &ignored);
3285 return getConstantFP(V, DL, VT);
3287 case ISD::FP_TO_SINT:
3288 case ISD::FP_TO_UINT: {
3291 static_assert(integerPartWidth >= 64, "APFloat parts too small!");
3292 // FIXME need to be more flexible about rounding mode.
3293 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
3294 Opcode==ISD::FP_TO_SINT,
3295 APFloat::rmTowardZero, &ignored);
3296 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
3298 APInt api(VT.getSizeInBits(), x);
3299 return getConstant(api, DL, VT);
3302 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
3303 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
3304 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
3305 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
3306 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
3307 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
3312 // Constant fold unary operations with a vector integer or float operand.
3313 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
3314 if (BV->isConstant()) {
3317 // FIXME: Entirely reasonable to perform folding of other unary
3318 // operations here as the need arises.
3325 case ISD::FP_EXTEND:
3326 case ISD::FP_TO_SINT:
3327 case ISD::FP_TO_UINT:
3329 case ISD::UINT_TO_FP:
3330 case ISD::SINT_TO_FP:
3333 case ISD::CTLZ_ZERO_UNDEF:
3335 case ISD::CTTZ_ZERO_UNDEF:
3337 SDValue Ops = { Operand };
3338 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
3339 return Fold;
3340 }
3341 }
3342 }
3343 }
3345 unsigned OpOpcode = Operand.getNode()->getOpcode();
3346 switch (Opcode) {
3347 case ISD::TokenFactor:
3348 case ISD::MERGE_VALUES:
3349 case ISD::CONCAT_VECTORS:
3350 return Operand; // Factor, merge or concat of one node? No need.
3351 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
3352 case ISD::FP_EXTEND:
3353 assert(VT.isFloatingPoint() &&
3354 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
3355 if (Operand.getValueType() == VT) return Operand; // noop conversion.
3356 assert((!VT.isVector() ||
3357 VT.getVectorNumElements() ==
3358 Operand.getValueType().getVectorNumElements()) &&
3359 "Vector element count mismatch!");
3360 assert(Operand.getValueType().bitsLT(VT) &&
3361 "Invalid fpext node, dst < src!");
3362 if (Operand.isUndef())
3363 return getUNDEF(VT);
3365 case ISD::SIGN_EXTEND:
3366 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3367 "Invalid SIGN_EXTEND!");
3368 if (Operand.getValueType() == VT) return Operand; // noop extension
3369 assert((!VT.isVector() ||
3370 VT.getVectorNumElements() ==
3371 Operand.getValueType().getVectorNumElements()) &&
3372 "Vector element count mismatch!");
3373 assert(Operand.getValueType().bitsLT(VT) &&
3374 "Invalid sext node, dst < src!");
3375 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
3376 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3377 else if (OpOpcode == ISD::UNDEF)
3378 // sext(undef) = 0, because the top bits will all be the same.
3379 return getConstant(0, DL, VT);
3381 case ISD::ZERO_EXTEND:
3382 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3383 "Invalid ZERO_EXTEND!");
3384 if (Operand.getValueType() == VT) return Operand; // noop extension
3385 assert((!VT.isVector() ||
3386 VT.getVectorNumElements() ==
3387 Operand.getValueType().getVectorNumElements()) &&
3388 "Vector element count mismatch!");
3389 assert(Operand.getValueType().bitsLT(VT) &&
3390 "Invalid zext node, dst < src!");
3391 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
3392 return getNode(ISD::ZERO_EXTEND, DL, VT,
3393 Operand.getNode()->getOperand(0));
3394 else if (OpOpcode == ISD::UNDEF)
3395 // zext(undef) = 0, because the top bits will be zero.
3396 return getConstant(0, DL, VT);
3398 case ISD::ANY_EXTEND:
3399 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3400 "Invalid ANY_EXTEND!");
3401 if (Operand.getValueType() == VT) return Operand; // noop extension
3402 assert((!VT.isVector() ||
3403 VT.getVectorNumElements() ==
3404 Operand.getValueType().getVectorNumElements()) &&
3405 "Vector element count mismatch!");
3406 assert(Operand.getValueType().bitsLT(VT) &&
3407 "Invalid anyext node, dst < src!");
3409 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3410 OpOpcode == ISD::ANY_EXTEND)
3411 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
3412 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3413 else if (OpOpcode == ISD::UNDEF)
3414 return getUNDEF(VT);
3416 // (ext (trunc x)) -> x
3417 if (OpOpcode == ISD::TRUNCATE) {
3418 SDValue OpOp = Operand.getNode()->getOperand(0);
3419 if (OpOp.getValueType() == VT)
3420 return OpOp;
3421 }
3422 break;
3423 case ISD::TRUNCATE:
3424 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3425 "Invalid TRUNCATE!");
3426 if (Operand.getValueType() == VT) return Operand; // noop truncate
3427 assert((!VT.isVector() ||
3428 VT.getVectorNumElements() ==
3429 Operand.getValueType().getVectorNumElements()) &&
3430 "Vector element count mismatch!");
3431 assert(Operand.getValueType().bitsGT(VT) &&
3432 "Invalid truncate node, src < dst!");
3433 if (OpOpcode == ISD::TRUNCATE)
3434 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
3435 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3436 OpOpcode == ISD::ANY_EXTEND) {
3437 // If the source is smaller than the dest, we still need an extend.
3438 if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
3439 .bitsLT(VT.getScalarType()))
3440 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3441 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
3442 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
3443 return Operand.getNode()->getOperand(0);
3445 if (OpOpcode == ISD::UNDEF)
3446 return getUNDEF(VT);
3449 assert(VT.isInteger() && VT == Operand.getValueType() &&
3451 assert((VT.getScalarSizeInBits() % 16 == 0) &&
3452 "BSWAP types must be a multiple of 16 bits!");
3453 if (OpOpcode == ISD::UNDEF)
3454 return getUNDEF(VT);
3456 case ISD::BITREVERSE:
3457 assert(VT.isInteger() && VT == Operand.getValueType() &&
3458 "Invalid BITREVERSE!");
3459 if (OpOpcode == ISD::UNDEF)
3460 return getUNDEF(VT);
3463 // Basic sanity checking.
3464 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
3465 "Cannot BITCAST between types of different sizes!");
3466 if (VT == Operand.getValueType()) return Operand; // noop conversion.
3467 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
3468 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
3469 if (OpOpcode == ISD::UNDEF)
3470 return getUNDEF(VT);
3472 case ISD::SCALAR_TO_VECTOR:
3473 assert(VT.isVector() && !Operand.getValueType().isVector() &&
3474 (VT.getVectorElementType() == Operand.getValueType() ||
3475 (VT.getVectorElementType().isInteger() &&
3476 Operand.getValueType().isInteger() &&
3477 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
3478 "Illegal SCALAR_TO_VECTOR node!");
3479 if (OpOpcode == ISD::UNDEF)
3480 return getUNDEF(VT);
3481 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
3482 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
3483 isa<ConstantSDNode>(Operand.getOperand(1)) &&
3484 Operand.getConstantOperandVal(1) == 0 &&
3485 Operand.getOperand(0).getValueType() == VT)
3486 return Operand.getOperand(0);
3489 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
3490 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
3491 // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags?
3492 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
3493 Operand.getNode()->getOperand(0),
3494 &cast<BinaryWithFlagsSDNode>(Operand.getNode())->Flags);
3495 if (OpOpcode == ISD::FNEG) // --X -> X
3496 return Operand.getNode()->getOperand(0);
3499 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
3500 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
3505 SDVTList VTs = getVTList(VT);
3506 SDValue Ops[] = {Operand};
3507 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
3508 FoldingSetNodeID ID;
3509 AddNodeIDNode(ID, Opcode, VTs, Ops);
3511 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3512 return SDValue(E, 0);
3514 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
3515 createOperands(N, Ops);
3516 CSEMap.InsertNode(N, IP);
3518 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
3519 createOperands(N, Ops);
3523 return SDValue(N, 0);
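/// Try to constant-fold a binary integer operation on two APInt values.
/// Returns the folded value plus a success flag; division or remainder by
/// zero (and any unhandled opcode) reports failure instead of folding.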
3526 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
                                         const APInt &C2) {
  switch (Opcode) {
3529 case ISD::ADD: return std::make_pair(C1 + C2, true);
3530 case ISD::SUB: return std::make_pair(C1 - C2, true);
3531 case ISD::MUL: return std::make_pair(C1 * C2, true);
3532 case ISD::AND: return std::make_pair(C1 & C2, true);
3533 case ISD::OR: return std::make_pair(C1 | C2, true);
3534 case ISD::XOR: return std::make_pair(C1 ^ C2, true);
3535 case ISD::SHL: return std::make_pair(C1 << C2, true);
3536 case ISD::SRL: return std::make_pair(C1.lshr(C2), true);
3537 case ISD::SRA: return std::make_pair(C1.ashr(C2), true);
3538 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
3539 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
3540 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true);
3541 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true);
3542 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true);
3543 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? C1 : C2, true);
3545 if (!C2.getBoolValue())
3547 return std::make_pair(C1.udiv(C2), true);
3549 if (!C2.getBoolValue())
3551 return std::make_pair(C1.urem(C2), true);
3553 if (!C2.getBoolValue())
3555 return std::make_pair(C1.sdiv(C2), true);
3557 if (!C2.getBoolValue())
3559 return std::make_pair(C1.srem(C2), true);
3561 return std::make_pair(APInt(1, 0), false);
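// Scalar constant folding for two ConstantSDNode operands; opaque constants
// are never folded here.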
3564 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
3565 EVT VT, const ConstantSDNode *Cst1,
3566 const ConstantSDNode *Cst2) {
3567 if (Cst1->isOpaque() || Cst2->isOpaque())
3570 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(),
3571 Cst2->getAPIntValue());
3574 return getConstant(Folded.first, DL, VT);
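// Fold (add/sub (GlobalAddress @g), c) into a single GlobalAddress node with
// an adjusted offset (for some global @g), provided the target reports that
// offset folding is legal for this global.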
3577 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
3578 const GlobalAddressSDNode *GA,
3580 if (GA->getOpcode() != ISD::GlobalAddress)
3582 if (!TLI->isOffsetFoldingLegal(GA))
3584 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2);
3587 int64_t Offset = Cst2->getSExtValue();
3589 case ISD::ADD: break;
3590 case ISD::SUB: Offset = -uint64_t(Offset); break;
3591 default: return SDValue();
3593 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT,
3594 GA->getOffset() + uint64_t(Offset));
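// Node-level constant folding: handles two constant scalars, a global symbol
// plus a constant offset, and elementwise folding of two constant
// BUILD_VECTOR operands.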
3597 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
3598 EVT VT, SDNode *Cst1,
3600 // If the opcode is a target-specific ISD node, there's nothing we can
3601 // do here and the operand rules may not line up with the below, so
3603 if (Opcode >= ISD::BUILTIN_OP_END)
3606 // Handle the case of two scalars.
3607 if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
3608 if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
3609 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2);
3610 assert((!Folded || !VT.isVector()) &&
3611 "Can't fold vectors ops with scalar operands");
3616 // fold (add Sym, c) -> Sym+c
3617 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
3618 return FoldSymbolOffset(Opcode, VT, GA, Cst2);
3619 if (isCommutativeBinOp(Opcode))
3620 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
3621 return FoldSymbolOffset(Opcode, VT, GA, Cst1);
3623 // For vectors extract each constant element into Inputs so we can constant
3624 // fold them individually.
3625 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
3626 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
3630 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
3632 EVT SVT = VT.getScalarType();
3633 SmallVector<SDValue, 4> Outputs;
3634 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
3635 SDValue V1 = BV1->getOperand(I);
3636 SDValue V2 = BV2->getOperand(I);
3638 // Avoid BUILD_VECTOR nodes that perform implicit truncation.
3639 // FIXME: This is valid and could be handled by truncation.
3640 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
3643 // Fold one vector element.
3644 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
3646 // Scalar folding only succeeded if the result is a constant or UNDEF.
3647 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
3648 ScalarResult.getOpcode() != ISD::ConstantFP)
3650 Outputs.push_back(ScalarResult);
3653 assert(VT.getVectorNumElements() == Outputs.size() &&
3654 "Vector size mismatch!");
3656 // We may have a vector type but a scalar result. Create a splat.
3657 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
3659 // Build a big vector out of the scalar elements we generated.
3660 return getBuildVector(VT, SDLoc(), Outputs);
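// Elementwise constant folding for an arbitrary number of vector operands
// (each UNDEF, a CONDCODE, or a constant BUILD_VECTOR): every lane is folded
// independently, integer results are legalized by sign extension if needed,
// and the lanes are reassembled into a BUILD_VECTOR.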
3663 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
3664 const SDLoc &DL, EVT VT,
3665 ArrayRef<SDValue> Ops,
3666 const SDNodeFlags *Flags) {
3667 // If the opcode is a target-specific ISD node, there's nothing we can
3668 // do here and the operand rules may not line up with the below, so
3670 if (Opcode >= ISD::BUILTIN_OP_END)
3673 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
3677 unsigned NumElts = VT.getVectorNumElements();
3679 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
3680 return !Op.getValueType().isVector() ||
3681 Op.getValueType().getVectorNumElements() == NumElts;
3684 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
3685 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
3686 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
3687 (BV && BV->isConstant());
3690 // All operands must be vector types with the same number of elements as
3691 // the result type and must be either UNDEF or a build vector of constant
3692 // or UNDEF scalars.
3693 if (!all_of(Ops, IsConstantBuildVectorOrUndef) ||
3694 !all_of(Ops, IsScalarOrSameVectorSize))
3697 // If we are comparing vectors, then the result needs to be an i1 boolean
3698 // that is then sign-extended back to the legal result type.
3699 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
3701 // Find a legal integer scalar type for constant promotion and
3702 // ensure that its scalar size is at least as large as the source.
3703 EVT LegalSVT = VT.getScalarType();
3704 if (LegalSVT.isInteger()) {
3705 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
3706 if (LegalSVT.bitsLT(VT.getScalarType()))
3710 // Constant fold each scalar lane separately.
3711 SmallVector<SDValue, 4> ScalarResults;
3712 for (unsigned i = 0; i != NumElts; i++) {
3713 SmallVector<SDValue, 4> ScalarOps;
3714 for (SDValue Op : Ops) {
3715 EVT InSVT = Op.getValueType().getScalarType();
3716 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
3718 // We've checked that this is UNDEF or a constant of some kind.
3720 ScalarOps.push_back(getUNDEF(InSVT));
3722 ScalarOps.push_back(Op);
3726 SDValue ScalarOp = InBV->getOperand(i);
3727 EVT ScalarVT = ScalarOp.getValueType();
3729 // Build vector (integer) scalar operands may need implicit
3730 // truncation - do this before constant folding.
3731 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
3732 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
3734 ScalarOps.push_back(ScalarOp);
3737 // Constant fold the scalar operands.
3738 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
3740 // Legalize the (integer) scalar constant if necessary.
3741 if (LegalSVT != SVT)
3742 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
3744 // Scalar folding only succeeded if the result is a constant or UNDEF.
3745 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
3746 ScalarResult.getOpcode() != ISD::ConstantFP)
3748 ScalarResults.push_back(ScalarResult);
3751 return getBuildVector(VT, DL, ScalarResults);
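// Two-operand getNode: canonicalizes constant (and UNDEF) operands to the
// RHS of commutative operations, applies opcode-specific folds and sanity
// checks, and CSEs the resulting node unless it produces a Glue value.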
3754 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3755 SDValue N1, SDValue N2,
3756 const SDNodeFlags *Flags) {
3757 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
3758 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
3759 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3760 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
3762 // Canonicalize constant to RHS if commutative.
3763 if (isCommutativeBinOp(Opcode)) {
3765 std::swap(N1C, N2C);
3767 } else if (N1CFP && !N2CFP) {
3768 std::swap(N1CFP, N2CFP);
3775 case ISD::TokenFactor:
3776 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
3777 N2.getValueType() == MVT::Other && "Invalid token factor!");
3778 // Fold trivial token factors.
3779 if (N1.getOpcode() == ISD::EntryToken) return N2;
3780 if (N2.getOpcode() == ISD::EntryToken) return N1;
3781 if (N1 == N2) return N1;
3783 case ISD::CONCAT_VECTORS: {
3784 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
3785 SDValue Ops[] = {N1, N2};
3786 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
3791 assert(VT.isInteger() && "This operator does not apply to FP types!");
3792 assert(N1.getValueType() == N2.getValueType() &&
3793 N1.getValueType() == VT && "Binary operator types must match!");
3794 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
3795 // worth handling here.
3796 if (N2C && N2C->isNullValue())
3798 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
3805 assert(VT.isInteger() && "This operator does not apply to FP types!");
3806 assert(N1.getValueType() == N2.getValueType() &&
3807 N1.getValueType() == VT && "Binary operator types must match!");
3808 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
3809 // it's worth handling here.
3810 if (N2C && N2C->isNullValue())
3824 assert(VT.isInteger() && "This operator does not apply to FP types!");
3825 assert(N1.getValueType() == N2.getValueType() &&
3826 N1.getValueType() == VT && "Binary operator types must match!");
3833 if (getTarget().Options.UnsafeFPMath) {
3834 if (Opcode == ISD::FADD) {
3836 if (N2CFP && N2CFP->getValueAPF().isZero())
3838 } else if (Opcode == ISD::FSUB) {
3840 if (N2CFP && N2CFP->getValueAPF().isZero())
3842 } else if (Opcode == ISD::FMUL) {
3844 if (N2CFP && N2CFP->isZero())
3847 if (N2CFP && N2CFP->isExactlyValue(1.0))
3851 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
3852 assert(N1.getValueType() == N2.getValueType() &&
3853 N1.getValueType() == VT && "Binary operator types must match!");
3855 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
3856 assert(N1.getValueType() == VT &&
3857 N1.getValueType().isFloatingPoint() &&
3858 N2.getValueType().isFloatingPoint() &&
3859 "Invalid FCOPYSIGN!");
3866 assert(VT == N1.getValueType() &&
3867 "Shift operators return type must be the same as their first arg");
3868 assert(VT.isInteger() && N2.getValueType().isInteger() &&
3869 "Shifts only work on integers");
3870 assert((!VT.isVector() || VT == N2.getValueType()) &&
3871 "Vector shift amounts must be in the same as their first arg");
3872 // Verify that the shift amount VT is big enough to hold valid shift
3873 // amounts. This catches things like trying to shift an i1024 value by an
3874 // i8, which is easy to fall into in generic code that uses
3875 // TLI.getShiftAmountTy().
3876 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
3877 "Invalid use of small shift amount with oversized value!");
3879 // Always fold shifts of i1 values so the code generator doesn't need to
3880 // handle them. Since we know the size of the shift has to be less than the
3881 // size of the value, the shift/rotate count is guaranteed to be zero.
3884 if (N2C && N2C->isNullValue())
3887 case ISD::FP_ROUND_INREG: {
3888 EVT EVT = cast<VTSDNode>(N2)->getVT();
3889 assert(VT == N1.getValueType() && "Not an inreg round!");
3890 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
3891 "Cannot FP_ROUND_INREG integer types");
3892 assert(EVT.isVector() == VT.isVector() &&
3893 "FP_ROUND_INREG type should be vector iff the operand "
3895 assert((!EVT.isVector() ||
3896 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3897 "Vector element counts must match in FP_ROUND_INREG");
3898 assert(EVT.bitsLE(VT) && "Not rounding down!");
3900 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
3904 assert(VT.isFloatingPoint() &&
3905 N1.getValueType().isFloatingPoint() &&
3906 VT.bitsLE(N1.getValueType()) &&
3907 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
3908 "Invalid FP_ROUND!");
3909 if (N1.getValueType() == VT) return N1; // noop conversion.
3911 case ISD::AssertSext:
3912 case ISD::AssertZext: {
3913 EVT EVT = cast<VTSDNode>(N2)->getVT();
3914 assert(VT == N1.getValueType() && "Not an inreg extend!");
3915 assert(VT.isInteger() && EVT.isInteger() &&
3916 "Cannot *_EXTEND_INREG FP types");
3917 assert(!EVT.isVector() &&
3918 "AssertSExt/AssertZExt type should be the vector element type "
3919 "rather than the vector type!");
3920 assert(EVT.bitsLE(VT) && "Not extending!");
3921 if (VT == EVT) return N1; // noop assertion.
3924 case ISD::SIGN_EXTEND_INREG: {
3925 EVT EVT = cast<VTSDNode>(N2)->getVT();
3926 assert(VT == N1.getValueType() && "Not an inreg extend!");
3927 assert(VT.isInteger() && EVT.isInteger() &&
3928 "Cannot *_EXTEND_INREG FP types");
3929 assert(EVT.isVector() == VT.isVector() &&
3930 "SIGN_EXTEND_INREG type should be vector iff the operand "
3932 assert((!EVT.isVector() ||
3933 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3934 "Vector element counts must match in SIGN_EXTEND_INREG");
3935 assert(EVT.bitsLE(VT) && "Not extending!");
3936 if (EVT == VT) return N1; // Not actually extending
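// Fold sign_extend_inreg of constants by shifting the value up to the top of
// the container and arithmetically shifting it back down, which replicates
// the sign bit of the narrow type; e.g. for an i16 container and an i8
// source, 0x00AB becomes 0xFFAB.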
3938 auto SignExtendInReg = [&](APInt Val) {
3939 unsigned FromBits = EVT.getScalarSizeInBits();
3940 Val <<= Val.getBitWidth() - FromBits;
3941 Val = Val.ashr(Val.getBitWidth() - FromBits);
3942 return getConstant(Val, DL, VT.getScalarType());
3946 const APInt &Val = N1C->getAPIntValue();
3947 return SignExtendInReg(Val);
3949 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
3950 SmallVector<SDValue, 8> Ops;
3951 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
3952 SDValue Op = N1.getOperand(i);
3954 Ops.push_back(getUNDEF(VT.getScalarType()));
3957 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3958 APInt Val = C->getAPIntValue();
3959 Val = Val.zextOrTrunc(VT.getScalarSizeInBits());
3960 Ops.push_back(SignExtendInReg(Val));
3965 if (Ops.size() == VT.getVectorNumElements())
3966 return getBuildVector(VT, DL, Ops);
3970 case ISD::EXTRACT_VECTOR_ELT:
3971 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
3973 return getUNDEF(VT);
3975 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
3976 if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements())
3977 return getUNDEF(VT);
3979 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
3980 // expanding copies of large vectors from registers.
3982 N1.getOpcode() == ISD::CONCAT_VECTORS &&
3983 N1.getNumOperands() > 0) {
3985 N1.getOperand(0).getValueType().getVectorNumElements();
3986 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3987 N1.getOperand(N2C->getZExtValue() / Factor),
3988 getConstant(N2C->getZExtValue() % Factor, DL,
3989 N2.getValueType()));
3992 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3993 // expanding large vector constants.
3994 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3995 SDValue Elt = N1.getOperand(N2C->getZExtValue());
3997 if (VT != Elt.getValueType())
3998 // If the vector element type is not legal, the BUILD_VECTOR operands
3999 // are promoted and implicitly truncated, and the result implicitly
4000 // extended. Make that explicit here.
4001 Elt = getAnyExtOrTrunc(Elt, DL, VT);
4006 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
4007 // operations are lowered to scalars.
4008 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
4009 // If the indices are the same, return the inserted element, else
4010 // if the indices are known different, extract the element from
4011 // the original vector.
4012 SDValue N1Op2 = N1.getOperand(2);
4013 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
4015 if (N1Op2C && N2C) {
4016 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
4017 if (VT == N1.getOperand(1).getValueType())
4018 return N1.getOperand(1);
4020 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
4023 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
4027 case ISD::EXTRACT_ELEMENT:
4028 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
4029 assert(!N1.getValueType().isVector() && !VT.isVector() &&
4030 (N1.getValueType().isInteger() == VT.isInteger()) &&
4031 N1.getValueType() != VT &&
4032 "Wrong types for EXTRACT_ELEMENT!");
4034 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
4035 // 64-bit integers into 32-bit parts. Instead of building the extract of
4036 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
4037 if (N1.getOpcode() == ISD::BUILD_PAIR)
4038 return N1.getOperand(N2C->getZExtValue());
4040 // EXTRACT_ELEMENT of a constant int is also very common.
4042 unsigned ElementSize = VT.getSizeInBits();
4043 unsigned Shift = ElementSize * N2C->getZExtValue();
4044 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
4045 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
4048 case ISD::EXTRACT_SUBVECTOR:
4049 if (VT.isSimple() && N1.getValueType().isSimple()) {
4050 assert(VT.isVector() && N1.getValueType().isVector() &&
4051 "Extract subvector VTs must be a vectors!");
4052 assert(VT.getVectorElementType() ==
4053 N1.getValueType().getVectorElementType() &&
4054 "Extract subvector VTs must have the same element type!");
4055 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
4056 "Extract subvector must be from larger vector to smaller vector!");
4059 assert((VT.getVectorNumElements() + N2C->getZExtValue()
4060 <= N1.getValueType().getVectorNumElements())
4061 && "Extract subvector overflow!");
4064 // Trivial extraction.
4065 if (VT.getSimpleVT() == N1.getSimpleValueType())
4068 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
4069 // during shuffle legalization.
4070 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
4071 VT == N1.getOperand(1).getValueType())
4072 return N1.getOperand(1);
4077 // Perform trivial constant folding.
4079 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
4082 // Constant fold FP operations.
4083 bool HasFPExceptions = TLI->hasFloatingPointExceptions();
4086 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
4087 APFloat::opStatus s;
4090 s = V1.add(V2, APFloat::rmNearestTiesToEven);
4091 if (!HasFPExceptions || s != APFloat::opInvalidOp)
4092 return getConstantFP(V1, DL, VT);
4095 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
4096 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
4097 return getConstantFP(V1, DL, VT);
4100 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
4101 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
4102 return getConstantFP(V1, DL, VT);
4105 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
4106 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
4107 s!=APFloat::opDivByZero)) {
4108 return getConstantFP(V1, DL, VT);
4113 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
4114 s!=APFloat::opDivByZero)) {
4115 return getConstantFP(V1, DL, VT);
4118 case ISD::FCOPYSIGN:
4120 return getConstantFP(V1, DL, VT);
4125 if (Opcode == ISD::FP_ROUND) {
4126 APFloat V = N1CFP->getValueAPF(); // make copy
4128 // This can return overflow, underflow, or inexact; we don't care.
4129 // FIXME need to be more flexible about rounding mode.
4130 (void)V.convert(EVTToAPFloatSemantics(VT),
4131 APFloat::rmNearestTiesToEven, &ignored);
4132 return getConstantFP(V, DL, VT);
4136 // Canonicalize an UNDEF to the RHS, even over a constant.
4138 if (isCommutativeBinOp(Opcode)) {
4142 case ISD::FP_ROUND_INREG:
4143 case ISD::SIGN_EXTEND_INREG:
4149 return N1; // fold op(undef, arg2) -> undef
4157 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
4158 // For vectors, we can't easily build an all zero vector, just return
4165 // Fold a bunch of operators when the RHS is undef.
4170 // Handle undef ^ undef -> 0 special case. This is a common idiom (misuse).
4172 return getConstant(0, DL, VT);
4182 return N2; // fold op(arg1, undef) -> undef
4188 if (getTarget().Options.UnsafeFPMath)
4196 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
4197 // For vectors, we can't easily build an all zero vector, just return
4202 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
4203 // For vectors, we can't easily build an all one vector, just return
4211 // Memoize this node if possible.
4213 SDVTList VTs = getVTList(VT);
4214 if (VT != MVT::Glue) {
4215 SDValue Ops[] = {N1, N2};
4216 FoldingSetNodeID ID;
4217 AddNodeIDNode(ID, Opcode, VTs, Ops);
4219 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4221 E->intersectFlagsWith(Flags);
4222 return SDValue(E, 0);
4225 N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
4226 CSEMap.InsertNode(N, IP);
4228 N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
4232 return SDValue(N, 0);
4235 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4236 SDValue N1, SDValue N2, SDValue N3) {
4237 // Perform various simplifications.
4240 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4241 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
4242 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
4243 if (N1CFP && N2CFP && N3CFP) {
4244 APFloat V1 = N1CFP->getValueAPF();
4245 const APFloat &V2 = N2CFP->getValueAPF();
4246 const APFloat &V3 = N3CFP->getValueAPF();
4247 APFloat::opStatus s =
4248 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
4249 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp)
4250 return getConstantFP(V1, DL, VT);
4254 case ISD::CONCAT_VECTORS: {
4255 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
4256 SDValue Ops[] = {N1, N2, N3};
4257 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
4262 // Use FoldSetCC to simplify SETCC's.
4263 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
4265 // Vector constant folding.
4266 SDValue Ops[] = {N1, N2, N3};
4267 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4272 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
4273 if (N1C->getZExtValue())
4274 return N2; // select true, X, Y -> X
4275 return N3; // select false, X, Y -> Y
4278 if (N2 == N3) return N2; // select C, X, X -> X
4280 case ISD::VECTOR_SHUFFLE:
4281 llvm_unreachable("should use getVectorShuffle constructor!");
4282 case ISD::INSERT_VECTOR_ELT: {
4283 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
4284 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
4285 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
4286 return getUNDEF(VT);
4289 case ISD::INSERT_SUBVECTOR: {
4291 if (VT.isSimple() && N1.getValueType().isSimple()
4292 && N2.getValueType().isSimple()) {
4293 assert(VT.isVector() && N1.getValueType().isVector() &&
4294 N2.getValueType().isVector() &&
4295 "Insert subvector VTs must be a vectors");
4296 assert(VT == N1.getValueType() &&
4297 "Dest and insert subvector source types must match!");
4298 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
4299 "Insert subvector must be from smaller vector to larger vector!");
4300 if (isa<ConstantSDNode>(Index)) {
4301 assert((N2.getValueType().getVectorNumElements() +
4302 cast<ConstantSDNode>(Index)->getZExtValue()
4303 <= VT.getVectorNumElements())
4304 && "Insert subvector overflow!");
4307 // Trivial insertion.
4308 if (VT.getSimpleVT() == N2.getSimpleValueType())
4314 // Fold bit_convert nodes from a type to themselves.
4315 if (N1.getValueType() == VT)
4320 // Memoize node if it doesn't produce a flag.
4322 SDVTList VTs = getVTList(VT);
4323 SDValue Ops[] = {N1, N2, N3};
4324 if (VT != MVT::Glue) {
4325 FoldingSetNodeID ID;
4326 AddNodeIDNode(ID, Opcode, VTs, Ops);
4328 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4329 return SDValue(E, 0);
4331 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4332 createOperands(N, Ops);
4333 CSEMap.InsertNode(N, IP);
4335 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4336 createOperands(N, Ops);
4340 return SDValue(N, 0);
4343 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4344 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
4345 SDValue Ops[] = { N1, N2, N3, N4 };
4346 return getNode(Opcode, DL, VT, Ops);
4349 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4350 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
4352 SDValue Ops[] = { N1, N2, N3, N4, N5 };
4353 return getNode(Opcode, DL, VT, Ops);
4356 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
4357 /// the incoming stack arguments to be loaded from the stack.
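/// It does so by walking every use of the EntryToken and collecting the chain
/// of each load whose address is a fixed-object frame index (incoming
/// arguments have negative indices), then merging those chains with the
/// original chain in a single TokenFactor.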
4358 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
4359 SmallVector<SDValue, 8> ArgChains;
4361 // Include the original chain at the beginning of the list. When this is
4362 // used by target LowerCall hooks, this helps legalize find the
4363 // CALLSEQ_BEGIN node.
4364 ArgChains.push_back(Chain);
4366 // Add a chain value for each stack argument.
4367 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
4368 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
4369 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
4370 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
4371 if (FI->getIndex() < 0)
4372 ArgChains.push_back(SDValue(L, 1));
4374 // Build a tokenfactor for all the chains.
4375 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
4378 /// getMemsetValue - Vectorized representation of the memset value operand.
4380 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
4382 assert(!Value.isUndef());
4384 unsigned NumBits = VT.getScalarSizeInBits();
4385 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
4386 assert(C->getAPIntValue().getBitWidth() == 8);
4387 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
4389 return DAG.getConstant(Val, dl, VT);
4390 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
4394 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
4395 EVT IntVT = VT.getScalarType();
4396 if (!IntVT.isInteger())
4397 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
4399 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
4401 // Use a multiplication with 0x010101... to extend the input to the
4403 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
4404 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
4405 DAG.getConstant(Magic, dl, IntVT));
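// E.g. for a 32-bit pattern, 0xAB * 0x01010101 == 0xABABABAB.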
4408 if (VT != Value.getValueType() && !VT.isInteger())
4409 Value = DAG.getBitcast(VT.getScalarType(), Value);
4410 if (VT != Value.getValueType())
4411 Value = DAG.getSplatBuildVector(VT, dl, Value);
4416 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
4417 /// used when a memcpy is turned into a memset when the source is a constant
/// string.
4419 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
4420 const TargetLowering &TLI, StringRef Str) {
4421 // Handle vector with all elements zero.
4424 return DAG.getConstant(0, dl, VT);
4425 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
4426 return DAG.getConstantFP(0.0, dl, VT);
4427 else if (VT.isVector()) {
4428 unsigned NumElts = VT.getVectorNumElements();
4429 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
4430 return DAG.getNode(ISD::BITCAST, dl, VT,
4431 DAG.getConstant(0, dl,
4432 EVT::getVectorVT(*DAG.getContext(),
4435 llvm_unreachable("Expected type!");
4438 assert(!VT.isVector() && "Can't handle vector type here!");
4439 unsigned NumVTBits = VT.getSizeInBits();
4440 unsigned NumVTBytes = NumVTBits / 8;
4441 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
4443 APInt Val(NumVTBits, 0);
4444 if (DAG.getDataLayout().isLittleEndian()) {
4445 for (unsigned i = 0; i != NumBytes; ++i)
4446 Val |= (uint64_t)(unsigned char)Str[i] << i*8;
4448 for (unsigned i = 0; i != NumBytes; ++i)
4449 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
4452 // If the "cost" of materializing the integer immediate is less than the cost
4453 // of a load, then it is cost effective to turn the load into the immediate.
4454 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
4455 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
4456 return DAG.getConstant(Val, dl, VT);
4457 return SDValue(nullptr, 0);
4460 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset,
4462 EVT VT = Base.getValueType();
4463 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
4466 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
4468 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
4469 uint64_t SrcDelta = 0;
4470 GlobalAddressSDNode *G = nullptr;
4471 if (Src.getOpcode() == ISD::GlobalAddress)
4472 G = cast<GlobalAddressSDNode>(Src);
4473 else if (Src.getOpcode() == ISD::ADD &&
4474 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
4475 Src.getOperand(1).getOpcode() == ISD::Constant) {
4476 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
4477 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
4482 return getConstantStringInfo(G->getGlobal(), Str,
4483 SrcDelta + G->getOffset(), false);
4486 /// Determines the optimal series of memory ops to replace the memset / memcpy.
4487 /// Return true if the number of memory ops is below the threshold (Limit).
4488 /// It returns the types of the sequence of memory ops to perform
4489 /// memset / memcpy by reference.
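/// For example, a 7-byte operation on a target with cheap i32 accesses might
/// come back as { i32, i16, i8 }, or as two overlapping i32 ops when
/// misaligned accesses are fast and overlap is permitted.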
4490 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
4491 unsigned Limit, uint64_t Size,
4492 unsigned DstAlign, unsigned SrcAlign,
                                     bool IsMemset, bool ZeroMemset,
                                     bool MemcpyStrSrc, bool AllowOverlap,
4497 unsigned DstAS, unsigned SrcAS,
                                     SelectionDAG &DAG,
4499 const TargetLowering &TLI) {
4500 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
4501 "Expecting memcpy / memset source to meet alignment requirement!");
4502 // If 'SrcAlign' is zero, that means the memory operation does not need to
4503 // load the value, i.e. memset or memcpy from constant string. Otherwise,
4504 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
4505 // is the specified alignment of the memory operation. If it is zero, that
4506 // means it's possible to change the alignment of the destination.
4507 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
4508 // not need to be loaded.
4509 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
4510 IsMemset, ZeroMemset, MemcpyStrSrc,
4511 DAG.getMachineFunction());
4513 if (VT == MVT::Other) {
4514 if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(DstAS) ||
4515 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign)) {
4516 VT = TLI.getPointerTy(DAG.getDataLayout(), DstAS);
4518 switch (DstAlign & 7) {
4519 case 0: VT = MVT::i64; break;
4520 case 4: VT = MVT::i32; break;
4521 case 2: VT = MVT::i16; break;
4522 default: VT = MVT::i8; break;
4527 while (!TLI.isTypeLegal(LVT))
4528 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
4529 assert(LVT.isInteger());
4535 unsigned NumMemOps = 0;
4537 unsigned VTSize = VT.getSizeInBits() / 8;
4538 while (VTSize > Size) {
4539 // For now, only use non-vector loads / stores for the left-over pieces.
4544 if (VT.isVector() || VT.isFloatingPoint()) {
4545 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
4546 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
4547 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
4549 else if (NewVT == MVT::i64 &&
4550 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
4551 TLI.isSafeMemOpType(MVT::f64)) {
4552 // i64 is usually not legal on 32-bit targets, but f64 may be.
4560 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
4561 if (NewVT == MVT::i8)
4563 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
4565 NewVTSize = NewVT.getSizeInBits() / 8;
4567 // If the new VT cannot cover all of the remaining bits, then consider
4568 // issuing a (or a pair of) unaligned and overlapping load / store.
4569 // FIXME: Only does this for 64-bit or more since we don't have proper
4570 // cost model for unaligned load / store.
4572 if (NumMemOps && AllowOverlap &&
4573 VTSize >= 8 && NewVTSize < Size &&
4574 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast)
4582 if (++NumMemOps > Limit)
4585 MemOps.push_back(VT);
4592 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
4593 // On Darwin, -Os means optimize for size without hurting performance, so
4594 // only really optimize for size when -Oz (MinSize) is used.
4595 if (MF.getTarget().getTargetTriple().isOSDarwin())
4596 return MF.getFunction()->optForMinSize();
4597 return MF.getFunction()->optForSize();
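/// Expand an inline memcpy into an explicit sequence of loads and stores,
/// using the list of memory value types chosen by FindOptimalMemOpLowering.
/// Returns a TokenFactor of the resulting store chains, or an empty SDValue
/// if the expansion is not considered profitable.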
4600 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
4601 SDValue Chain, SDValue Dst, SDValue Src,
4602 uint64_t Size, unsigned Align,
4603 bool isVol, bool AlwaysInline,
4604 MachinePointerInfo DstPtrInfo,
4605 MachinePointerInfo SrcPtrInfo) {
4606 // Turn a memcpy of undef to nop.
4610 // Expand memcpy to a series of load and store ops if the size operand falls
4611 // below a certain threshold.
4612 // TODO: In the AlwaysInline case, if the size is big then generate a loop
4613 // rather than maybe a humongous number of loads and stores.
4614 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4615 std::vector<EVT> MemOps;
4616 bool DstAlignCanChange = false;
4617 MachineFunction &MF = DAG.getMachineFunction();
4618 MachineFrameInfo &MFI = MF.getFrameInfo();
4619 bool OptSize = shouldLowerMemFuncForSize(MF);
4620 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4621 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
4622 DstAlignCanChange = true;
4623 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
4624 if (Align > SrcAlign)
4627 bool CopyFromStr = isMemSrcFromString(Src, Str);
4628 bool isZeroStr = CopyFromStr && Str.empty();
4629 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
4631 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
4632 (DstAlignCanChange ? 0 : Align),
4633 (isZeroStr ? 0 : SrcAlign),
4634 false, false, CopyFromStr, true,
4635 DstPtrInfo.getAddrSpace(),
4636 SrcPtrInfo.getAddrSpace(),
4640 if (DstAlignCanChange) {
4641 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4642 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4644 // Don't promote to an alignment that would require dynamic stack realignment.
4646 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
4647 if (!TRI->needsStackRealignment(MF))
4648 while (NewAlign > Align &&
4649 DAG.getDataLayout().exceedsNaturalStackAlignment(NewAlign))
4652 if (NewAlign > Align) {
4653 // Give the stack frame object a larger alignment if needed.
4654 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
4655 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
4660 MachineMemOperand::Flags MMOFlags =
4661 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
4662 SmallVector<SDValue, 8> OutChains;
4663 unsigned NumMemOps = MemOps.size();
4664 uint64_t SrcOff = 0, DstOff = 0;
4665 for (unsigned i = 0; i != NumMemOps; ++i) {
4667 unsigned VTSize = VT.getSizeInBits() / 8;
4668 SDValue Value, Store;
4670 if (VTSize > Size) {
4671 // Issuing an unaligned load / store pair that overlaps with the previous
4672 // pair. Adjust the offset accordingly.
4673 assert(i == NumMemOps-1 && i != 0);
4674 SrcOff -= VTSize - Size;
4675 DstOff -= VTSize - Size;
4679 (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
4680 // It's unlikely a store of a vector immediate can be done in a single
4681 // instruction. It would require a load from a constantpool first.
4682 // We only handle zero vectors here.
4683 // FIXME: Handle other cases where store of vector immediate is done in
4684 // a single instruction.
4685 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
4686 if (Value.getNode())
4687 Store = DAG.getStore(Chain, dl, Value,
4688 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4689 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
4692 if (!Store.getNode()) {
4693 // The type might not be legal for the target. This should only happen
4694 // if the type is smaller than a legal type, as on PPC, so the right
4695 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
4696 // to Load/Store if NVT==VT.
4697 // FIXME does the case above also need this?
4698 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
4699 assert(NVT.bitsGE(VT));
4700 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
4701 DAG.getMemBasePlusOffset(Src, SrcOff, dl),
4702 SrcPtrInfo.getWithOffset(SrcOff), VT,
4703 MinAlign(SrcAlign, SrcOff), MMOFlags);
4704 OutChains.push_back(Value.getValue(1));
4705 Store = DAG.getTruncStore(
4706 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4707 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
4709 OutChains.push_back(Store);
4715 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
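/// Expand an inline memmove. Unlike the memcpy expansion, every load is
/// emitted and token-factored before any store is issued, so the result is
/// correct even when the source and destination ranges overlap.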
4718 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
4719 SDValue Chain, SDValue Dst, SDValue Src,
4720 uint64_t Size, unsigned Align,
4721 bool isVol, bool AlwaysInline,
4722 MachinePointerInfo DstPtrInfo,
4723 MachinePointerInfo SrcPtrInfo) {
4724 // Turn a memmove of undef to nop.
4728 // Expand memmove to a series of load and store ops if the size operand falls
4729 // below a certain threshold.
4730 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4731 std::vector<EVT> MemOps;
4732 bool DstAlignCanChange = false;
4733 MachineFunction &MF = DAG.getMachineFunction();
4734 MachineFrameInfo &MFI = MF.getFrameInfo();
4735 bool OptSize = shouldLowerMemFuncForSize(MF);
4736 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4737 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
4738 DstAlignCanChange = true;
4739 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
4740 if (Align > SrcAlign)
4742 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
4744 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
4745 (DstAlignCanChange ? 0 : Align), SrcAlign,
4746 false, false, false, false,
4747 DstPtrInfo.getAddrSpace(),
4748 SrcPtrInfo.getAddrSpace(),
4752 if (DstAlignCanChange) {
4753 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4754 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4755 if (NewAlign > Align) {
4756 // Give the stack frame object a larger alignment if needed.
4757 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
4758 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
4763 MachineMemOperand::Flags MMOFlags =
4764 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
4765 uint64_t SrcOff = 0, DstOff = 0;
4766 SmallVector<SDValue, 8> LoadValues;
4767 SmallVector<SDValue, 8> LoadChains;
4768 SmallVector<SDValue, 8> OutChains;
4769 unsigned NumMemOps = MemOps.size();
4770 for (unsigned i = 0; i < NumMemOps; i++) {
4772 unsigned VTSize = VT.getSizeInBits() / 8;
4776 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
4777 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, MMOFlags);
4778 LoadValues.push_back(Value);
4779 LoadChains.push_back(Value.getValue(1));
4782 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
4784 for (unsigned i = 0; i < NumMemOps; i++) {
4786 unsigned VTSize = VT.getSizeInBits() / 8;
4789 Store = DAG.getStore(Chain, dl, LoadValues[i],
4790 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4791 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
4792 OutChains.push_back(Store);
4796 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4799 /// \brief Lower the call to the 'memset' intrinsic into a series of store operations.
4802 /// \param DAG Selection DAG where lowered code is placed.
4803 /// \param dl Link to corresponding IR location.
4804 /// \param Chain Control flow dependency.
4805 /// \param Dst Pointer to destination memory location.
4806 /// \param Src Value of byte to write into the memory.
4807 /// \param Size Number of bytes to write.
4808 /// \param Align Alignment of the destination in bytes.
4809 /// \param isVol True if destination is volatile.
4810 /// \param DstPtrInfo IR information on the memory pointer.
4811 /// \returns New head in the control flow, if lowering was successful, empty
4812 /// SDValue otherwise.
4814 /// The function tries to replace 'llvm.memset' intrinsic with several store
4815 /// operations and value calculation code. This is usually profitable for small memory sizes.
4817 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
4818 SDValue Chain, SDValue Dst, SDValue Src,
4819 uint64_t Size, unsigned Align, bool isVol,
4820 MachinePointerInfo DstPtrInfo) {
4821 // Turn a memset of undef to nop.
4825 // Expand memset to a series of load/store ops if the size operand
4826 // falls below a certain threshold.
4827 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4828 std::vector<EVT> MemOps;
4829 bool DstAlignCanChange = false;
4830 MachineFunction &MF = DAG.getMachineFunction();
4831 MachineFrameInfo &MFI = MF.getFrameInfo();
4832 bool OptSize = shouldLowerMemFuncForSize(MF);
4833 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4834 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
4835 DstAlignCanChange = true;
4837 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
4838 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
4839 Size, (DstAlignCanChange ? 0 : Align), 0,
4840 true, IsZeroVal, false, true,
4841 DstPtrInfo.getAddrSpace(), ~0u,
4845 if (DstAlignCanChange) {
4846 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4847 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4848 if (NewAlign > Align) {
4849 // Give the stack frame object a larger alignment if needed.
4850 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
4851 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
4856 SmallVector<SDValue, 8> OutChains;
4857 uint64_t DstOff = 0;
4858 unsigned NumMemOps = MemOps.size();
4860 // Find the largest store and generate the bit pattern for it.
4861 EVT LargestVT = MemOps[0];
4862 for (unsigned i = 1; i < NumMemOps; i++)
4863 if (MemOps[i].bitsGT(LargestVT))
4864 LargestVT = MemOps[i];
4865 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
4867 for (unsigned i = 0; i < NumMemOps; i++) {
4869 unsigned VTSize = VT.getSizeInBits() / 8;
4870 if (VTSize > Size) {
4871 // Issuing an unaligned load / store pair that overlaps with the previous
4872 // pair. Adjust the offset accordingly.
4873 assert(i == NumMemOps-1 && i != 0);
4874 DstOff -= VTSize - Size;
4877 // If this store is smaller than the largest store, see whether we can get
4878 // the smaller value for free with a truncate.
4879 SDValue Value = MemSetValue;
4880 if (VT.bitsLT(LargestVT)) {
4881 if (!LargestVT.isVector() && !VT.isVector() &&
4882 TLI.isTruncateFree(LargestVT, VT))
4883 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
4885 Value = getMemsetValue(Src, VT, DAG, dl);
4887 assert(Value.getValueType() == VT && "Value with wrong type.");
4888 SDValue Store = DAG.getStore(
4889 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4890 DstPtrInfo.getWithOffset(DstOff), Align,
4891 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
4892 OutChains.push_back(Store);
4893 DstOff += VT.getSizeInBits() / 8;
4897 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4900 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
4902 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
4903 // pointer operands can be losslessly bitcasted to pointers of address space 0
4904 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
4905 report_fatal_error("cannot lower memory intrinsic in address space " +
4910 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
4911 SDValue Src, SDValue Size, unsigned Align,
4912 bool isVol, bool AlwaysInline, bool isTailCall,
4913 MachinePointerInfo DstPtrInfo,
4914 MachinePointerInfo SrcPtrInfo) {
4915 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4917 // Check to see if we should lower the memcpy to loads and stores first.
4918 // For cases within the target-specified limits, this is the best choice.
4919 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4921 // Memcpy with size zero? Just return the original chain.
4922 if (ConstantSize->isNullValue())
4925 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4926 ConstantSize->getZExtValue(),Align,
4927 isVol, false, DstPtrInfo, SrcPtrInfo);
4928 if (Result.getNode())
4932 // Then check to see if we should lower the memcpy with target-specific
4933 // code. If the target chooses to do this, this is the next best.
4935 SDValue Result = TSI->EmitTargetCodeForMemcpy(
4936 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
4937 DstPtrInfo, SrcPtrInfo);
4938 if (Result.getNode())
4942 // If we really need inline code and the target declined to provide it,
4943 // use a (potentially long) sequence of loads and stores.
4945 assert(ConstantSize && "AlwaysInline requires a constant size!");
4946 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4947 ConstantSize->getZExtValue(), Align, isVol,
4948 true, DstPtrInfo, SrcPtrInfo);
4951 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
4952 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
4954 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
4955 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
4956 // respect volatile, so they may do things like read or write memory
4957 // beyond the given memory regions. But fixing this isn't easy, and most
4958 // people don't care.
4960 // Emit a library call.
4961 TargetLowering::ArgListTy Args;
4962 TargetLowering::ArgListEntry Entry;
4963 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
4964 Entry.Node = Dst; Args.push_back(Entry);
4965 Entry.Node = Src; Args.push_back(Entry);
4966 Entry.Node = Size; Args.push_back(Entry);
4967 // FIXME: pass in SDLoc
4968 TargetLowering::CallLoweringInfo CLI(*this);
4971 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
4972 Dst.getValueType().getTypeForEVT(*getContext()),
4973 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
4974 TLI->getPointerTy(getDataLayout())),
4977 .setTailCall(isTailCall);
4979 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4980 return CallResult.second;
4983 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
4984 SDValue Src, SDValue Size, unsigned Align,
4985 bool isVol, bool isTailCall,
4986 MachinePointerInfo DstPtrInfo,
4987 MachinePointerInfo SrcPtrInfo) {
4988 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4990 // Check to see if we should lower the memmove to loads and stores first.
4991 // For cases within the target-specified limits, this is the best choice.
4992 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4994 // Memmove with size zero? Just return the original chain.
4995 if (ConstantSize->isNullValue())
4999 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
5000 ConstantSize->getZExtValue(), Align, isVol,
5001 false, DstPtrInfo, SrcPtrInfo);
5002 if (Result.getNode())
5006 // Then check to see if we should lower the memmove with target-specific
5007 // code. If the target chooses to do this, this is the next best.
5009 SDValue Result = TSI->EmitTargetCodeForMemmove(
5010 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
5011 if (Result.getNode())
5015 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
5016 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
5018 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
5019 // not be safe. See memcpy above for more details.
5021 // Emit a library call.
5022 TargetLowering::ArgListTy Args;
5023 TargetLowering::ArgListEntry Entry;
5024 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
5025 Entry.Node = Dst; Args.push_back(Entry);
5026 Entry.Node = Src; Args.push_back(Entry);
5027 Entry.Node = Size; Args.push_back(Entry);
5028 // FIXME: pass in SDLoc
5029 TargetLowering::CallLoweringInfo CLI(*this);
5032 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
5033 Dst.getValueType().getTypeForEVT(*getContext()),
5034 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
5035 TLI->getPointerTy(getDataLayout())),
5038 .setTailCall(isTailCall);
5040 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
5041 return CallResult.second;
5044 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
5045 SDValue Src, SDValue Size, unsigned Align,
5046 bool isVol, bool isTailCall,
5047 MachinePointerInfo DstPtrInfo) {
5048 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
5050 // Check to see if we should lower the memset to stores first.
5051 // For cases within the target-specified limits, this is the best choice.
5052 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
5054 // Memset with size zero? Just return the original chain.
5055 if (ConstantSize->isNullValue())
5059 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
5060 Align, isVol, DstPtrInfo);
5062 if (Result.getNode())
5066 // Then check to see if we should lower the memset with target-specific
5067 // code. If the target chooses to do this, this is the next best.
5069 SDValue Result = TSI->EmitTargetCodeForMemset(
5070 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
5071 if (Result.getNode())
5075 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
5077 // Emit a library call.
5078 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
5079 TargetLowering::ArgListTy Args;
5080 TargetLowering::ArgListEntry Entry;
5081 Entry.Node = Dst; Entry.Ty = IntPtrTy;
5082 Args.push_back(Entry);
5084 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
5085 Args.push_back(Entry);
5087 Entry.Ty = IntPtrTy;
5088 Args.push_back(Entry);
5090 // FIXME: pass in SDLoc
5091 TargetLowering::CallLoweringInfo CLI(*this);
5094 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
5095 Dst.getValueType().getTypeForEVT(*getContext()),
5096 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
5097 TLI->getPointerTy(getDataLayout())),
5100 .setTailCall(isTailCall);
5102 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
5103 return CallResult.second;
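// Common creation path for atomic nodes: the node is uniqued on its opcode,
// value types, operands, memory VT, and address space; when an equivalent
// node already exists, its memory operand alignment is refined and the
// existing node is reused.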
5106 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5107 SDVTList VTList, ArrayRef<SDValue> Ops,
5108 MachineMemOperand *MMO) {
5109 FoldingSetNodeID ID;
5110 ID.AddInteger(MemVT.getRawBits());
5111 AddNodeIDNode(ID, Opcode, VTList, Ops);
5112 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5114 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5115 cast<AtomicSDNode>(E)->refineAlignment(MMO);
5116 return SDValue(E, 0);
5119 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5120 VTList, MemVT, MMO);
5121 createOperands(N, Ops);
5123 CSEMap.InsertNode(N, IP);
5125 return SDValue(N, 0);
5128 SDValue SelectionDAG::getAtomicCmpSwap(
5129 unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain,
5130 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
5131 unsigned Alignment, AtomicOrdering SuccessOrdering,
5132 AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
5133 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
5134 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
5135 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
5137 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5138 Alignment = getEVTAlignment(MemVT);
5140 MachineFunction &MF = getMachineFunction();
5142 // FIXME: Volatile isn't really correct; we should keep track of atomic
5143 // orderings in the memoperand.
5144 auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad |
5145 MachineMemOperand::MOStore;
5146 MachineMemOperand *MMO =
5147 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
5148 AAMDNodes(), nullptr, SynchScope, SuccessOrdering,
5151 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO);
5154 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
5155 EVT MemVT, SDVTList VTs, SDValue Chain,
5156 SDValue Ptr, SDValue Cmp, SDValue Swp,
5157 MachineMemOperand *MMO) {
5158 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
5159 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
5160 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
5162 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
5163 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
5166 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5167 SDValue Chain, SDValue Ptr, SDValue Val,
5168 const Value *PtrVal, unsigned Alignment,
5169 AtomicOrdering Ordering,
5170 SynchronizationScope SynchScope) {
5171 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5172 Alignment = getEVTAlignment(MemVT);
5174 MachineFunction &MF = getMachineFunction();
5175 // An atomic store does not load. An atomic load does not store.
5176 // (An atomicrmw obviously both loads and stores.)
5177 // For now, atomics are considered to be volatile always, and they are chained as such.
5179 // FIXME: Volatile isn't really correct; we should keep track of atomic
5180 // orderings in the memoperand.
5181 auto Flags = MachineMemOperand::MOVolatile;
5182 if (Opcode != ISD::ATOMIC_STORE)
5183 Flags |= MachineMemOperand::MOLoad;
5184 if (Opcode != ISD::ATOMIC_LOAD)
5185 Flags |= MachineMemOperand::MOStore;
5187 MachineMemOperand *MMO =
5188 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
5189 MemVT.getStoreSize(), Alignment, AAMDNodes(),
5190 nullptr, SynchScope, Ordering);
5192 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
5195 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5196 SDValue Chain, SDValue Ptr, SDValue Val,
5197 MachineMemOperand *MMO) {
5198 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
5199 Opcode == ISD::ATOMIC_LOAD_SUB ||
5200 Opcode == ISD::ATOMIC_LOAD_AND ||
5201 Opcode == ISD::ATOMIC_LOAD_OR ||
5202 Opcode == ISD::ATOMIC_LOAD_XOR ||
5203 Opcode == ISD::ATOMIC_LOAD_NAND ||
5204 Opcode == ISD::ATOMIC_LOAD_MIN ||
5205 Opcode == ISD::ATOMIC_LOAD_MAX ||
5206 Opcode == ISD::ATOMIC_LOAD_UMIN ||
5207 Opcode == ISD::ATOMIC_LOAD_UMAX ||
5208 Opcode == ISD::ATOMIC_SWAP ||
5209 Opcode == ISD::ATOMIC_STORE) &&
5210 "Invalid Atomic Op");
5212 EVT VT = Val.getValueType();
5214 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
5215 getVTList(VT, MVT::Other);
5216 SDValue Ops[] = {Chain, Ptr, Val};
5217 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
5220 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5221 EVT VT, SDValue Chain, SDValue Ptr,
5222 MachineMemOperand *MMO) {
5223 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
5225 SDVTList VTs = getVTList(VT, MVT::Other);
5226 SDValue Ops[] = {Chain, Ptr};
5227 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
5230 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
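///
/// Illustrative sketch (Lo and Hi are assumed to be SDValues already built
/// in this DAG, e.g. the two halves of an expanded 64-bit value):
///   SDValue Parts[] = { Lo, Hi };
///   SDValue Merged = DAG.getMergeValues(Parts, dl);
///   // Merged.getValue(0) is Lo, Merged.getValue(1) is Hi.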
5231 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
5232 if (Ops.size() == 1)
5235 SmallVector<EVT, 4> VTs;
5236 VTs.reserve(Ops.size());
5237 for (unsigned i = 0; i < Ops.size(); ++i)
5238 VTs.push_back(Ops[i].getValueType());
5239 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
5242 SDValue SelectionDAG::getMemIntrinsicNode(
5243 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
5244 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, bool Vol,
5245 bool ReadMem, bool WriteMem, unsigned Size) {
5246 if (Align == 0) // Ensure that codegen never sees alignment 0
5247 Align = getEVTAlignment(MemVT);
5249 MachineFunction &MF = getMachineFunction();
5250 auto Flags = MachineMemOperand::MONone;
5252 Flags |= MachineMemOperand::MOStore;
5254 Flags |= MachineMemOperand::MOLoad;
5256 Flags |= MachineMemOperand::MOVolatile;
5258 Size = MemVT.getStoreSize();
5259 MachineMemOperand *MMO =
5260 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);
5262 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
5265 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
5267 ArrayRef<SDValue> Ops, EVT MemVT,
5268 MachineMemOperand *MMO) {
5269 assert((Opcode == ISD::INTRINSIC_VOID ||
5270 Opcode == ISD::INTRINSIC_W_CHAIN ||
5271 Opcode == ISD::PREFETCH ||
5272 Opcode == ISD::LIFETIME_START ||
5273 Opcode == ISD::LIFETIME_END ||
5274 (Opcode <= INT_MAX &&
5275 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
5276 "Opcode is not a memory-accessing opcode!");
5278 // Memoize the node unless it returns a flag.
5279 MemIntrinsicSDNode *N;
5280 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5281 FoldingSetNodeID ID;
5282 AddNodeIDNode(ID, Opcode, VTList, Ops);
5283 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5285 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5286 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
5287 return SDValue(E, 0);
5290 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5291 VTList, MemVT, MMO);
5292 createOperands(N, Ops);
5294 CSEMap.InsertNode(N, IP);
5296 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5297 VTList, MemVT, MMO);
5298 createOperands(N, Ops);
5301 return SDValue(N, 0);
5304 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
5305 /// MachinePointerInfo record from it. This is particularly useful because the
5306 /// code generator has many cases where it doesn't bother passing in a
5307 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
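///
/// Illustrative sketch, assuming FI is a frame-index SDValue and PtrVT is the
/// pointer type: a load through FI+8 built with a null MachinePointerInfo
///   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, FI,
///                              DAG.getConstant(8, dl, PtrVT));
///   SDValue L = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
/// gets an inferred fixed-stack MachinePointerInfo at offset 8 here.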
5308 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr,
5309 int64_t Offset = 0) {
5310 // If this is FI+Offset, we can model it.
5311 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
5312 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
5313 FI->getIndex(), Offset);
5315 // If this is (FI+Offset1)+Offset2, we can model it.
5316 if (Ptr.getOpcode() != ISD::ADD ||
5317 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
5318 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
5319 return MachinePointerInfo();
5321 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5322 return MachinePointerInfo::getFixedStack(
5323 DAG.getMachineFunction(), FI,
5324 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
5327 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
5328 /// MachinePointerInfo record from it. This is particularly useful because the
5329 /// code generator has many cases where it doesn't bother passing in a
5330 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
5331 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr, SDValue OffsetOp) {
5333 // If the 'Offset' value isn't a constant, we can't handle this.
5334 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
5335 return InferPointerInfo(DAG, Ptr, OffsetNode->getSExtValue());
5336 if (OffsetOp.isUndef())
5337 return InferPointerInfo(DAG, Ptr);
5338 return MachinePointerInfo();
5341 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
5342 EVT VT, const SDLoc &dl, SDValue Chain,
5343 SDValue Ptr, SDValue Offset,
5344 MachinePointerInfo PtrInfo, EVT MemVT,
5346 MachineMemOperand::Flags MMOFlags,
5347 const AAMDNodes &AAInfo, const MDNode *Ranges) {
5348 assert(Chain.getValueType() == MVT::Other &&
5349 "Invalid chain type");
5350 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5351 Alignment = getEVTAlignment(MemVT);
5353 MMOFlags |= MachineMemOperand::MOLoad;
5354 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
5355 // If we don't have a PtrInfo, infer the trivial frame index case to simplify the code below.
5357 if (PtrInfo.V.isNull())
5358 PtrInfo = InferPointerInfo(*this, Ptr, Offset);
5360 MachineFunction &MF = getMachineFunction();
5361 MachineMemOperand *MMO = MF.getMachineMemOperand(
5362 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
5363 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
5366 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
5367 EVT VT, const SDLoc &dl, SDValue Chain,
5368 SDValue Ptr, SDValue Offset, EVT MemVT,
5369 MachineMemOperand *MMO) {
5371 ExtType = ISD::NON_EXTLOAD;
5372 } else if (ExtType == ISD::NON_EXTLOAD) {
5373 assert(VT == MemVT && "Non-extending load from different memory type!");
5376 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
5377 "Should only be an extending load, not truncating!");
5378 assert(VT.isInteger() == MemVT.isInteger() &&
5379 "Cannot convert from FP to Int or Int -> FP!");
5380 assert(VT.isVector() == MemVT.isVector() &&
5381 "Cannot use an ext load to convert to or from a vector!");
5382 assert((!VT.isVector() ||
5383 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
5384 "Cannot use an ext load to change the number of vector elements!");
5387 bool Indexed = AM != ISD::UNINDEXED;
5388 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
5390 SDVTList VTs = Indexed ?
5391 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
5392 SDValue Ops[] = { Chain, Ptr, Offset };
5393 FoldingSetNodeID ID;
5394 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
5395 ID.AddInteger(MemVT.getRawBits());
5396 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
5397 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
5398 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5400 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5401 cast<LoadSDNode>(E)->refineAlignment(MMO);
5402 return SDValue(E, 0);
5404 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
5405 ExtType, MemVT, MMO);
5406 createOperands(N, Ops);
5408 CSEMap.InsertNode(N, IP);
5410 return SDValue(N, 0);
5413 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5414 SDValue Ptr, MachinePointerInfo PtrInfo,
5416 MachineMemOperand::Flags MMOFlags,
5417 const AAMDNodes &AAInfo, const MDNode *Ranges) {
5418 SDValue Undef = getUNDEF(Ptr.getValueType());
5419 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5420 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
5423 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5424 SDValue Ptr, MachineMemOperand *MMO) {
5425 SDValue Undef = getUNDEF(Ptr.getValueType());
5426 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5430 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5431 EVT VT, SDValue Chain, SDValue Ptr,
5432 MachinePointerInfo PtrInfo, EVT MemVT,
5434 MachineMemOperand::Flags MMOFlags,
5435 const AAMDNodes &AAInfo) {
5436 SDValue Undef = getUNDEF(Ptr.getValueType());
5437 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
5438 MemVT, Alignment, MMOFlags, AAInfo);
5441 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5442 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
5443 MachineMemOperand *MMO) {
5444 SDValue Undef = getUNDEF(Ptr.getValueType());
5445 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
5449 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
5450 SDValue Base, SDValue Offset,
5451 ISD::MemIndexedMode AM) {
5452 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
5453 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
5454 // Don't propagate the invariant or dereferenceable flags.
5456 LD->getMemOperand()->getFlags() &
5457 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
5458 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
5459 LD->getChain(), Base, Offset, LD->getPointerInfo(),
5460 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
5464 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
5465 SDValue Ptr, MachinePointerInfo PtrInfo,
5467 MachineMemOperand::Flags MMOFlags,
5468 const AAMDNodes &AAInfo) {
5469 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
5470 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5471 Alignment = getEVTAlignment(Val.getValueType());
5473 MMOFlags |= MachineMemOperand::MOStore;
5474 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
5476 if (PtrInfo.V.isNull())
5477 PtrInfo = InferPointerInfo(*this, Ptr);
5479 MachineFunction &MF = getMachineFunction();
5480 MachineMemOperand *MMO = MF.getMachineMemOperand(
5481 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
5482 return getStore(Chain, dl, Val, Ptr, MMO);
5485 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
5486 SDValue Ptr, MachineMemOperand *MMO) {
5487 assert(Chain.getValueType() == MVT::Other &&
5488 "Invalid chain type");
5489 EVT VT = Val.getValueType();
5490 SDVTList VTs = getVTList(MVT::Other);
5491 SDValue Undef = getUNDEF(Ptr.getValueType());
5492 SDValue Ops[] = { Chain, Val, Ptr, Undef };
5493 FoldingSetNodeID ID;
5494 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5495 ID.AddInteger(VT.getRawBits());
5496 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
5497 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
5498 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5500 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5501 cast<StoreSDNode>(E)->refineAlignment(MMO);
5502 return SDValue(E, 0);
5504 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5505 ISD::UNINDEXED, false, VT, MMO);
5506 createOperands(N, Ops);
5508 CSEMap.InsertNode(N, IP);
5510 return SDValue(N, 0);
5513 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
5514 SDValue Ptr, MachinePointerInfo PtrInfo,
5515 EVT SVT, unsigned Alignment,
5516 MachineMemOperand::Flags MMOFlags,
5517 const AAMDNodes &AAInfo) {
5518 assert(Chain.getValueType() == MVT::Other &&
5519 "Invalid chain type");
5520 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5521 Alignment = getEVTAlignment(SVT);
5523 MMOFlags |= MachineMemOperand::MOStore;
5524 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
5526 if (PtrInfo.V.isNull())
5527 PtrInfo = InferPointerInfo(*this, Ptr);
5529 MachineFunction &MF = getMachineFunction();
5530 MachineMemOperand *MMO = MF.getMachineMemOperand(
5531 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
5532 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
5535 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
5536 SDValue Ptr, EVT SVT,
5537 MachineMemOperand *MMO) {
5538 EVT VT = Val.getValueType();
5540 assert(Chain.getValueType() == MVT::Other &&
5541 "Invalid chain type");
5543 return getStore(Chain, dl, Val, Ptr, MMO);
5545 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
5546 "Should only be a truncating store, not extending!");
5547 assert(VT.isInteger() == SVT.isInteger() &&
5548 "Can't do FP-INT conversion!");
5549 assert(VT.isVector() == SVT.isVector() &&
5550 "Cannot use trunc store to convert to or from a vector!");
5551 assert((!VT.isVector() ||
5552 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
5553 "Cannot use trunc store to change the number of vector elements!");
5555 SDVTList VTs = getVTList(MVT::Other);
5556 SDValue Undef = getUNDEF(Ptr.getValueType());
5557 SDValue Ops[] = { Chain, Val, Ptr, Undef };
5558 FoldingSetNodeID ID;
5559 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5560 ID.AddInteger(SVT.getRawBits());
5561 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
5562 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
5563 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5565 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5566 cast<StoreSDNode>(E)->refineAlignment(MMO);
5567 return SDValue(E, 0);
5569 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5570 ISD::UNINDEXED, true, SVT, MMO);
5571 createOperands(N, Ops);
5573 CSEMap.InsertNode(N, IP);
5575 return SDValue(N, 0);
5578 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
5579 SDValue Base, SDValue Offset,
5580 ISD::MemIndexedMode AM) {
5581 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
5582 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
5583 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
5584 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
5585 FoldingSetNodeID ID;
5586 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5587 ID.AddInteger(ST->getMemoryVT().getRawBits());
5588 ID.AddInteger(ST->getRawSubclassData());
5589 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
5591 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
5592 return SDValue(E, 0);
5594 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
5595 ST->isTruncatingStore(), ST->getMemoryVT(),
5596 ST->getMemOperand());
5597 createOperands(N, Ops);
5599 CSEMap.InsertNode(N, IP);
5601 return SDValue(N, 0);
5604 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5605 SDValue Ptr, SDValue Mask, SDValue Src0,
5606 EVT MemVT, MachineMemOperand *MMO,
5607 ISD::LoadExtType ExtTy, bool isExpanding) {
5609 SDVTList VTs = getVTList(VT, MVT::Other);
5610 SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
5611 FoldingSetNodeID ID;
5612 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
5613 ID.AddInteger(VT.getRawBits());
5614 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
5615 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
5616 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5618 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5619 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
5620 return SDValue(E, 0);
5622 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5623 ExtTy, isExpanding, MemVT, MMO);
5624 createOperands(N, Ops);
5626 CSEMap.InsertNode(N, IP);
5628 return SDValue(N, 0);
5631 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
5632 SDValue Val, SDValue Ptr, SDValue Mask,
5633 EVT MemVT, MachineMemOperand *MMO,
5634 bool IsTruncating, bool IsCompressing) {
5635 assert(Chain.getValueType() == MVT::Other &&
5636 "Invalid chain type");
5637 EVT VT = Val.getValueType();
5638 SDVTList VTs = getVTList(MVT::Other);
5639 SDValue Ops[] = { Chain, Ptr, Mask, Val };
5640 FoldingSetNodeID ID;
5641 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
5642 ID.AddInteger(VT.getRawBits());
5643 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
5644 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO));
5645 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5647 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5648 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
5649 return SDValue(E, 0);
5651 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5652 IsTruncating, IsCompressing, MemVT, MMO);
5653 createOperands(N, Ops);
5655 CSEMap.InsertNode(N, IP);
5657 return SDValue(N, 0);
5660 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
5661 ArrayRef<SDValue> Ops,
5662 MachineMemOperand *MMO) {
5663 assert(Ops.size() == 5 && "Incompatible number of operands");
5665 FoldingSetNodeID ID;
5666 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
5667 ID.AddInteger(VT.getRawBits());
5668 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
5669 dl.getIROrder(), VTs, VT, MMO));
5670 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5672 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5673 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
5674 return SDValue(E, 0);
5677 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
5679 createOperands(N, Ops);
5681 assert(N->getValue().getValueType() == N->getValueType(0) &&
5682 "Incompatible type of the PassThru value in MaskedGatherSDNode");
5683 assert(N->getMask().getValueType().getVectorNumElements() ==
5684 N->getValueType(0).getVectorNumElements() &&
5685 "Vector width mismatch between mask and data");
5686 assert(N->getIndex().getValueType().getVectorNumElements() ==
5687 N->getValueType(0).getVectorNumElements() &&
5688 "Vector width mismatch between index and data");
5690 CSEMap.InsertNode(N, IP);
5692 return SDValue(N, 0);
5695 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
5696 ArrayRef<SDValue> Ops,
5697 MachineMemOperand *MMO) {
5698 assert(Ops.size() == 5 && "Incompatible number of operands");
5700 FoldingSetNodeID ID;
5701 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
5702 ID.AddInteger(VT.getRawBits());
5703 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
5704 dl.getIROrder(), VTs, VT, MMO));
5705 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5707 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5708 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
5709 return SDValue(E, 0);
5711 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
5713 createOperands(N, Ops);
5715 assert(N->getMask().getValueType().getVectorNumElements() ==
5716 N->getValue().getValueType().getVectorNumElements() &&
5717 "Vector width mismatch between mask and data");
5718 assert(N->getIndex().getValueType().getVectorNumElements() ==
5719 N->getValue().getValueType().getVectorNumElements() &&
5720 "Vector width mismatch between index and data");
5722 CSEMap.InsertNode(N, IP);
5724 return SDValue(N, 0);
5727 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
5728 SDValue Ptr, SDValue SV, unsigned Align) {
5729 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
5730 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
5733 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5734 ArrayRef<SDUse> Ops) {
5735 switch (Ops.size()) {
5736 case 0: return getNode(Opcode, DL, VT);
5737 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
5738 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
5739 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
5743 // Copy from an SDUse array into an SDValue array for use with
5744 // the regular getNode logic.
5745 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
5746 return getNode(Opcode, DL, VT, NewOps);
5749 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5750 ArrayRef<SDValue> Ops, const SDNodeFlags *Flags) {
5751 unsigned NumOps = Ops.size();
5753 case 0: return getNode(Opcode, DL, VT);
5754 case 1: return getNode(Opcode, DL, VT, Ops[0]);
5755 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
5756 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
5762 case ISD::CONCAT_VECTORS: {
5763 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
5764 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
5768 case ISD::SELECT_CC: {
5769 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
5770 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
5771 "LHS and RHS of condition must have same type!");
5772 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5773 "True and False arms of SelectCC must have same type!");
5774 assert(Ops[2].getValueType() == VT &&
5775 "select_cc node must be of same type as true and false value!");
5779 assert(NumOps == 5 && "BR_CC takes 5 operands!");
5780 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5781 "LHS/RHS of comparison should match types!");
5788 SDVTList VTs = getVTList(VT);
5790 if (VT != MVT::Glue) {
5791 FoldingSetNodeID ID;
5792 AddNodeIDNode(ID, Opcode, VTs, Ops);
5795 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5796 return SDValue(E, 0);
5798 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5799 createOperands(N, Ops);
5801 CSEMap.InsertNode(N, IP);
5803 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5804 createOperands(N, Ops);
5808 return SDValue(N, 0);
5811 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
5812 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
5813 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
5816 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5817 ArrayRef<SDValue> Ops) {
5818 if (VTList.NumVTs == 1)
5819 return getNode(Opcode, DL, VTList.VTs[0], Ops);
5823 // FIXME: figure out how to safely handle things like
5824 // int foo(int x) { return 1 << (x & 255); }
5825 // int bar() { return foo(256); }
5826 case ISD::SRA_PARTS:
5827 case ISD::SRL_PARTS:
5828 case ISD::SHL_PARTS:
5829 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
5830 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
5831 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5832 else if (N3.getOpcode() == ISD::AND)
5833 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
5834 // If the and is only masking out bits that cannot affect the shift,
5835 // eliminate the and.
5836 unsigned NumBits = VT.getScalarSizeInBits()*2;
5837 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
5838 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5844 // Memoize the node unless it returns a flag.
5846 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5847 FoldingSetNodeID ID;
5848 AddNodeIDNode(ID, Opcode, VTList, Ops);
5850 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5851 return SDValue(E, 0);
5853 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
5854 createOperands(N, Ops);
5855 CSEMap.InsertNode(N, IP);
5857 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
5858 createOperands(N, Ops);
5861 return SDValue(N, 0);
5864 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
5866 return getNode(Opcode, DL, VTList, None);
5869 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5871 SDValue Ops[] = { N1 };
5872 return getNode(Opcode, DL, VTList, Ops);
5875 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5876 SDValue N1, SDValue N2) {
5877 SDValue Ops[] = { N1, N2 };
5878 return getNode(Opcode, DL, VTList, Ops);
5881 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5882 SDValue N1, SDValue N2, SDValue N3) {
5883 SDValue Ops[] = { N1, N2, N3 };
5884 return getNode(Opcode, DL, VTList, Ops);
5887 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5888 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5889 SDValue Ops[] = { N1, N2, N3, N4 };
5890 return getNode(Opcode, DL, VTList, Ops);
5893 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5894 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
5896 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5897 return getNode(Opcode, DL, VTList, Ops);
5900 SDVTList SelectionDAG::getVTList(EVT VT) {
5901 return makeVTList(SDNode::getValueTypeList(VT), 1);
5904 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
5905 FoldingSetNodeID ID;
5907 ID.AddInteger(VT1.getRawBits());
5908 ID.AddInteger(VT2.getRawBits());
5911 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5913 EVT *Array = Allocator.Allocate<EVT>(2);
5916 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
5917 VTListMap.InsertNode(Result, IP);
5919 return Result->getSDVTList();
5922 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
5923 FoldingSetNodeID ID;
5925 ID.AddInteger(VT1.getRawBits());
5926 ID.AddInteger(VT2.getRawBits());
5927 ID.AddInteger(VT3.getRawBits());
5930 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5932 EVT *Array = Allocator.Allocate<EVT>(3);
5936 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
5937 VTListMap.InsertNode(Result, IP);
5939 return Result->getSDVTList();
5942 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
5943 FoldingSetNodeID ID;
5945 ID.AddInteger(VT1.getRawBits());
5946 ID.AddInteger(VT2.getRawBits());
5947 ID.AddInteger(VT3.getRawBits());
5948 ID.AddInteger(VT4.getRawBits());
5951 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5953 EVT *Array = Allocator.Allocate<EVT>(4);
5958 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
5959 VTListMap.InsertNode(Result, IP);
5961 return Result->getSDVTList();
5964 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
5965 unsigned NumVTs = VTs.size();
5966 FoldingSetNodeID ID;
5967 ID.AddInteger(NumVTs);
5968 for (unsigned index = 0; index < NumVTs; index++) {
5969 ID.AddInteger(VTs[index].getRawBits());
5973 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5975 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
5976 std::copy(VTs.begin(), VTs.end(), Array);
5977 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
5978 VTListMap.InsertNode(Result, IP);
5980 return Result->getSDVTList();
5984 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
5985 /// specified operands. If the resultant node already exists in the DAG,
5986 /// this does not modify the specified node, instead it returns the node that
5987 /// already exists. If the resultant node does not exist in the DAG, the
5988 /// input node is returned. As a degenerate case, if you specify the same
5989 /// input operands as the node already has, the input node is returned.
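///
/// Illustrative sketch, assuming N is an existing three-operand node whose
/// chain operand should be replaced with NewChain:
///   SDNode *Res = DAG.UpdateNodeOperands(N, NewChain, N->getOperand(1),
///                                        N->getOperand(2));
///   if (Res != N) {
///     // An equivalent node already existed; N itself was left unchanged.
///   }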
5990 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
5991 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
5993 // Check to see if there is no change.
5994 if (Op == N->getOperand(0)) return N;
5996 // See if the modified node already exists.
5997 void *InsertPos = nullptr;
5998 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
6001 // Nope it doesn't. Remove the node from its current place in the maps.
6003 if (!RemoveNodeFromCSEMaps(N))
6004 InsertPos = nullptr;
6006 // Now we update the operands.
6007 N->OperandList[0].set(Op);
6009 // If this gets put into a CSE map, add it.
6010 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
6014 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
6015 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
6017 // Check to see if there is no change.
6018 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
6019 return N; // No operands changed, just return the input node.
6021 // See if the modified node already exists.
6022 void *InsertPos = nullptr;
6023 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
6026 // Nope it doesn't. Remove the node from its current place in the maps.
6028 if (!RemoveNodeFromCSEMaps(N))
6029 InsertPos = nullptr;
6031 // Now we update the operands.
6032 if (N->OperandList[0] != Op1)
6033 N->OperandList[0].set(Op1);
6034 if (N->OperandList[1] != Op2)
6035 N->OperandList[1].set(Op2);
6037 // If this gets put into a CSE map, add it.
6038 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
6042 SDNode *SelectionDAG::
6043 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
6044 SDValue Ops[] = { Op1, Op2, Op3 };
6045 return UpdateNodeOperands(N, Ops);
6048 SDNode *SelectionDAG::
6049 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
6050 SDValue Op3, SDValue Op4) {
6051 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
6052 return UpdateNodeOperands(N, Ops);
6055 SDNode *SelectionDAG::
6056 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
6057 SDValue Op3, SDValue Op4, SDValue Op5) {
6058 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
6059 return UpdateNodeOperands(N, Ops);
6062 SDNode *SelectionDAG::
6063 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
6064 unsigned NumOps = Ops.size();
6065 assert(N->getNumOperands() == NumOps &&
6066 "Update with wrong number of operands");
6068 // If no operands changed just return the input node.
6069 if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
6072 // See if the modified node already exists.
6073 void *InsertPos = nullptr;
6074 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
6077 // Nope it doesn't. Remove the node from its current place in the maps.
6079 if (!RemoveNodeFromCSEMaps(N))
6080 InsertPos = nullptr;
6082 // Now we update the operands.
6083 for (unsigned i = 0; i != NumOps; ++i)
6084 if (N->OperandList[i] != Ops[i])
6085 N->OperandList[i].set(Ops[i]);
6087 // If this gets put into a CSE map, add it.
6088 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
6092 /// DropOperands - Release the operands and set this node to have zero operands.
6094 void SDNode::DropOperands() {
6095 // Unlike the code in MorphNodeTo that does this, we don't need to
6096 // watch for dead nodes here.
6097 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
6103 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a MachineInstr opcode.
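///
/// Illustrative sketch from an instruction selector; MyTarget::ADDrr is a
/// hypothetical target opcode and Op0/Op1 are assumed, already-selected
/// operands:
///   CurDAG->SelectNodeTo(N, MyTarget::ADDrr, MVT::i32, Op0, Op1);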
6106 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6108 SDVTList VTs = getVTList(VT);
6109 return SelectNodeTo(N, MachineOpc, VTs, None);
6112 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6113 EVT VT, SDValue Op1) {
6114 SDVTList VTs = getVTList(VT);
6115 SDValue Ops[] = { Op1 };
6116 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6119 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6120 EVT VT, SDValue Op1,
6122 SDVTList VTs = getVTList(VT);
6123 SDValue Ops[] = { Op1, Op2 };
6124 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6127 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6128 EVT VT, SDValue Op1,
6129 SDValue Op2, SDValue Op3) {
6130 SDVTList VTs = getVTList(VT);
6131 SDValue Ops[] = { Op1, Op2, Op3 };
6132 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6135 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6136 EVT VT, ArrayRef<SDValue> Ops) {
6137 SDVTList VTs = getVTList(VT);
6138 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6141 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6142 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
6143 SDVTList VTs = getVTList(VT1, VT2);
6144 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6147 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6149 SDVTList VTs = getVTList(VT1, VT2);
6150 return SelectNodeTo(N, MachineOpc, VTs, None);
6153 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6154 EVT VT1, EVT VT2, EVT VT3,
6155 ArrayRef<SDValue> Ops) {
6156 SDVTList VTs = getVTList(VT1, VT2, VT3);
6157 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6160 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6162 SDValue Op1, SDValue Op2) {
6163 SDVTList VTs = getVTList(VT1, VT2);
6164 SDValue Ops[] = { Op1, Op2 };
6165 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6168 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6169 SDVTList VTs,ArrayRef<SDValue> Ops) {
6170 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
6171 // Reset the NodeID to -1.
6174 ReplaceAllUsesWith(N, New);
6180 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
6181 /// the line number information on the merged node since it is not possible to
6182 /// preserve the information that the operation is associated with multiple lines.
6183 /// This will make the debugger work better at -O0, where there is a higher
6184 /// probability of having other instructions associated with that line.
6186 /// For IROrder, we keep the smaller of the two.
6187 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
6188 DebugLoc NLoc = N->getDebugLoc();
6189 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
6190 N->setDebugLoc(DebugLoc());
6192 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
6193 N->setIROrder(Order);
6197 /// MorphNodeTo - This *mutates* the specified node to have the specified
6198 /// return type, opcode, and operands.
6200 /// Note that MorphNodeTo returns the resultant node. If there is already a
6201 /// node of the specified opcode and operands, it returns that node instead of
6202 /// the current one. Note that the SDLoc need not be the same.
6204 /// Using MorphNodeTo is faster than creating a new node and swapping it in
6205 /// with ReplaceAllUsesWith both because it often avoids allocating a new
6206 /// node, and because it doesn't require CSE recalculation for any of
6207 /// the node's users.
6209 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
6210 /// As a consequence it isn't appropriate to use from within the DAG combiner or
6211 /// the legalizer which maintain worklists that would need to be updated when
6212 /// deleting things.
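///
/// Illustrative sketch, assuming N produces a single i32 result and Opc, A
/// and B are a valid opcode and operands for it:
///   SDValue NewOps[] = { A, B };
///   SDNode *Res = DAG.MorphNodeTo(N, Opc, DAG.getVTList(MVT::i32), NewOps);
///   // Res may be a pre-existing identical node rather than N itself.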
6213 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
6214 SDVTList VTs, ArrayRef<SDValue> Ops) {
6215 // If an identical node already exists, use it.
6217 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
6218 FoldingSetNodeID ID;
6219 AddNodeIDNode(ID, Opc, VTs, Ops);
6220 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
6221 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
6224 if (!RemoveNodeFromCSEMaps(N))
6227 // Start the morphing.
6229 N->ValueList = VTs.VTs;
6230 N->NumValues = VTs.NumVTs;
6232 // Clear the operands list, updating used nodes to remove this from their
6233 // use list. Keep track of any operands that become dead as a result.
6234 SmallPtrSet<SDNode*, 16> DeadNodeSet;
6235 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
6237 SDNode *Used = Use.getNode();
6239 if (Used->use_empty())
6240 DeadNodeSet.insert(Used);
6243 // For MachineNode, initialize the memory references information.
6244 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
6245 MN->setMemRefs(nullptr, nullptr);
6247 // Swap for an appropriately sized array from the recycler.
6249 createOperands(N, Ops);
6251 // Delete any nodes that are still dead after adding the uses for the new operands.
6253 if (!DeadNodeSet.empty()) {
6254 SmallVector<SDNode *, 16> DeadNodes;
6255 for (SDNode *N : DeadNodeSet)
6257 DeadNodes.push_back(N);
6258 RemoveDeadNodes(DeadNodes);
6262 CSEMap.InsertNode(N, IP); // Memoize the new node.
6267 /// getMachineNode - These are used for target selectors to create a new node
6268 /// with specified return type(s), MachineInstr opcode, and operands.
6270 /// Note that getMachineNode returns the resultant node. If there is already a
6271 /// node of the specified opcode and operands, it returns that node instead of
6272 /// the current one.
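///
/// Illustrative sketch: materialize an IMPLICIT_DEF of type i32 (dl is an
/// assumed SDLoc):
///   MachineSDNode *Def =
///       DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i32);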
6273 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6275 SDVTList VTs = getVTList(VT);
6276 return getMachineNode(Opcode, dl, VTs, None);
6279 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6280 EVT VT, SDValue Op1) {
6281 SDVTList VTs = getVTList(VT);
6282 SDValue Ops[] = { Op1 };
6283 return getMachineNode(Opcode, dl, VTs, Ops);
6286 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6287 EVT VT, SDValue Op1, SDValue Op2) {
6288 SDVTList VTs = getVTList(VT);
6289 SDValue Ops[] = { Op1, Op2 };
6290 return getMachineNode(Opcode, dl, VTs, Ops);
6293 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6294 EVT VT, SDValue Op1, SDValue Op2,
6296 SDVTList VTs = getVTList(VT);
6297 SDValue Ops[] = { Op1, Op2, Op3 };
6298 return getMachineNode(Opcode, dl, VTs, Ops);
6301 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6302 EVT VT, ArrayRef<SDValue> Ops) {
6303 SDVTList VTs = getVTList(VT);
6304 return getMachineNode(Opcode, dl, VTs, Ops);
6307 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6308 EVT VT1, EVT VT2, SDValue Op1,
6310 SDVTList VTs = getVTList(VT1, VT2);
6311 SDValue Ops[] = { Op1, Op2 };
6312 return getMachineNode(Opcode, dl, VTs, Ops);
6315 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6316 EVT VT1, EVT VT2, SDValue Op1,
6317 SDValue Op2, SDValue Op3) {
6318 SDVTList VTs = getVTList(VT1, VT2);
6319 SDValue Ops[] = { Op1, Op2, Op3 };
6320 return getMachineNode(Opcode, dl, VTs, Ops);
6323 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6325 ArrayRef<SDValue> Ops) {
6326 SDVTList VTs = getVTList(VT1, VT2);
6327 return getMachineNode(Opcode, dl, VTs, Ops);
6330 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6331 EVT VT1, EVT VT2, EVT VT3,
6332 SDValue Op1, SDValue Op2) {
6333 SDVTList VTs = getVTList(VT1, VT2, VT3);
6334 SDValue Ops[] = { Op1, Op2 };
6335 return getMachineNode(Opcode, dl, VTs, Ops);
6338 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6339 EVT VT1, EVT VT2, EVT VT3,
6340 SDValue Op1, SDValue Op2,
6342 SDVTList VTs = getVTList(VT1, VT2, VT3);
6343 SDValue Ops[] = { Op1, Op2, Op3 };
6344 return getMachineNode(Opcode, dl, VTs, Ops);
6347 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6348 EVT VT1, EVT VT2, EVT VT3,
6349 ArrayRef<SDValue> Ops) {
6350 SDVTList VTs = getVTList(VT1, VT2, VT3);
6351 return getMachineNode(Opcode, dl, VTs, Ops);
6354 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6355 ArrayRef<EVT> ResultTys,
6356 ArrayRef<SDValue> Ops) {
6357 SDVTList VTs = getVTList(ResultTys);
6358 return getMachineNode(Opcode, dl, VTs, Ops);
6361 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
6363 ArrayRef<SDValue> Ops) {
6364 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
6369 FoldingSetNodeID ID;
6370 AddNodeIDNode(ID, ~Opcode, VTs, Ops);
6372 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
6373 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
6377 // Allocate a new MachineSDNode.
6378 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6379 createOperands(N, Ops);
6382 CSEMap.InsertNode(N, IP);
6388 /// getTargetExtractSubreg - A convenience function for creating
6389 /// TargetOpcode::EXTRACT_SUBREG nodes.
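///
/// Illustrative sketch, assuming Val64 is a 64-bit value and AArch64::sub_32
/// is the target's subregister index for the low 32 bits:
///   SDValue Lo32 =
///       DAG.getTargetExtractSubreg(AArch64::sub_32, DL, MVT::i32, Val64);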
6390 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
6392 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
6393 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
6394 VT, Operand, SRIdxVal);
6395 return SDValue(Subreg, 0);
6398 /// getTargetInsertSubreg - A convenience function for creating
6399 /// TargetOpcode::INSERT_SUBREG nodes.
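///
/// Illustrative sketch, assuming the same AArch64::sub_32 index as above and
/// a 64-bit value Val64 receiving the 32-bit value Lo32 in its low half:
///   SDValue Full = DAG.getTargetInsertSubreg(AArch64::sub_32, DL, MVT::i64,
///                                            Val64, Lo32);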
6400 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
6401 SDValue Operand, SDValue Subreg) {
6402 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
6403 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
6404 VT, Operand, Subreg, SRIdxVal);
6405 return SDValue(Result, 0);
6408 /// getNodeIfExists - Get the specified node if it's already available, or
6409 /// else return NULL.
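///
/// Illustrative sketch: check whether (add X, Y) of type VT already exists in
/// the DAG without creating it (X, Y and VT are assumed):
///   SDValue AddOps[] = { X, Y };
///   if (SDNode *Existing =
///           DAG.getNodeIfExists(ISD::ADD, DAG.getVTList(VT), AddOps, nullptr))
///     ; // reuse Existing instead of building a new node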
6410 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
6411 ArrayRef<SDValue> Ops,
6412 const SDNodeFlags *Flags) {
6413 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
6414 FoldingSetNodeID ID;
6415 AddNodeIDNode(ID, Opcode, VTList, Ops);
6417 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
6419 E->intersectFlagsWith(Flags);
6426 /// getDbgValue - Creates an SDDbgValue node.
6429 SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N,
6430 unsigned R, bool IsIndirect, uint64_t Off,
6431 const DebugLoc &DL, unsigned O) {
6432 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6433 "Expected inlined-at fields to agree");
6434 return new (DbgInfo->getAlloc())
6435 SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O);
6439 SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr,
6440 const Value *C, uint64_t Off,
6441 const DebugLoc &DL, unsigned O) {
6442 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6443 "Expected inlined-at fields to agree");
6444 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, Off, DL, O);
6448 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr,
6449 unsigned FI, uint64_t Off,
6452 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6453 "Expected inlined-at fields to agree");
6454 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, Off, DL, O);
6459 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
6460 /// pointed to by a use iterator is deleted, increment the use iterator
6461 /// so that it doesn't dangle.
6463 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
6464 SDNode::use_iterator &UI;
6465 SDNode::use_iterator &UE;
6467 void NodeDeleted(SDNode *N, SDNode *E) override {
6468 // Increment the iterator as needed.
6469 while (UI != UE && N == *UI)
6474 RAUWUpdateListener(SelectionDAG &d,
6475 SDNode::use_iterator &ui,
6476 SDNode::use_iterator &ue)
6477 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
6482 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6483 /// This can cause recursive merging of nodes in the DAG.
6485 /// This version assumes From has a single result value.
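///
/// Illustrative sketch: fold away a node N that is known to be a no-op by
/// redirecting all of its uses to its first operand:
///   DAG.ReplaceAllUsesWith(SDValue(N, 0), N->getOperand(0));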
6487 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
6488 SDNode *From = FromN.getNode();
6489 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
6490 "Cannot replace with this method!");
6491 assert(From != To.getNode() && "Cannot replace uses of with self");
6493 // Preserve Debug Values
6494 TransferDbgValues(FromN, To);
6496 // Iterate over all the existing uses of From. New uses will be added
6497 // to the beginning of the use list, which we avoid visiting.
6498 // This specifically avoids visiting uses of From that arise while the
6499 // replacement is happening, because any such uses would be the result
6500 // of CSE: If an existing node looks like From after one of its operands
6501 // is replaced by To, we don't want to replace all of its users with To
6502 // too. See PR3018 for more info.
6503 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6504 RAUWUpdateListener Listener(*this, UI, UE);
6508 // This node is about to morph, remove its old self from the CSE maps.
6509 RemoveNodeFromCSEMaps(User);
6511 // A user can appear in a use list multiple times, and when this
6512 // happens the uses are usually next to each other in the list.
6513 // To help reduce the number of CSE recomputations, process all
6514 // the uses of this user that we can find this way.
6516 SDUse &Use = UI.getUse();
6519 } while (UI != UE && *UI == User);
6521 // Now that we have modified User, add it back to the CSE maps. If it
6522 // already exists there, recursively merge the results together.
6523 AddModifiedNodeToCSEMaps(User);
6527 // If we just RAUW'd the root, take note.
6528 if (FromN == getRoot())
6532 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6533 /// This can cause recursive merging of nodes in the DAG.
6535 /// This version assumes that for each value of From, there is a
6536 /// corresponding value in To in the same position with the same type.
6538 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
6540 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6541 assert((!From->hasAnyUseOfValue(i) ||
6542 From->getValueType(i) == To->getValueType(i)) &&
6543 "Cannot use this version of ReplaceAllUsesWith!");
6546 // Handle the trivial case.
6550 // Preserve Debug Info. Only do this if there's a use.
6551 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6552 if (From->hasAnyUseOfValue(i)) {
6553 assert((i < To->getNumValues()) && "Invalid To location");
6554 TransferDbgValues(SDValue(From, i), SDValue(To, i));
6557 // Iterate over just the existing users of From. See the comments in
6558 // the ReplaceAllUsesWith above.
6559 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6560 RAUWUpdateListener Listener(*this, UI, UE);
6564 // This node is about to morph, remove its old self from the CSE maps.
6565 RemoveNodeFromCSEMaps(User);
6567 // A user can appear in a use list multiple times, and when this
6568 // happens the uses are usually next to each other in the list.
6569 // To help reduce the number of CSE recomputations, process all
6570 // the uses of this user that we can find this way.
6572 SDUse &Use = UI.getUse();
6575 } while (UI != UE && *UI == User);
6577 // Now that we have modified User, add it back to the CSE maps. If it
6578 // already exists there, recursively merge the results together.
6579 AddModifiedNodeToCSEMaps(User);
6582 // If we just RAUW'd the root, take note.
6583 if (From == getRoot().getNode())
6584 setRoot(SDValue(To, getRoot().getResNo()));
6587 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6588 /// This can cause recursive merging of nodes in the DAG.
6590 /// This version can replace From with any result values. To must match the
6591 /// number and types of values returned by From.
6592 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
6593 if (From->getNumValues() == 1) // Handle the simple case efficiently.
6594 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
6596 // Preserve Debug Info.
6597 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6598 TransferDbgValues(SDValue(From, i), *To);
6600 // Iterate over just the existing users of From. See the comments in
6601 // the ReplaceAllUsesWith above.
6602 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6603 RAUWUpdateListener Listener(*this, UI, UE);
6607 // This node is about to morph, remove its old self from the CSE maps.
6608 RemoveNodeFromCSEMaps(User);
6610 // A user can appear in a use list multiple times, and when this
6611 // happens the uses are usually next to each other in the list.
6612 // To help reduce the number of CSE recomputations, process all
6613 // the uses of this user that we can find this way.
6615 SDUse &Use = UI.getUse();
6616 const SDValue &ToOp = To[Use.getResNo()];
6619 } while (UI != UE && *UI == User);
6621 // Now that we have modified User, add it back to the CSE maps. If it
6622 // already exists there, recursively merge the results together.
6623 AddModifiedNodeToCSEMaps(User);
6626 // If we just RAUW'd the root, take note.
6627 if (From == getRoot().getNode())
6628 setRoot(SDValue(To[getRoot().getResNo()]));
6631 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
6632 /// uses of other values produced by From.getNode() alone. The Deleted
6633 /// vector is handled the same way as for ReplaceAllUsesWith.
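///
/// Illustrative sketch, assuming N is a two-result node whose result 1 is a
/// chain that should be rerouted while result 0 stays in place:
///   DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewChain);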
6634 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
6635 // Handle the really simple, really trivial case efficiently.
6636 if (From == To) return;
6638 // Handle the simple, trivial case efficiently.
6639 if (From.getNode()->getNumValues() == 1) {
6640 ReplaceAllUsesWith(From, To);
6644 // Preserve Debug Info.
6645 TransferDbgValues(From, To);
6647 // Iterate over just the existing users of From. See the comments in
6648 // the ReplaceAllUsesWith above.
6649 SDNode::use_iterator UI = From.getNode()->use_begin(),
6650 UE = From.getNode()->use_end();
6651 RAUWUpdateListener Listener(*this, UI, UE);
6654 bool UserRemovedFromCSEMaps = false;
6656 // A user can appear in a use list multiple times, and when this
6657 // happens the uses are usually next to each other in the list.
6658 // To help reduce the number of CSE recomputations, process all
6659 // the uses of this user that we can find this way.
6661 SDUse &Use = UI.getUse();
6663 // Skip uses of different values from the same node.
6664 if (Use.getResNo() != From.getResNo()) {
6669 // If this node hasn't been modified yet, it's still in the CSE maps,
6670 // so remove its old self from the CSE maps.
6671 if (!UserRemovedFromCSEMaps) {
6672 RemoveNodeFromCSEMaps(User);
6673 UserRemovedFromCSEMaps = true;
6678 } while (UI != UE && *UI == User);
6680 // We are iterating over all uses of the From node, so if a use
6681 // doesn't use the specific value, no changes are made.
6682 if (!UserRemovedFromCSEMaps)
6685 // Now that we have modified User, add it back to the CSE maps. If it
6686 // already exists there, recursively merge the results together.
6687 AddModifiedNodeToCSEMaps(User);
6690 // If we just RAUW'd the root, take note.
6691 if (From == getRoot())
6696 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
6697 /// to record information about a use.
6704 /// operator< - Sort Memos by User.
6705 bool operator<(const UseMemo &L, const UseMemo &R) {
6706 return (intptr_t)L.User < (intptr_t)R.User;
6710 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
6711 /// uses of other values produced by From.getNode() alone. The same value
6712 /// may appear in both the From and To list. The Deleted vector is
6713 /// handled the same way as for ReplaceAllUsesWith.
6714 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
6717 // Handle the simple, trivial case efficiently.
6719 return ReplaceAllUsesOfValueWith(*From, *To);
6721 TransferDbgValues(*From, *To);
6723 // Read up all the uses and make records of them. This helps with
6724 // processing new uses that are introduced during the
6725 // replacement process.
6726 SmallVector<UseMemo, 4> Uses;
6727 for (unsigned i = 0; i != Num; ++i) {
6728 unsigned FromResNo = From[i].getResNo();
6729 SDNode *FromNode = From[i].getNode();
6730 for (SDNode::use_iterator UI = FromNode->use_begin(),
6731 E = FromNode->use_end(); UI != E; ++UI) {
6732 SDUse &Use = UI.getUse();
6733 if (Use.getResNo() == FromResNo) {
6734 UseMemo Memo = { *UI, i, &Use };
6735 Uses.push_back(Memo);
6740 // Sort the uses, so that all the uses from a given User are together.
6741 std::sort(Uses.begin(), Uses.end());
6743 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
6744 UseIndex != UseIndexEnd; ) {
6745 // We know that this user uses some value of From. If it is the right
6746 // value, update it.
6747 SDNode *User = Uses[UseIndex].User;
6749 // This node is about to morph, remove its old self from the CSE maps.
6750 RemoveNodeFromCSEMaps(User);
6752 // The Uses array is sorted, so all the uses for a given User
6753 // are next to each other in the list.
6754 // To help reduce the number of CSE recomputations, process all
6755 // the uses of this user that we can find this way.
6757 unsigned i = Uses[UseIndex].Index;
6758 SDUse &Use = *Uses[UseIndex].Use;
6762 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
6764 // Now that we have modified User, add it back to the CSE maps. If it
6765 // already exists there, recursively merge the results together.
6766 AddModifiedNodeToCSEMaps(User);
6770 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
6771 /// based on their topological order. It returns the maximum id and a vector
6772 /// of the SDNodes* in assigned order by reference.
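///
/// Illustrative sketch: after this call a simple forward walk over the node
/// list visits every node after all of its operands:
///   DAG.AssignTopologicalOrder();
///   for (SDNode &N : DAG.allnodes()) {
///     // Each operand of N now has a smaller NodeId than N.
///   }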
6773 unsigned SelectionDAG::AssignTopologicalOrder() {
6775 unsigned DAGSize = 0;
6777 // SortedPos tracks the progress of the algorithm. Nodes before it are
6778 // sorted, nodes after it are unsorted. When the algorithm completes
6779 // it is at the end of the list.
6780 allnodes_iterator SortedPos = allnodes_begin();
6782 // Visit all the nodes. Move nodes with no operands to the front of
6783 // the list immediately. Annotate nodes that do have operands with their
6784 // operand count. Before we do this, the Node Id fields of the nodes
6785 // may contain arbitrary values. After, the Node Id fields for nodes
6786 // before SortedPos will contain the topological sort index, and the
6787 // Node Id fields for nodes at SortedPos and after will contain the
6788 // count of outstanding operands.
6789 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
6791 checkForCycles(N, this);
6792 unsigned Degree = N->getNumOperands();
6794 // A node with no operands; add it to the result array immediately.
6795 N->setNodeId(DAGSize++);
6796 allnodes_iterator Q(N);
6798 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
6799 assert(SortedPos != AllNodes.end() && "Overran node list");
6802 // Temporarily use the Node Id as scratch space for the degree count.
6803 N->setNodeId(Degree);
6807 // Visit all the nodes. As we iterate, move nodes into sorted order,
6808 // such that by the time the end is reached all nodes will be sorted.
6809 for (SDNode &Node : allnodes()) {
6811 checkForCycles(N, this);
6812 // N is in sorted position, so all its uses have one less operand
6813 // that needs to be sorted.
6814 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
6817 unsigned Degree = P->getNodeId();
6818 assert(Degree != 0 && "Invalid node degree");
6821 // All of P's operands are sorted, so P may be sorted now.
6822 P->setNodeId(DAGSize++);
6823 if (P->getIterator() != SortedPos)
6824 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
6825 assert(SortedPos != AllNodes.end() && "Overran node list");
6828 // Update P's outstanding operand count.
6829 P->setNodeId(Degree);
6832 if (Node.getIterator() == SortedPos) {
6834 allnodes_iterator I(N);
6836 dbgs() << "Overran sorted position:\n";
6837 S->dumprFull(this); dbgs() << "\n";
6838 dbgs() << "Checking if this is due to cycles\n";
6839 checkForCycles(this, true);
6841 llvm_unreachable(nullptr);
6845 assert(SortedPos == AllNodes.end() &&
6846 "Topological sort incomplete!");
6847 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
6848 "First node in topological sort is not the entry token!");
6849 assert(AllNodes.front().getNodeId() == 0 &&
6850 "First node in topological sort has non-zero id!");
6851 assert(AllNodes.front().getNumOperands() == 0 &&
6852 "First node in topological sort has operands!");
6853 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
6854 "Last node in topologic sort has unexpected id!");
6855 assert(AllNodes.back().use_empty() &&
6856 "Last node in topologic sort has users!");
6857 assert(DAGSize == allnodes_size() && "Node count mismatch!");
6861 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
6862 /// value is produced by SD.
6863 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
6865 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
6866 SD->setHasDebugValue(true);
6868 DbgInfo->add(DB, SD, isParameter);
6871 /// TransferDbgValues - Transfer SDDbgValues. Called when nodes are being replaced.
6872 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
6873 if (From == To || !From.getNode()->getHasDebugValue())
6875 SDNode *FromNode = From.getNode();
6876 SDNode *ToNode = To.getNode();
6877 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
6878 SmallVector<SDDbgValue *, 2> ClonedDVs;
6879 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
6881 SDDbgValue *Dbg = *I;
6882 // Only add Dbgvalues attached to same ResNo.
6883 if (Dbg->getKind() == SDDbgValue::SDNODE &&
6884 Dbg->getSDNode() == From.getNode() &&
6885 Dbg->getResNo() == From.getResNo() && !Dbg->isInvalidated()) {
6886 assert(FromNode != ToNode &&
6887 "Should not transfer Debug Values intranode");
6889 getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode,
6890 To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(),
6891 Dbg->getDebugLoc(), Dbg->getOrder());
6892 ClonedDVs.push_back(Clone);
6893 Dbg->setIsInvalidated();
6896 for (SDDbgValue *I : ClonedDVs)
6897 AddDbgValue(I, ToNode, false);
//===----------------------------------------------------------------------===//
//                              SDNode Class
//===----------------------------------------------------------------------===//
bool llvm::isNullConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

bool llvm::isNullFPConstant(SDValue V) {
  ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
  return Const != nullptr && Const->isZero() && !Const->isNegative();
}

bool llvm::isAllOnesConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isAllOnesValue();
}

bool llvm::isOneConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isOne();
}

bool llvm::isBitwiseNot(SDValue V) {
  return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1));
}

ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here.
    // FIXME: We blindly ignore splats which include undef which is overly
    // pessimistic.
    if (CN && UndefElements.none() &&
        CN->getValueType(0) == N.getValueType().getScalarType())
      return CN;
  }

  return nullptr;
}

ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
    if (CN && UndefElements.none())
      return CN;
  }

  return nullptr;
}
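// Illustrative sketch (not compiled into this file): how a combine might use
// the predicates above so the same code handles a scalar constant and a
// constant-splat BUILD_VECTOR uniformly. The helper name is hypothetical.
#if 0
static bool exampleIsXorWithAllOnes(SDValue V) {
  // Like isBitwiseNot(), but also accepts splat-vector all-ones operands.
  if (V.getOpcode() != ISD::XOR)
    return false;
  if (ConstantSDNode *C = isConstOrConstSplat(V.getOperand(1)))
    return C->isAllOnesValue();
  return false;
}
#endif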
HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         const DebugLoc &DL,
                                         const GlobalValue *GA, EVT VT,
                                         int64_t o, unsigned char TF)
    : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
                                         EVT VT, unsigned SrcAS,
                                         unsigned DestAS)
    : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
      SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  MemSDNodeBits.IsVolatile = MMO->isVolatile();
  MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
  MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
  MemSDNodeBits.IsInvariant = MMO->isInvariant();

  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}

/// Profile - Gather unique data for the node.
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {
  struct EVTArray {
    std::vector<EVT> VTs;

    EVTArray() {
      VTs.reserve(MVT::LAST_VALUETYPE);
      for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
        VTs.push_back(MVT((MVT::SimpleValueType)i));
    }
  };
} // end anonymous namespace

static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true> > VTMutex;
/// getValueTypeList - Return a pointer to the specified value type.
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}
/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there are any uses of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}
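// Illustrative sketch (not compiled into this file): these per-result queries
// are the usual way to ask whether a particular result of a multi-result node
// (for example the chain output of a load) is actually consumed. Names are
// hypothetical.
#if 0
static bool exampleChainResultIsUnused(const LoadSDNode *Ld) {
  // For a load, result 0 is the loaded value and result 1 is the output chain.
  return Ld->hasNUsesOfValue(0, 1);
}
#endif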
/// isOnlyUserOf - Return true if this node is the only user of N.
bool SDNode::isOnlyUserOf(const SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    if (*I == this)
      Seen = true;
    else
      return false;
  }
  return Seen;
}

/// isOperandOf - Return true if this node is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (*this == Op)
      return true;
  return false;
}

bool SDNode::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (this == Op.getNode())
      return true;
  return false;
}
/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in; it does not do an exhaustive search.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply, we just want to be able to see through
  // TokenFactor's etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel. If any
  // of the operands of the TF does not reach dest, then we cannot do the xform.
  if (getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
        return false;
    return true;
  }

  // Loads don't have side effects, look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
  }
  return false;
}
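// Illustrative sketch (not compiled into this file): the typical query
// pattern, e.g. when a combine wants to know whether one chain value reaches
// another with nothing in between that could touch memory. The variable and
// function names are hypothetical.
#if 0
static bool exampleChainsAreDirectlyConnected(SDValue LaterChain,
                                              SDValue EarlierChain) {
  // Looks through at most a couple of TokenFactors / non-volatile loads,
  // using the default search depth from the declaration.
  return LaterChain.reachesChainWithoutSideEffects(EarlierChain);
}
#endif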
bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}

uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
  assert(Num < NumOperands && "Invalid child # of SDNode!");
  return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
}

const SDNodeFlags *SDNode::getFlags() const {
  if (auto *FlagsNode = dyn_cast<BinaryWithFlagsSDNode>(this))
    return &FlagsNode->Flags;
  return nullptr;
}

void SDNode::intersectFlagsWith(const SDNodeFlags *Flags) {
  if (auto *FlagsNode = dyn_cast<BinaryWithFlagsSDNode>(this))
    FlagsNode->Flags.intersectWith(Flags);
}
SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] =
            getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
                    getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default: {
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
                                N->getFlags()));
      break;
    }
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                               getShiftAmountOperand(Operands[0].getValueType(),
                                                     Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
  return getBuildVector(VecVT, dl, Scalars);
}
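// Illustrative sketch (not compiled into this file): UnrollVectorOp is
// typically invoked from lowering/legalization when a target has no vector
// instruction for an opcode, e.g. turning a <4 x i32> operation into four
// scalar operations rebuilt with BUILD_VECTOR. Names are hypothetical.
#if 0
static SDValue exampleExpandUnsupportedVectorOp(SelectionDAG &DAG, SDNode *N) {
  // ResNE == 0 means "produce as many scalar elements as the result type has".
  return DAG.UnrollVectorOp(N, /*ResNE=*/0);
}
#endif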
bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                  LoadSDNode *Base,
                                                  unsigned Bytes,
                                                  int Dist) const {
  if (LD->isVolatile() || Base->isVolatile())
    return false;
  if (LD->isIndexed() || Base->isIndexed())
    return false;
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue Loc = LD->getOperand(1);
  SDValue BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI.getObjectSize(FI);
    int BFS = MFI.getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
  }

  // Handle X + C.
  if (isBaseWithConstantOffset(Loc)) {
    int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
    if (Loc.getOperand(0) == BaseLoc) {
      // If the base location is a simple address with no offset itself, then
      // the second load's first add operand should be the base address.
      if (LocOffset == Dist * (int)Bytes)
        return true;
    } else if (isBaseWithConstantOffset(BaseLoc)) {
      // The base location itself has an offset, so subtract that value from
      // the second load's offset before comparing to distance * size.
      int64_t BOffset =
          cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
      if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
        if ((LocOffset - BOffset) == Dist * (int)Bytes)
          return true;
      }
    }
  }

  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}
/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
    llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
                           getDataLayout());
    unsigned AlignBits = KnownZero.countTrailingOnes();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = 1 << 31;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != (1 << 31)) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}
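// Illustrative sketch (not compiled into this file): a memory-op lowering
// might consult InferPtrAlignment to decide whether a wider, aligned access
// is safe. The threshold of 4 bytes and the names are hypothetical.
#if 0
static bool exampleCanUseAlignedWordAccess(const SelectionDAG &DAG,
                                           SDValue Ptr) {
  // InferPtrAlignment returns 0 when nothing can be inferred, so this
  // comparison conservatively fails in that case.
  return DAG.InferPtrAlignment(Ptr) >= 4;
}
#endif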
/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector()) {
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  } else {
    unsigned NumElements = VT.getVectorNumElements();
    assert(!(NumElements & 1) && "Splitting vector, but not in half!");
    LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                   NumElements/2);
  }
  return std::make_pair(LoVT, HiVT);
}
/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
         N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), DL,
                           TLI->getVectorIdxTy(getDataLayout())));
  return std::make_pair(Lo, Hi);
}
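// Illustrative sketch (not compiled into this file): the common split pattern
// during type legalization, pairing GetSplitDestVTs with SplitVector. Names
// are hypothetical.
#if 0
static std::pair<SDValue, SDValue> exampleSplitInHalf(SelectionDAG &DAG,
                                                      SDValue Vec,
                                                      const SDLoc &DL) {
  std::pair<EVT, EVT> VTs = DAG.GetSplitDestVTs(Vec.getValueType());
  return DAG.SplitVector(Vec, DL, VTs.first, VTs.second);
}
#endif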
void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}
// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}
bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
                                        APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool isBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned sz = VT.getSizeInBits();
  if (MinSplatBits > sz)
    return false;

  SplatValue = APInt(sz, 0);
  SplatUndef = APInt(sz, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned int nOps = getNumOperands();
  assert(nOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltBitSize = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < nOps; ++j) {
    unsigned i = isBigEndian ? nOps-1-j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltBitSize;

    if (OpVal.isUndef())
      SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
    else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
                    zextOrTrunc(sz) << BitPos;
    else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);
  while (sz > 8) {
    unsigned HalfSize = sz / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    sz = HalfSize;
  }

  SplatBitSize = sz;
  return true;
}
SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    assert(getOperand(0).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(0);
  }

  return Splatted;
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}
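// Illustrative sketch (not compiled into this file): querying a BUILD_VECTOR
// for a splat of a particular value while tolerating undef lanes, as a
// combine might do before folding. Names are hypothetical.
#if 0
static bool exampleIsSplatOfOne(const BuildVectorSDNode *BV) {
  BitVector UndefElements;
  ConstantSDNode *C = BV->getConstantSplatNode(&UndefElements);
  // Unlike isConstOrConstSplat above, this accepts vectors in which some
  // lanes are undef, as long as every defined lane equals 1.
  return C && C->isOne();
}
#endif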
int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}
bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the
  // first non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}
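// Illustrative sketch (not compiled into this file): isSplatMask backs
// ShuffleVectorSDNode::isSplat(); a target might query the splatted source
// lane like this. Names are hypothetical.
#if 0
static int exampleGetSplatLane(const ShuffleVectorSDNode *SVN) {
  // getSplatIndex() is only meaningful when the mask really is a splat.
  return SVN->isSplat() ? SVN->getSplatIndex() : -1;
}
#endif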
// \brief Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}
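// Illustrative sketch (not compiled into this file): these queries let
// reassociation-style combines treat "constant-ish" operands uniformly, e.g.
// when canonicalizing constants to the right-hand side of commutative
// operations. Names are hypothetical.
#if 0
static bool exampleShouldSwapCommutativeOperands(SelectionDAG &DAG,
                                                 SDValue LHS, SDValue RHS) {
  // Prefer the constant (or constant build_vector) on the right-hand side.
  return DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
         !DAG.isConstantIntBuildVectorOrConstantInt(RHS);
}
#endif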
#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif
void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}