//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cmath>
#include <utility>

using namespace llvm;
/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}
// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//
/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}
bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//
bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  EVT EltVT = N->getValueType(0).getVectorElementType();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs) &&
         EltVT.getSizeInBits() >= SplatBitSize;
}
// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;
  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // bits are all ones.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // bits are all zeros.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}
193 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
194 if (N->getOpcode() != ISD::BUILD_VECTOR)
197 for (const SDValue &Op : N->op_values()) {
200 if (!isa<ConstantSDNode>(Op))
206 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
207 if (N->getOpcode() != ISD::BUILD_VECTOR)
210 for (const SDValue &Op : N->op_values()) {
213 if (!isa<ConstantFPSDNode>(Op))
219 bool ISD::allOperandsUndef(const SDNode *N) {
220 // Return false if the node has no operands.
221 // This is "logically inconsistent" with the definition of "all" but
222 // is probably the desired behavior.
223 if (N->getNumOperands() == 0)
226 for (const SDValue &Op : N->op_values())
ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}
248 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
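  // For example, SETULT has the U and L bits set; exchanging the L and G bits
  // yields SETUGT, which is the predicate that holds once the operands have
  // been swapped (x < y is the same test as y > x).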
251 unsigned OldL = (Operation >> 2) & 1;
252 unsigned OldG = (Operation >> 1) & 1;
253 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
254 (OldL << 1) | // New G bit
255 (OldG << 2)); // New L bit.
258 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.
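  // For example, the inverse of the integer predicate SETLT is SETGE, while
  // the inverse of the floating-point predicate SETOLT is SETUGE: the
  // unordered bit flips too, so NaN operands end up on the inverted side.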
265 if (Operation > ISD::SETTRUE2)
266 Operation &= ~8; // Don't let N and U bits get set.
268 return ISD::CondCode(Operation);
272 /// For an integer comparison, return 1 if the comparison is a signed operation
273 /// and 2 if the result is an unsigned comparison. Return zero if the operation
274 /// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETUGT:
  case ISD::SETULE:
  case ISD::SETUGE: return 2;
  }
}
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
335 //===----------------------------------------------------------------------===//
336 // SDNode Profile Support
337 //===----------------------------------------------------------------------===//
339 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}
345 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
346 /// solely with their pointer.
347 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
348 ID.AddPointer(VTList.VTs);
351 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
353 static void AddNodeIDOperands(FoldingSetNodeID &ID,
354 ArrayRef<SDValue> Ops) {
355 for (auto& Op : Ops) {
356 ID.AddPointer(Op.getNode());
357 ID.AddInteger(Op.getResNo());
361 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
363 static void AddNodeIDOperands(FoldingSetNodeID &ID,
364 ArrayRef<SDUse> Ops) {
365 for (auto& Op : Ops) {
366 ID.AddPointer(Op.getNode());
367 ID.AddInteger(Op.getResNo());
371 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
372 SDVTList VTList, ArrayRef<SDValue> OpList) {
373 AddNodeIDOpcode(ID, OpC);
374 AddNodeIDValueTypes(ID, VTList);
375 AddNodeIDOperands(ID, OpList);
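// Taken together, the helpers above profile a node for CSE as (opcode,
// interned value-type-list pointer, [(operand node, result number), ...]);
// two nodes that produce the same FoldingSetNodeID are candidates for sharing
// through the CSEMap.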
378 /// If this is an SDNode with special info, add this info to the NodeID data.
379 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
380 switch (N->getOpcode()) {
381 case ISD::TargetExternalSymbol:
382 case ISD::ExternalSymbol:
384 llvm_unreachable("Should only be used on nodes with operands");
385 default: break; // Normal nodes don't need extra info.
386 case ISD::TargetConstant:
387 case ISD::Constant: {
388 const ConstantSDNode *C = cast<ConstantSDNode>(N);
389 ID.AddPointer(C->getConstantIntValue());
390 ID.AddBoolean(C->isOpaque());
393 case ISD::TargetConstantFP:
394 case ISD::ConstantFP: {
395 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
398 case ISD::TargetGlobalAddress:
399 case ISD::GlobalAddress:
400 case ISD::TargetGlobalTLSAddress:
401 case ISD::GlobalTLSAddress: {
402 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
403 ID.AddPointer(GA->getGlobal());
404 ID.AddInteger(GA->getOffset());
405 ID.AddInteger(GA->getTargetFlags());
408 case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
414 case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
420 case ISD::FrameIndex:
421 case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
426 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
427 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
429 case ISD::ConstantPool:
430 case ISD::TargetConstantPool: {
431 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
432 ID.AddInteger(CP->getAlignment());
433 ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
438 ID.AddInteger(CP->getTargetFlags());
441 case ISD::TargetIndex: {
442 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
443 ID.AddInteger(TI->getIndex());
444 ID.AddInteger(TI->getOffset());
445 ID.AddInteger(TI->getTargetFlags());
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
450 ID.AddInteger(LD->getMemoryVT().getRawBits());
451 ID.AddInteger(LD->getRawSubclassData());
452 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
457 ID.AddInteger(ST->getMemoryVT().getRawBits());
458 ID.AddInteger(ST->getRawSubclassData());
459 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
462 case ISD::ATOMIC_CMP_SWAP:
463 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
464 case ISD::ATOMIC_SWAP:
465 case ISD::ATOMIC_LOAD_ADD:
466 case ISD::ATOMIC_LOAD_SUB:
467 case ISD::ATOMIC_LOAD_AND:
468 case ISD::ATOMIC_LOAD_OR:
469 case ISD::ATOMIC_LOAD_XOR:
470 case ISD::ATOMIC_LOAD_NAND:
471 case ISD::ATOMIC_LOAD_MIN:
472 case ISD::ATOMIC_LOAD_MAX:
473 case ISD::ATOMIC_LOAD_UMIN:
474 case ISD::ATOMIC_LOAD_UMAX:
475 case ISD::ATOMIC_LOAD:
476 case ISD::ATOMIC_STORE: {
477 const AtomicSDNode *AT = cast<AtomicSDNode>(N);
478 ID.AddInteger(AT->getMemoryVT().getRawBits());
479 ID.AddInteger(AT->getRawSubclassData());
480 ID.AddInteger(AT->getPointerInfo().getAddrSpace());
483 case ISD::PREFETCH: {
484 const MemSDNode *PF = cast<MemSDNode>(N);
485 ID.AddInteger(PF->getPointerInfo().getAddrSpace());
488 case ISD::VECTOR_SHUFFLE: {
489 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
495 case ISD::TargetBlockAddress:
496 case ISD::BlockAddress: {
497 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
498 ID.AddPointer(BA->getBlockAddress());
499 ID.AddInteger(BA->getOffset());
500 ID.AddInteger(BA->getTargetFlags());
503 } // end switch (N->getOpcode())
505 // Target specific memory nodes could also have address spaces to check.
506 if (N->isTargetMemoryOpcode())
507 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
510 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
512 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
513 AddNodeIDOpcode(ID, N->getOpcode());
514 // Add the return value info.
515 AddNodeIDValueTypes(ID, N->getVTList());
516 // Add the operand info.
517 AddNodeIDOperands(ID, N->ops());
519 // Handle SDNode leafs with special info.
520 AddNodeIDCustom(ID, N);
523 //===----------------------------------------------------------------------===//
524 // SelectionDAG Class
525 //===----------------------------------------------------------------------===//
527 /// doNotCSE - Return true if CSE should not be performed for this node.
528 static bool doNotCSE(SDNode *N) {
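  // Note on the glue checks below: a glue (MVT::Glue) result ties a node to a
  // specific neighbouring node for scheduling and physical-register purposes,
  // so a glue producer must stay one-to-one with its consumer and is never
  // shared.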
529 if (N->getValueType(0) == MVT::Glue)
530 return true; // Never CSE anything that produces a flag.
  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::CONDCODE:
    return true;   // Never CSE these nodes.
  }
539 // Check that remaining values produced are not flags.
540 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
541 if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}
547 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
549 void SelectionDAG::RemoveDeadNodes() {
550 // Create a dummy node (which is not added to allnodes), that adds a reference
551 // to the root node, preventing it from being deleted.
552 HandleSDNode Dummy(getRoot());
554 SmallVector<SDNode*, 128> DeadNodes;
556 // Add all obviously-dead nodes to the DeadNodes worklist.
557 for (SDNode &Node : allnodes())
558 if (Node.use_empty())
559 DeadNodes.push_back(&Node);
561 RemoveDeadNodes(DeadNodes);
  // If the root changed (e.g. it was a dead load), update the root.
564 setRoot(Dummy.getValue());
567 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
568 /// given list, and any nodes that become unreachable as a result.
569 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
571 // Process the worklist, deleting the nodes and adding their uses to the
573 while (!DeadNodes.empty()) {
574 SDNode *N = DeadNodes.pop_back_val();
576 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
577 DUL->NodeDeleted(N, nullptr);
579 // Take the node out of the appropriate CSE map.
580 RemoveNodeFromCSEMaps(N);
582 // Next, brutally remove the operand list. This is safe to do, as there are
583 // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }
598 void SelectionDAG::RemoveDeadNode(SDNode *N){
599 SmallVector<SDNode*, 16> DeadNodes(1, N);
  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted.  (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());
606 RemoveDeadNodes(DeadNodes);
609 void SelectionDAG::DeleteNode(SDNode *N) {
610 // First take this out of the appropriate CSE map.
611 RemoveNodeFromCSEMaps(N);
613 // Finally, remove uses due to operands of this node, remove from the
614 // AllNodes list, and delete the node.
615 DeleteNodeNotInCSEMaps(N);
618 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
619 assert(N->getIterator() != AllNodes.begin() &&
620 "Cannot delete the entry node!");
621 assert(N->use_empty() && "Cannot delete a node that is not dead!");
  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}
629 void SDDbgInfo::erase(const SDNode *Node) {
630 DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}
638 void SelectionDAG::DeallocateNode(SDNode *N) {
639 // If we have operands, deallocate them.
642 // Set the opcode to DELETED_NODE to help catch bugs when node
643 // memory is reallocated.
644 N->NodeType = ISD::DELETED_NODE;
646 NodeAllocator.Deallocate(AllNodes.remove(N));
  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}
654 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
655 static void VerifySDNode(SDNode *N) {
656 switch (N->getOpcode()) {
659 case ISD::BUILD_PAIR: {
660 EVT VT = N->getValueType(0);
661 assert(N->getNumValues() == 1 && "Too many results!");
662 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
663 "Wrong return type!");
664 assert(N->getNumOperands() == 2 && "Wrong number of operands!");
665 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
666 "Mismatched operand types!");
667 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
668 "Wrong operand type!");
669 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
670 "Wrong return type size");
673 case ISD::BUILD_VECTOR: {
674 assert(N->getNumValues() == 1 && "Too many results!");
675 assert(N->getValueType(0).isVector() && "Wrong return type!");
676 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
677 "Wrong number of operands!");
678 EVT EltVT = N->getValueType(0).getVectorElementType();
679 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
680 assert((I->getValueType() == EltVT ||
681 (EltVT.isInteger() && I->getValueType().isInteger() &&
682 EltVT.bitsLE(I->getValueType()))) &&
683 "Wrong operand type!");
684 assert(I->getValueType() == N->getOperand(0).getValueType() &&
685 "Operands must all have the same type");
693 /// \brief Insert a newly allocated node into the DAG.
695 /// Handles insertion into the all nodes list and CSE map, as well as
696 /// verification and other common operations when a new node is allocated.
697 void SelectionDAG::InsertNode(SDNode *N) {
698 AllNodes.push_back(N);
700 N->PersistentId = NextPersistentId++;
/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
708 /// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
712 case ISD::HANDLENODE: return false; // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
719 case ISD::ExternalSymbol:
720 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
722 case ISD::TargetExternalSymbol: {
723 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
724 Erased = TargetExternalSymbols.erase(
725 std::pair<std::string,unsigned char>(ESN->getSymbol(),
726 ESN->getTargetFlags()));
729 case ISD::MCSymbol: {
730 auto *MCSN = cast<MCSymbolSDNode>(N);
731 Erased = MCSymbols.erase(MCSN->getMCSymbol());
734 case ISD::VALUETYPE: {
735 EVT VT = cast<VTSDNode>(N)->getVT();
736 if (VT.isExtended()) {
737 Erased = ExtendedValueTypeNodes.erase(VT);
739 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
740 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
745 // Remove it from the CSE Map.
746 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
747 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
748 Erased = CSEMap.RemoveNode(N);
752 // Verify that the node was actually in one of the CSE maps, unless it has a
753 // flag result (which cannot be CSE'd) or is one of the special cases that are
754 // not subject to CSE.
755 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
756 !N->isMachineOpcode() && !doNotCSE(N)) {
759 llvm_unreachable("Node is not in map!");
765 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
766 /// maps and modified in place. Add it back to the CSE maps, unless an identical
767 /// node already exists, in which case transfer all its users to the existing
768 /// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
777 // If there was already an existing matching node, use ReplaceAllUsesWith
778 // to replace the dead one with the existing one. This can cause
779 // recursive merging of other unrelated nodes down the line.
780 ReplaceAllUsesWith(N, Existing);
782 // N is now dead. Inform the listeners and delete it.
783 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
784 DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}
795 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
796 /// were replaced with those specified. If this node is never memoized,
797 /// return null, otherwise return a pointer to the slot it would take. If a
798 /// node already exists with these operands, the slot will be non-null.
799 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
804 SDValue Ops[] = { Op };
806 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
807 AddNodeIDCustom(ID, N);
808 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
810 if (const SDNodeFlags *Flags = N->getFlags())
811 Node->intersectFlagsWith(Flags);
815 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
816 /// were replaced with those specified. If this node is never memoized,
817 /// return null, otherwise return a pointer to the slot it would take. If a
818 /// node already exists with these operands, the slot will be non-null.
819 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
820 SDValue Op1, SDValue Op2,
825 SDValue Ops[] = { Op1, Op2 };
827 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
828 AddNodeIDCustom(ID, N);
829 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
831 if (const SDNodeFlags *Flags = N->getFlags())
832 Node->intersectFlagsWith(Flags);
837 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
838 /// were replaced with those specified. If this node is never memoized,
839 /// return null, otherwise return a pointer to the slot it would take. If a
840 /// node already exists with these operands, the slot will be non-null.
841 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
847 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
848 AddNodeIDCustom(ID, N);
849 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
851 if (const SDNodeFlags *Flags = N->getFlags())
852 Node->intersectFlagsWith(Flags);
856 unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
857 Type *Ty = VT == MVT::iPTR ?
858 PointerType::get(Type::getInt8Ty(*getContext()), 0) :
859 VT.getTypeForEVT(*getContext());
861 return getDataLayout().getABITypeAlignment(Ty);
864 // EntryNode could meaningfully have debug info if we can find it...
865 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
866 : TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
867 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
868 Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
869 UpdateListeners(nullptr) {
870 InsertNode(&EntryNode);
871 DbgInfo = new SDDbgInfo();
void SelectionDAG::init(MachineFunction &mf) {
  MF = &mf;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  Context = &mf.getFunction()->getContext();
}
881 SelectionDAG::~SelectionDAG() {
882 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
884 OperandRecycler.clear(OperandAllocator);
888 void SelectionDAG::allnodes_clear() {
889 assert(&*AllNodes.begin() == &EntryNode);
890 AllNodes.remove(AllNodes.begin());
891 while (!AllNodes.empty())
892 DeallocateNode(&AllNodes.front());
894 NextPersistentId = 0;
898 SDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, const SDLoc &DL,
899 SDVTList VTs, SDValue N1, SDValue N2,
900 const SDNodeFlags *Flags) {
901 SDValue Ops[] = {N1, N2};
903 if (isBinOpWithFlags(Opcode)) {
    // If no flags were passed in, use a default flags object.
    SDNodeFlags F;
    if (Flags == nullptr)
      Flags = &F;
909 auto *FN = newSDNode<BinaryWithFlagsSDNode>(Opcode, DL.getIROrder(),
910 DL.getDebugLoc(), VTs, *Flags);
    createOperands(FN, Ops);
    return FN;
  }
916 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);
  return N;
}
921 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
923 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
925 switch (N->getOpcode()) {
928 case ISD::ConstantFP:
929 llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
930 "debug location. Use another overload.");
936 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
937 const SDLoc &DL, void *&InsertPos) {
938 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
940 switch (N->getOpcode()) {
942 case ISD::ConstantFP:
943 // Erase debug location from the node if the node is used at several
944 // different places. Do not propagate one location to all uses as it
945 // will cause a worse single stepping debugging experience.
946 if (N->getDebugLoc() != DL.getDebugLoc())
947 N->setDebugLoc(DebugLoc());
950 // When the node's point of use is located earlier in the instruction
951 // sequence than its prior point of use, update its debug info to the
953 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
954 N->setDebugLoc(DL.getDebugLoc());
void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
964 OperandAllocator.Reset();
967 ExtendedValueTypeNodes.clear();
968 ExternalSymbols.clear();
969 TargetExternalSymbols.clear();
971 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
972 static_cast<CondCodeSDNode*>(nullptr));
973 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
974 static_cast<SDNode*>(nullptr));
976 EntryNode.UseList = nullptr;
977 InsertNode(&EntryNode);
978 Root = getEntryNode();
982 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
983 return VT.bitsGT(Op.getValueType()) ?
984 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
985 getNode(ISD::TRUNCATE, DL, VT, Op);
988 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
989 return VT.bitsGT(Op.getValueType()) ?
990 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
991 getNode(ISD::TRUNCATE, DL, VT, Op);
994 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
995 return VT.bitsGT(Op.getValueType()) ?
996 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
997 getNode(ISD::TRUNCATE, DL, VT, Op);
1000 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
1002 if (VT.bitsLE(Op.getValueType()))
1003 return getNode(ISD::TRUNCATE, SL, VT, Op);
1005 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1006 return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1009 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1010 assert(!VT.isVector() &&
1011 "getZeroExtendInReg should use the vector element type instead of "
1012 "the vector type!");
1013 if (Op.getValueType() == VT) return Op;
1014 unsigned BitWidth = Op.getScalarValueSizeInBits();
1015 APInt Imm = APInt::getLowBitsSet(BitWidth,
1016 VT.getSizeInBits());
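  // For example, zero-extending the low 8 bits of an i32 value in-register
  // uses Imm = 0xFF, so the node built below is (and Op, 255).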
1017 return getNode(ISD::AND, DL, Op.getValueType(), Op,
1018 getConstant(Imm, DL, Op.getValueType()));
1021 SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
1023 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1024 assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1025 "The sizes of the input and result must match in order to perform the "
1026 "extend in-register.");
1027 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1028 "The destination vector type must have fewer lanes than the input.");
1029 return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
1032 SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
1034 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1035 assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1036 "The sizes of the input and result must match in order to perform the "
1037 "extend in-register.");
1038 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1039 "The destination vector type must have fewer lanes than the input.");
1040 return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
1043 SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
1045 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1046 assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1047 "The sizes of the input and result must match in order to perform the "
1048 "extend in-register.");
1049 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1050 "The destination vector type must have fewer lanes than the input.");
1051 return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
1054 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1056 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
      getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
1060 return getNode(ISD::XOR, DL, VT, Val, NegOne);
1063 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue TrueValue;
  switch (TLI->getBooleanContents(VT)) {
1067 case TargetLowering::ZeroOrOneBooleanContent:
1068 case TargetLowering::UndefinedBooleanContent:
1069 TrueValue = getConstant(1, DL, VT);
1071 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1072 TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
1076 return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1079 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1080 bool isT, bool isO) {
1081 EVT EltVT = VT.getScalarType();
1082 assert((EltVT.getSizeInBits() >= 64 ||
1083 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1084 "getConstant with a uint64_t value that doesn't fit in the type!");
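  // The assert above accepts values whose bits beyond the element width are
  // all zero or all one: for MVT::i8, 255 passes (255 >> 8 == 0), as does a
  // sign-extended -1, but 256 fails (256 >> 8 == 1).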
1085 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1088 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
1089 bool isT, bool isO) {
1090 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1093 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
1094 EVT VT, bool isT, bool isO) {
1095 assert(VT.isInteger() && "Cannot create FP integer constant!");
1097 EVT EltVT = VT.getScalarType();
1098 const ConstantInt *Elt = &Val;
1100 // In some cases the vector type is legal but the element type is illegal and
1101 // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1102 // inserted value (the type does not need to match the vector element type).
1103 // Any extra bits introduced will be truncated away.
1104 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1105 TargetLowering::TypePromoteInteger) {
1106 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1107 APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1108 Elt = ConstantInt::get(*getContext(), NewVal);
1110 // In other cases the element type is illegal and needs to be expanded, for
1111 // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1112 // the value into n parts and use a vector type with n-times the elements.
1113 // Then bitcast to the type requested.
1114 // Legalizing constants too early makes the DAGCombiner's job harder so we
1115 // only legalize if the DAG tells us we must produce legal types.
1116 else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1117 TLI->getTypeAction(*getContext(), EltVT) ==
1118 TargetLowering::TypeExpandInteger) {
1119 const APInt &NewVal = Elt->getValue();
1120 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1121 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1122 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1123 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1125 // Check the temporary vector is the correct size. If this fails then
1126 // getTypeToTransformTo() probably returned a type whose size (in bits)
1127 // isn't a power-of-2 factor of the requested type size.
1128 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1130 SmallVector<SDValue, 2> EltParts;
1131 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
1132 EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
1133 .zextOrTrunc(ViaEltSizeInBits), DL,
1134 ViaEltVT, isT, isO));
1137 // EltParts is currently in little endian order. If we actually want
1138 // big-endian order then reverse it now.
1139 if (getDataLayout().isBigEndian())
1140 std::reverse(EltParts.begin(), EltParts.end());
1142 // The elements must be reversed when the element order is different
1143 // to the endianness of the elements (because the BITCAST is itself a
1144 // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.
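    // For example, assuming a MIPS32-style target where i64 must be expanded:
    // a v2i64 splat constant becomes a v4i32 BUILD_VECTOR holding the two
    // 32-bit halves of the value (repeated for each element, in endian order),
    // which is then bitcast back to v2i64 below.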
1149 SmallVector<SDValue, 8> Ops;
1150 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1151 Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
1152 return getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1155 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1156 "APInt size does not match type size!");
1157 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1158 FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);
1169 N = newSDNode<ConstantSDNode>(isT, isO, Elt, DL.getDebugLoc(), EltVT);
1170 CSEMap.InsertNode(N, IP);
1174 SDValue Result(N, 0);
1176 Result = getSplatBuildVector(VT, DL, Result);
1180 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1182 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1185 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1187 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1190 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1191 EVT VT, bool isTarget) {
1192 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1194 EVT EltVT = VT.getScalarType();
1196 // Do the map lookup using the actual bit pattern for the floating point
1197 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1198 // we don't have issues with SNANs.
1199 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1200 FoldingSetNodeID ID;
1201 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1204 SDNode *N = nullptr;
1205 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1207 return SDValue(N, 0);
1210 N = newSDNode<ConstantFPSDNode>(isTarget, &V, DL.getDebugLoc(), EltVT);
1211 CSEMap.InsertNode(N, IP);
1215 SDValue Result(N, 0);
1217 Result = getSplatBuildVector(VT, DL, Result);
1221 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1223 EVT EltVT = VT.getScalarType();
1224 if (EltVT == MVT::f32)
1225 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1226 else if (EltVT == MVT::f64)
1227 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1228 else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1229 EltVT == MVT::f16) {
    bool Ignored;
    APFloat APF = APFloat(Val);
    APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &Ignored);
    return getConstantFP(APF, DL, VT, isTarget);
1236 llvm_unreachable("Unsupported type in getConstantFP");
1239 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1240 EVT VT, int64_t Offset, bool isTargetGA,
1241 unsigned char TargetFlags) {
1242 assert((TargetFlags == 0 || isTargetGA) &&
1243 "Cannot set target flags on target-independent globals");
1245 // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  unsigned Opc;
  if (GV->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), None);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = nullptr;
1262 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1263 return SDValue(E, 0);
1265 auto *N = newSDNode<GlobalAddressSDNode>(
1266 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1267 CSEMap.InsertNode(N, IP);
1269 return SDValue(N, 0);
1272 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1273 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1274 FoldingSetNodeID ID;
1275 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1278 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1279 return SDValue(E, 0);
1281 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1282 CSEMap.InsertNode(N, IP);
1284 return SDValue(N, 0);
1287 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1288 unsigned char TargetFlags) {
1289 assert((TargetFlags == 0 || isTarget) &&
1290 "Cannot set target flags on target-independent jump tables");
1291 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1292 FoldingSetNodeID ID;
1293 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1295 ID.AddInteger(TargetFlags);
1297 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1298 return SDValue(E, 0);
1300 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1301 CSEMap.InsertNode(N, IP);
1303 return SDValue(N, 0);
1306 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1307 unsigned Alignment, int Offset,
1309 unsigned char TargetFlags) {
1310 assert((TargetFlags == 0 || isTarget) &&
1311 "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = MF->getFunction()->optForSize()
1314 ? getDataLayout().getABITypeAlignment(C->getType())
1315 : getDataLayout().getPrefTypeAlignment(C->getType());
1316 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1317 FoldingSetNodeID ID;
1318 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1319 ID.AddInteger(Alignment);
1320 ID.AddInteger(Offset);
1322 ID.AddInteger(TargetFlags);
1324 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1325 return SDValue(E, 0);
1327 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1329 CSEMap.InsertNode(N, IP);
1331 return SDValue(N, 0);
1335 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1336 unsigned Alignment, int Offset,
1338 unsigned char TargetFlags) {
1339 assert((TargetFlags == 0 || isTarget) &&
1340 "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
1343 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1344 FoldingSetNodeID ID;
1345 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1346 ID.AddInteger(Alignment);
1347 ID.AddInteger(Offset);
1348 C->addSelectionDAGCSEId(ID);
1349 ID.AddInteger(TargetFlags);
1351 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1352 return SDValue(E, 0);
1354 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1356 CSEMap.InsertNode(N, IP);
1358 return SDValue(N, 0);
1361 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1362 unsigned char TargetFlags) {
1363 FoldingSetNodeID ID;
1364 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1365 ID.AddInteger(Index);
1366 ID.AddInteger(Offset);
1367 ID.AddInteger(TargetFlags);
1369 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1370 return SDValue(E, 0);
1372 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1373 CSEMap.InsertNode(N, IP);
1375 return SDValue(N, 0);
1378 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1379 FoldingSetNodeID ID;
1380 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1383 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1384 return SDValue(E, 0);
1386 auto *N = newSDNode<BasicBlockSDNode>(MBB);
1387 CSEMap.InsertNode(N, IP);
1389 return SDValue(N, 0);
1392 SDValue SelectionDAG::getValueType(EVT VT) {
1393 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1394 ValueTypeNodes.size())
1395 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1397 SDNode *&N = VT.isExtended() ?
1398 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1400 if (N) return SDValue(N, 0);
1401 N = newSDNode<VTSDNode>(VT);
1403 return SDValue(N, 0);
1406 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1407 SDNode *&N = ExternalSymbols[Sym];
1408 if (N) return SDValue(N, 0);
1409 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1411 return SDValue(N, 0);
1414 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1415 SDNode *&N = MCSymbols[Sym];
  if (N)
    return SDValue(N, 0);
1418 N = newSDNode<MCSymbolSDNode>(Sym, VT);
1420 return SDValue(N, 0);
1423 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1424 unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
1428 if (N) return SDValue(N, 0);
1429 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1431 return SDValue(N, 0);
1434 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1435 if ((unsigned)Cond >= CondCodeNodes.size())
1436 CondCodeNodes.resize(Cond+1);
1438 if (!CondCodeNodes[Cond]) {
1439 auto *N = newSDNode<CondCodeSDNode>(Cond);
1440 CondCodeNodes[Cond] = N;
1444 return SDValue(CondCodeNodes[Cond], 0);
1447 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1448 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
  std::swap(N1, N2);
  ShuffleVectorSDNode::commuteMask(M);
}
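// For example, with 4-element vectors the mask <0, 5, 1, 7> taken relative to
// (N1, N2) becomes <4, 1, 5, 3> relative to the swapped pair (N2, N1); undef
// (-1) entries are left untouched by commuteMask.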
1454 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1455 SDValue N2, ArrayRef<int> Mask) {
1456 assert(VT.getVectorNumElements() == Mask.size() &&
1457 "Must have the same number of vector elements as mask elements!");
1458 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1459 "Invalid VECTOR_SHUFFLE");
1461 // Canonicalize shuffle undef, undef -> undef
1462 if (N1.isUndef() && N2.isUndef())
1463 return getUNDEF(VT);
1465 // Validate that all indices in Mask are within the range of the elements
1466 // input to the shuffle.
1467 int NElts = Mask.size();
1468 assert(all_of(Mask, [&](int M) { return M < (NElts * 2); }) &&
1469 "Index out of range");
1471 // Copy the mask so we can do any needed cleanup.
1472 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }
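  // For example, shuffle(A, A, <0, 5, 2, 7>) is rewritten here as
  // shuffle(A, undef, <0, 1, 2, 3>), which the identity check further down
  // folds to A itself.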
  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.isUndef())
    commuteShuffle(N1, N2, MaskVec);
1485 // If shuffling a splat, try to blend the splat instead. We do this here so
1486 // that even when this arises during lowering we don't have to re-handle it.
1487 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1488 BitVector UndefElements;
1489 SDValue Splat = BV->getSplatValue(&UndefElements);
1493 for (int i = 0; i < NElts; ++i) {
1494 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1497 // If this input comes from undef, mark it as such.
1498 if (UndefElements[MaskVec[i] - Offset]) {
1503 // If we can blend a non-undef lane, use that instead.
1504 if (!UndefElements[i])
1505 MaskVec[i] = i + Offset;
1508 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1509 BlendSplat(N1BV, 0);
1510 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1511 BlendSplat(N2BV, NElts);
1513 // Canonicalize all index into lhs, -> shuffle lhs, undef
1514 // Canonicalize all index into rhs, -> shuffle rhs, undef
1515 bool AllLHS = true, AllRHS = true;
1516 bool N2Undef = N2.isUndef();
1517 for (int i = 0; i != NElts; ++i) {
1518 if (MaskVec[i] >= NElts) {
1523 } else if (MaskVec[i] >= 0) {
1527 if (AllLHS && AllRHS)
1528 return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }
1535 // Reset our undef status after accounting for the mask.
1536 N2Undef = N2.isUndef();
1537 // Re-check whether both sides ended up undef.
1538 if (N1.isUndef() && N2Undef)
1539 return getUNDEF(VT);
1541 // If Identity shuffle return that node.
1542 bool Identity = true, AllSame = true;
1543 for (int i = 0; i != NElts; ++i) {
1544 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1545 if (MaskVec[i] != MaskVec[0]) AllSame = false;
  if (Identity && NElts)
    return N1;
1550 // Shuffling a constant splat doesn't change the result.
1554 // Look through any bitcasts. We check that these don't change the number
1555 // (and size) of elements and just changes their types.
1556 while (V.getOpcode() == ISD::BITCAST)
1557 V = V->getOperand(0);
1559 // A splat should always show up as a build vector node.
1560 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1561 BitVector UndefElements;
1562 SDValue Splat = BV->getSplatValue(&UndefElements);
1563 // If this is a splat of an undef, shuffling it is also undef.
1564 if (Splat && Splat.isUndef())
1565 return getUNDEF(VT);
1568 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1570 // We only have a splat which can skip shuffles if there is a splatted
1571 // value and no undef lanes rearranged by the shuffle.
1572 if (Splat && UndefElements.none()) {
1573 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1574 // number of elements match or the value splatted is a zero constant.
1577 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1578 if (C->isNullValue())
1582 // If the shuffle itself creates a splat, build the vector directly.
1583 if (AllSame && SameNumElts) {
1584 EVT BuildVT = BV->getValueType(0);
1585 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1586 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
      // We may have jumped through bitcasts, so the type of the
      // BUILD_VECTOR may not match the type of the shuffle.
      if (VT != BuildVT)
        NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
      return NewBV;
1597 FoldingSetNodeID ID;
1598 SDValue Ops[2] = { N1, N2 };
1599 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1600 for (int i = 0; i != NElts; ++i)
1601 ID.AddInteger(MaskVec[i]);
1604 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1605 return SDValue(E, 0);
1607 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1608 // SDNode doesn't have access to it. This memory will be "leaked" when
1609 // the node is deallocated, but recovered when the NodeAllocator is released.
1610 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1611 std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc);
1613 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1614 dl.getDebugLoc(), MaskAlloc);
1615 createOperands(N, Ops);
1617 CSEMap.InsertNode(N, IP);
1619 return SDValue(N, 0);
1622 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1623 MVT VT = SV.getSimpleValueType(0);
1624 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1625 ShuffleVectorSDNode::commuteMask(MaskVec);
1627 SDValue Op0 = SV.getOperand(0);
1628 SDValue Op1 = SV.getOperand(1);
1629 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1632 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1633 FoldingSetNodeID ID;
1634 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1635 ID.AddInteger(RegNo);
1637 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1638 return SDValue(E, 0);
1640 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1641 CSEMap.InsertNode(N, IP);
1643 return SDValue(N, 0);
1646 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1647 FoldingSetNodeID ID;
1648 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1649 ID.AddPointer(RegMask);
1651 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1652 return SDValue(E, 0);
1654 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1655 CSEMap.InsertNode(N, IP);
1657 return SDValue(N, 0);
1660 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
1662 FoldingSetNodeID ID;
1663 SDValue Ops[] = { Root };
1664 AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), Ops);
1665 ID.AddPointer(Label);
1667 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1668 return SDValue(E, 0);
1670 auto *N = newSDNode<EHLabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label);
1671 createOperands(N, Ops);
1673 CSEMap.InsertNode(N, IP);
1675 return SDValue(N, 0);
1678 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1681 unsigned char TargetFlags) {
1682 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1684 FoldingSetNodeID ID;
1685 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1687 ID.AddInteger(Offset);
1688 ID.AddInteger(TargetFlags);
1690 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1691 return SDValue(E, 0);
1693 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
1694 CSEMap.InsertNode(N, IP);
1696 return SDValue(N, 0);
1699 SDValue SelectionDAG::getSrcValue(const Value *V) {
1700 assert((!V || V->getType()->isPointerTy()) &&
1701 "SrcValue is not a pointer?");
1703 FoldingSetNodeID ID;
1704 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1708 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1709 return SDValue(E, 0);
1711 auto *N = newSDNode<SrcValueSDNode>(V);
1712 CSEMap.InsertNode(N, IP);
1714 return SDValue(N, 0);
1717 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1718 FoldingSetNodeID ID;
1719 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
1723 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1724 return SDValue(E, 0);
1726 auto *N = newSDNode<MDNodeSDNode>(MD);
1727 CSEMap.InsertNode(N, IP);
1729 return SDValue(N, 0);
1732 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
  if (VT == V.getValueType())
    return V;
1736 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
1739 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
1740 unsigned SrcAS, unsigned DestAS) {
1741 SDValue Ops[] = {Ptr};
1742 FoldingSetNodeID ID;
1743 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
1744 ID.AddInteger(SrcAS);
1745 ID.AddInteger(DestAS);
1748 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1749 return SDValue(E, 0);
1751 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
1753 createOperands(N, Ops);
1755 CSEMap.InsertNode(N, IP);
1757 return SDValue(N, 0);
1760 /// getShiftAmountOperand - Return the specified value casted to
1761 /// the target's desired shift amount type.
1762 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1763 EVT OpTy = Op.getValueType();
1764 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
1765 if (OpTy == ShTy || OpTy.isVector()) return Op;
1767 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
SDValue SelectionDAG::expandVAArg(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
1773 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1774 EVT VT = Node->getValueType(0);
1775 SDValue Tmp1 = Node->getOperand(0);
1776 SDValue Tmp2 = Node->getOperand(1);
1777 unsigned Align = Node->getConstantOperandVal(3);
1779 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
1780 Tmp2, MachinePointerInfo(V));
1781 SDValue VAList = VAListLoad;
1783 if (Align > TLI.getMinStackArgumentAlignment()) {
1784 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
1786 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1787 getConstant(Align - 1, dl, VAList.getValueType()));
1789 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList,
1790 getConstant(-(int64_t)Align, dl, VAList.getValueType()));
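    // The two nodes above round VAList up to the next multiple of Align by
    // adding Align-1 and masking with -Align (i.e. ~(Align-1)); e.g. with
    // Align == 8, a list pointer of 0x1003 becomes 0x1008.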
1793 // Increment the pointer, VAList, to the next vaarg
1794 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1795 getConstant(getDataLayout().getTypeAllocSize(
1796 VT.getTypeForEVT(*getContext())),
1797 dl, VAList.getValueType()));
  // Store the incremented VAList to the legalized pointer
  Tmp1 =
      getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
1801 // Load the actual argument out of the pointer VAList
1802 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
SDValue SelectionDAG::expandVACopy(SDNode *Node) {
  SDLoc dl(Node);
  const TargetLowering &TLI = getTargetLoweringInfo();
1808 // This defaults to loading a pointer from the input and storing it to the
1809 // output, returning the chain.
1810 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
1811 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
1813 SDValue Tmp1 = getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
1814 Node->getOperand(2), MachinePointerInfo(VS));
1815 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
1816 MachinePointerInfo(VD));
1819 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1820 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1821 unsigned ByteSize = VT.getStoreSize();
1822 Type *Ty = VT.getTypeForEVT(*getContext());
1823 unsigned StackAlign =
1824 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
1826 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
1827 return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
1830 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1831 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
1832 Type *Ty1 = VT1.getTypeForEVT(*getContext());
1833 Type *Ty2 = VT2.getTypeForEVT(*getContext());
1834 const DataLayout &DL = getDataLayout();
1836 unsigned Align = std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
1838 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1839 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
1840 return getFrameIndex(FrameIdx, TLI->getPointerTy(getDataLayout()));
1843 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
1844 ISD::CondCode Cond, const SDLoc &dl) {
1845 // These setcc operations always fold.
1846 switch (Cond) {
1847 default: break;
1848 case ISD::SETFALSE:
1849 case ISD::SETFALSE2: return getConstant(0, dl, VT);
1850 case ISD::SETTRUE:
1851 case ISD::SETTRUE2: {
1852 TargetLowering::BooleanContent Cnt =
1853 TLI->getBooleanContents(N1->getValueType(0));
1854 return getConstant(
1855 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
1856 VT);
1869 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1873 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
1874 const APInt &C2 = N2C->getAPIntValue();
1875 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
1876 const APInt &C1 = N1C->getAPIntValue();
1879 default: llvm_unreachable("Unknown integer setcc!");
1880 case ISD::SETEQ: return getConstant(C1 == C2, dl, VT);
1881 case ISD::SETNE: return getConstant(C1 != C2, dl, VT);
1882 case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT);
1883 case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT);
1884 case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT);
1885 case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT);
1886 case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT);
1887 case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT);
1888 case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT);
1889 case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT);
1893 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
1894 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
1895 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
1898 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
1899 return getUNDEF(VT);
1901 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT);
1902 case ISD::SETNE: if (R==APFloat::cmpUnordered)
1903 return getUNDEF(VT);
1905 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
1906 R==APFloat::cmpLessThan, dl, VT);
1907 case ISD::SETLT: if (R==APFloat::cmpUnordered)
1908 return getUNDEF(VT);
1910 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT);
1911 case ISD::SETGT: if (R==APFloat::cmpUnordered)
1912 return getUNDEF(VT);
1914 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT);
1915 case ISD::SETLE: if (R==APFloat::cmpUnordered)
1916 return getUNDEF(VT);
1918 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
1919 R==APFloat::cmpEqual, dl, VT);
1920 case ISD::SETGE: if (R==APFloat::cmpUnordered)
1921 return getUNDEF(VT);
1923 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
1924 R==APFloat::cmpEqual, dl, VT);
1925 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT);
1926 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT);
1927 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
1928 R==APFloat::cmpEqual, dl, VT);
1929 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT);
1930 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
1931 R==APFloat::cmpLessThan, dl, VT);
1932 case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
1933 R==APFloat::cmpUnordered, dl, VT);
1934 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT);
1935 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT);
1938 // Ensure that the constant occurs on the RHS.
1939 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
1940 MVT CompVT = N1.getValueType().getSimpleVT();
1941 if (!TLI->isCondCodeLegal(SwappedCond, CompVT))
1942 return SDValue();
1944 return getSetCC(dl, VT, N2, N1, SwappedCond);
1948 // Could not fold it.
1952 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
1953 /// use this predicate to simplify operations downstream.
1954 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
1955 unsigned BitWidth = Op.getScalarValueSizeInBits();
1956 return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
1959 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
1960 /// this predicate to simplify operations downstream. Mask is known to be zero
1961 /// for bits that V cannot have.
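/// (Illustrative note, not original source:) e.g. if computeKnownBits proves
/// the low two bits of V are zero, MaskedValueIsZero(V, 0x3) is true, which is
/// the typical way alignment facts are queried.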
1962 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
1963 unsigned Depth) const {
1964 APInt KnownZero, KnownOne;
1965 computeKnownBits(Op, KnownZero, KnownOne, Depth);
1966 return (KnownZero & Mask) == Mask;
1969 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
1970 /// is less than the element bit-width of the shift node, return it.
1971 static const APInt *getValidShiftAmountConstant(SDValue V) {
1972 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) {
1973 // Shifting more than the bitwidth is not valid.
1974 const APInt &ShAmt = SA->getAPIntValue();
1975 if (ShAmt.ult(V.getScalarValueSizeInBits()))
1981 /// Determine which bits of Op are known to be either zero or one and return
1982 /// them in the KnownZero/KnownOne bitsets. For vectors, the known bits are
1983 /// those that are shared by every vector element.
1984 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
1985 APInt &KnownOne, unsigned Depth) const {
1986 EVT VT = Op.getValueType();
1987 APInt DemandedElts = VT.isVector()
1988 ? APInt::getAllOnesValue(VT.getVectorNumElements())
1990 computeKnownBits(Op, KnownZero, KnownOne, DemandedElts, Depth);
1993 /// Determine which bits of Op are known to be either zero or one and return
1994 /// them in the KnownZero/KnownOne bitsets. The DemandedElts argument allows
1995 /// us to only collect the known bits that are shared by the requested vector
1996 /// elements.
1997 /// TODO: We only support DemandedElts on a few opcodes so far, the remainder
1998 /// should be added when they become necessary.
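/// (Illustrative note, not original source:) for a <4 x i32> value with only
/// element 2 demanded, bits of the other three elements place no constraint
/// on the reported known bits.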
1999 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
2000 APInt &KnownOne, const APInt &DemandedElts,
2001 unsigned Depth) const {
2002 unsigned BitWidth = Op.getScalarValueSizeInBits();
2004 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
2005 if (Depth == 6)
2006 return; // Limit search depth.
2008 APInt KnownZero2, KnownOne2;
2009 unsigned NumElts = DemandedElts.getBitWidth();
2011 if (!DemandedElts)
2012 return; // No demanded elts, better to assume we don't know anything.
2014 unsigned Opcode = Op.getOpcode();
2015 switch (Opcode) {
2016 case ISD::Constant:
2017 // We know all of the bits for a constant!
2018 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
2019 KnownZero = ~KnownOne;
2021 case ISD::BUILD_VECTOR:
2022 // Collect the known bits that are shared by every demanded vector element.
2023 assert(NumElts == Op.getValueType().getVectorNumElements() &&
2024 "Unexpected vector size");
2025 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
2026 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2027 if (!DemandedElts[i])
2030 SDValue SrcOp = Op.getOperand(i);
2031 computeKnownBits(SrcOp, KnownZero2, KnownOne2, Depth + 1);
2033 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
2034 if (SrcOp.getValueSizeInBits() != BitWidth) {
2035 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2036 "Expected BUILD_VECTOR implicit truncation");
2037 KnownOne2 = KnownOne2.trunc(BitWidth);
2038 KnownZero2 = KnownZero2.trunc(BitWidth);
2041 // Known bits are the values that are shared by every demanded element.
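// (Illustrative note, not original source:) for i8 elements 0x0F and 0x07,
// bits 0-2 are known one, bit 3 is unknown, and bits 4-7 are known zero.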
2042 KnownOne &= KnownOne2;
2043 KnownZero &= KnownZero2;
2045 // If we don't know any bits, early out.
2046 if (!KnownOne && !KnownZero)
2050 case ISD::VECTOR_SHUFFLE: {
2051 // Collect the known bits that are shared by every vector element referenced
2052 // by the shuffle.
2053 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2054 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
2055 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2056 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2057 for (unsigned i = 0; i != NumElts; ++i) {
2058 if (!DemandedElts[i])
2061 int M = SVN->getMaskElt(i);
2062 if (M < 0) {
2063 // For UNDEF elements, we don't know anything about the common state of
2064 // the shuffle result.
2065 KnownOne.clearAllBits();
2066 KnownZero.clearAllBits();
2067 DemandedLHS.clearAllBits();
2068 DemandedRHS.clearAllBits();
2072 if ((unsigned)M < NumElts)
2073 DemandedLHS.setBit((unsigned)M % NumElts);
2075 DemandedRHS.setBit((unsigned)M % NumElts);
2077 // Known bits are the values that are shared by every demanded element.
2078 if (!!DemandedLHS) {
2079 SDValue LHS = Op.getOperand(0);
2080 computeKnownBits(LHS, KnownZero2, KnownOne2, DemandedLHS, Depth + 1);
2081 KnownOne &= KnownOne2;
2082 KnownZero &= KnownZero2;
2084 // If we don't know any bits, early out.
2085 if (!KnownOne && !KnownZero)
2087 if (!!DemandedRHS) {
2088 SDValue RHS = Op.getOperand(1);
2089 computeKnownBits(RHS, KnownZero2, KnownOne2, DemandedRHS, Depth + 1);
2090 KnownOne &= KnownOne2;
2091 KnownZero &= KnownZero2;
2095 case ISD::CONCAT_VECTORS: {
2096 // Split DemandedElts and test each of the demanded subvectors.
2097 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
2098 EVT SubVectorVT = Op.getOperand(0).getValueType();
2099 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2100 unsigned NumSubVectors = Op.getNumOperands();
2101 for (unsigned i = 0; i != NumSubVectors; ++i) {
2102 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2103 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2104 if (!!DemandedSub) {
2105 SDValue Sub = Op.getOperand(i);
2106 computeKnownBits(Sub, KnownZero2, KnownOne2, DemandedSub, Depth + 1);
2107 KnownOne &= KnownOne2;
2108 KnownZero &= KnownZero2;
2110 // If we don't know any bits, early out.
2111 if (!KnownOne && !KnownZero)
2116 case ISD::EXTRACT_SUBVECTOR: {
2117 // If we know the element index, just demand that subvector elements,
2118 // otherwise demand them all.
2119 SDValue Src = Op.getOperand(0);
2120 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2121 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2122 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2123 // Offset the demanded elts by the subvector index.
2124 uint64_t Idx = SubIdx->getZExtValue();
2125 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
2126 computeKnownBits(Src, KnownZero, KnownOne, DemandedSrc, Depth + 1);
2128 computeKnownBits(Src, KnownZero, KnownOne, Depth + 1);
2132 case ISD::BITCAST: {
2133 SDValue N0 = Op.getOperand(0);
2134 unsigned SubBitWidth = N0.getScalarValueSizeInBits();
2136 // Ignore bitcasts from floating point.
2137 if (!N0.getValueType().isInteger())
2140 // Fast handling of 'identity' bitcasts.
2141 if (BitWidth == SubBitWidth) {
2142 computeKnownBits(N0, KnownZero, KnownOne, DemandedElts, Depth + 1);
2146 // Support big-endian targets when it becomes useful.
2147 bool IsLE = getDataLayout().isLittleEndian();
2151 // Bitcast 'small element' vector to 'large element' scalar/vector.
2152 if ((BitWidth % SubBitWidth) == 0) {
2153 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2155 // Collect known bits for the (larger) output by collecting the known
2156 // bits from each set of sub elements and shift these into place.
2157 // We need to separately call computeKnownBits for each set of
2158 // sub elements as the knownbits for each is likely to be different.
2159 unsigned SubScale = BitWidth / SubBitWidth;
2160 APInt SubDemandedElts(NumElts * SubScale, 0);
2161 for (unsigned i = 0; i != NumElts; ++i)
2162 if (DemandedElts[i])
2163 SubDemandedElts.setBit(i * SubScale);
2165 for (unsigned i = 0; i != SubScale; ++i) {
2166 computeKnownBits(N0, KnownZero2, KnownOne2, SubDemandedElts.shl(i),
2168 KnownOne |= KnownOne2.zext(BitWidth).shl(SubBitWidth * i);
2169 KnownZero |= KnownZero2.zext(BitWidth).shl(SubBitWidth * i);
2173 // Bitcast 'large element' scalar/vector to 'small element' vector.
2174 if ((SubBitWidth % BitWidth) == 0) {
2175 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2177 // Collect known bits for the (smaller) output by collecting the known
2178 // bits from the overlapping larger input elements and extracting the
2179 // sub sections we actually care about.
2180 unsigned SubScale = SubBitWidth / BitWidth;
2181 APInt SubDemandedElts(NumElts / SubScale, 0);
2182 for (unsigned i = 0; i != NumElts; ++i)
2183 if (DemandedElts[i])
2184 SubDemandedElts.setBit(i / SubScale);
2186 computeKnownBits(N0, KnownZero2, KnownOne2, SubDemandedElts, Depth + 1);
2188 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
2189 for (unsigned i = 0; i != NumElts; ++i)
2190 if (DemandedElts[i]) {
2191 unsigned Offset = (i % SubScale) * BitWidth;
2192 KnownOne &= KnownOne2.lshr(Offset).trunc(BitWidth);
2193 KnownZero &= KnownZero2.lshr(Offset).trunc(BitWidth);
2194 // If we don't know any bits, early out.
2195 if (!KnownOne && !KnownZero)
2201 case ISD::AND:
2202 // If either the LHS or the RHS is zero, the result is zero.
2203 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
2205 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2208 // Output known-1 bits are only known if set in both the LHS & RHS.
2209 KnownOne &= KnownOne2;
2210 // Output known-0 are known to be clear if zero in either the LHS | RHS.
2211 KnownZero |= KnownZero2;
2213 case ISD::OR:
2214 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
2216 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2219 // Output known-0 bits are only known if clear in both the LHS & RHS.
2220 KnownZero &= KnownZero2;
2221 // Output known-1 are known to be set if set in either the LHS | RHS.
2222 KnownOne |= KnownOne2;
2224 case ISD::XOR: {
2225 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
2227 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2230 // Output known-0 bits are known if clear or set in both the LHS & RHS.
2231 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
2232 // Output known-1 are known to be set if set in only one of the LHS, RHS.
2233 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
2234 KnownZero = KnownZeroOut;
2237 case ISD::MUL: {
2238 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, DemandedElts,
2240 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2243 // If low bits are zero in either operand, output low known-0 bits.
2244 // Also compute a conservative estimate for high known-0 bits.
2245 // More trickiness is possible, but this is sufficient for the
2246 // interesting case of alignment computation.
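// (Illustrative note, not original source:) if one factor has 2 known
// trailing zero bits and the other has 3, the product has at least 5 known
// trailing zeros, which is exactly the alignment-style fact this block wants.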
2247 KnownOne.clearAllBits();
2248 unsigned TrailZ = KnownZero.countTrailingOnes() +
2249 KnownZero2.countTrailingOnes();
2250 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
2251 KnownZero2.countLeadingOnes(),
2252 BitWidth) - BitWidth;
2254 TrailZ = std::min(TrailZ, BitWidth);
2255 LeadZ = std::min(LeadZ, BitWidth);
2256 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
2257 APInt::getHighBitsSet(BitWidth, LeadZ);
2260 case ISD::UDIV: {
2261 // For the purposes of computing leading zeros we can conservatively
2262 // treat a udiv as a logical right shift by the power of 2 known to
2263 // be less than the denominator.
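// (Illustrative note, not original source:) a numerator with 8 known leading
// zero bits divided by a divisor known to be >= 16 yields a quotient with at
// least 8 + log2(16) = 12 leading zero bits.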
2264 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2266 unsigned LeadZ = KnownZero2.countLeadingOnes();
2268 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2270 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
2271 if (RHSUnknownLeadingOnes != BitWidth)
2272 LeadZ = std::min(BitWidth,
2273 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
2275 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
2278 case ISD::SELECT:
2279 computeKnownBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
2280 // If we don't know any bits, early out.
2281 if (!KnownOne && !KnownZero)
2283 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2285 // Only known if known in both the LHS and RHS.
2286 KnownOne &= KnownOne2;
2287 KnownZero &= KnownZero2;
2289 case ISD::SELECT_CC:
2290 computeKnownBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
2291 // If we don't know any bits, early out.
2292 if (!KnownOne && !KnownZero)
2294 computeKnownBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
2296 // Only known if known in both the LHS and RHS.
2297 KnownOne &= KnownOne2;
2298 KnownZero &= KnownZero2;
2306 if (Op.getResNo() != 1)
2308 // The boolean result conforms to getBooleanContents.
2309 // If we know the result of a setcc has the top bits zero, use this info.
2310 // We know that we have an integer-based boolean since these operations
2311 // are only available for integer.
2312 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2313 TargetLowering::ZeroOrOneBooleanContent &&
2315 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2317 case ISD::SETCC:
2318 // If we know the result of a setcc has the top bits zero, use this info.
2319 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2320 TargetLowering::ZeroOrOneBooleanContent &&
2322 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2324 case ISD::SHL:
2325 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2326 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2328 KnownZero = KnownZero << *ShAmt;
2329 KnownOne = KnownOne << *ShAmt;
2330 // Low bits are known zero.
2331 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt->getZExtValue());
2334 case ISD::SRL:
2335 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2336 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2338 KnownZero = KnownZero.lshr(*ShAmt);
2339 KnownOne = KnownOne.lshr(*ShAmt);
2340 // High bits are known zero.
2341 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt->getZExtValue());
2342 KnownZero |= HighBits;
2345 case ISD::SRA:
2346 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2347 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2349 KnownZero = KnownZero.lshr(*ShAmt);
2350 KnownOne = KnownOne.lshr(*ShAmt);
2351 // If we know the value of the sign bit, then we know it is copied across
2352 // the high bits by the shift amount.
2353 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt->getZExtValue());
2354 APInt SignBit = APInt::getSignBit(BitWidth);
2355 SignBit = SignBit.lshr(*ShAmt); // Adjust to where it is now in the mask.
2356 if (KnownZero.intersects(SignBit)) {
2357 KnownZero |= HighBits; // New bits are known zero.
2358 } else if (KnownOne.intersects(SignBit)) {
2359 KnownOne |= HighBits; // New bits are known one.
2363 case ISD::SIGN_EXTEND_INREG: {
2364 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2365 unsigned EBits = EVT.getScalarSizeInBits();
2367 // Sign extension. Compute the demanded bits in the result that are not
2368 // present in the input.
2369 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2371 APInt InSignBit = APInt::getSignBit(EBits);
2372 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2374 // If the sign extended bits are demanded, we know that the sign
2375 // bit is demanded.
2376 InSignBit = InSignBit.zext(BitWidth);
2377 if (NewBits.getBoolValue())
2378 InputDemandedBits |= InSignBit;
2380 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2382 KnownOne &= InputDemandedBits;
2383 KnownZero &= InputDemandedBits;
2385 // If the sign bit of the input is known set or clear, then we know the
2386 // top bits of the result.
2387 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear
2388 KnownZero |= NewBits;
2389 KnownOne &= ~NewBits;
2390 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
2391 KnownOne |= NewBits;
2392 KnownZero &= ~NewBits;
2393 } else { // Input sign bit unknown
2394 KnownZero &= ~NewBits;
2395 KnownOne &= ~NewBits;
2399 case ISD::CTTZ:
2400 case ISD::CTTZ_ZERO_UNDEF:
2401 case ISD::CTLZ:
2402 case ISD::CTLZ_ZERO_UNDEF:
2403 case ISD::CTPOP: {
2404 unsigned LowBits = Log2_32(BitWidth)+1;
2405 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
2406 KnownOne.clearAllBits();
2409 case ISD::LOAD: {
2410 LoadSDNode *LD = cast<LoadSDNode>(Op);
2411 // If this is a ZEXTLoad and we are looking at the loaded value.
2412 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2413 EVT VT = LD->getMemoryVT();
2414 unsigned MemBits = VT.getScalarSizeInBits();
2415 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
2416 } else if (const MDNode *Ranges = LD->getRanges()) {
2417 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
2418 computeKnownBitsFromRangeMetadata(*Ranges, KnownZero, KnownOne);
2422 case ISD::ZERO_EXTEND: {
2423 EVT InVT = Op.getOperand(0).getValueType();
2424 unsigned InBits = InVT.getScalarSizeInBits();
2425 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
2426 KnownZero = KnownZero.trunc(InBits);
2427 KnownOne = KnownOne.trunc(InBits);
2428 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2430 KnownZero = KnownZero.zext(BitWidth);
2431 KnownOne = KnownOne.zext(BitWidth);
2432 KnownZero |= NewBits;
2435 case ISD::SIGN_EXTEND: {
2436 EVT InVT = Op.getOperand(0).getValueType();
2437 unsigned InBits = InVT.getScalarSizeInBits();
2439 KnownZero = KnownZero.trunc(InBits);
2440 KnownOne = KnownOne.trunc(InBits);
2441 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2444 // If the sign bit is known to be zero or one, then sext will extend
2445 // it to the top bits, else it will just zext.
2446 KnownZero = KnownZero.sext(BitWidth);
2447 KnownOne = KnownOne.sext(BitWidth);
2450 case ISD::ANY_EXTEND: {
2451 EVT InVT = Op.getOperand(0).getValueType();
2452 unsigned InBits = InVT.getScalarSizeInBits();
2453 KnownZero = KnownZero.trunc(InBits);
2454 KnownOne = KnownOne.trunc(InBits);
2455 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2456 KnownZero = KnownZero.zext(BitWidth);
2457 KnownOne = KnownOne.zext(BitWidth);
2460 case ISD::TRUNCATE: {
2461 EVT InVT = Op.getOperand(0).getValueType();
2462 unsigned InBits = InVT.getScalarSizeInBits();
2463 KnownZero = KnownZero.zext(InBits);
2464 KnownOne = KnownOne.zext(InBits);
2465 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2467 KnownZero = KnownZero.trunc(BitWidth);
2468 KnownOne = KnownOne.trunc(BitWidth);
2471 case ISD::AssertZext: {
2472 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2473 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2474 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2475 KnownZero |= (~InMask);
2476 KnownOne &= (~KnownZero);
2480 // All bits are zero except the low bit.
2481 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
2485 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) {
2486 // We know that the top bits of C-X are clear if X contains less bits
2487 // than C (i.e. no wrap-around can happen). For example, 20-X is
2488 // positive if we can prove that X is >= 0 and < 16.
2489 if (CLHS->getAPIntValue().isNonNegative()) {
2490 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2491 // NLZ can't be BitWidth with no sign bit
2492 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2493 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2496 // If all of the MaskV bits are known to be zero, then we know the
2497 // output top bits are zero, because we now know that the output is
2499 if ((KnownZero2 & MaskV) == MaskV) {
2500 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2501 // Top bits known zero.
2502 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
2507 case ISD::ADD:
2508 case ISD::ADDC:
2509 case ISD::ADDE: {
2510 // Output known-0 bits are known if clear or set in both the low clear bits
2511 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
2512 // low 3 bits clear.
2513 // Output known-0 bits are also known if the top bits of each input are
2514 // known to be clear. For example, if one input has the top 10 bits clear
2515 // and the other has the top 8 bits clear, we know the top 7 bits of the
2516 // output must be clear.
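// (Illustrative note, not original source:) for the second example above,
// A < 2^(W-10) and B < 2^(W-8) imply A + B < 2^(W-7), hence the top 7 bits of
// the sum are zero.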
2517 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2519 unsigned KnownZeroHigh = KnownZero2.countLeadingOnes();
2520 unsigned KnownZeroLow = KnownZero2.countTrailingOnes();
2522 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2524 KnownZeroHigh = std::min(KnownZeroHigh,
2525 KnownZero2.countLeadingOnes());
2526 KnownZeroLow = std::min(KnownZeroLow,
2527 KnownZero2.countTrailingOnes());
2529 if (Opcode == ISD::ADD) {
2530 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroLow);
2531 if (KnownZeroHigh > 1)
2532 KnownZero |= APInt::getHighBitsSet(BitWidth, KnownZeroHigh - 1);
2536 // With ADDE, a carry bit may be added in, so we can only use this
2537 // information if we know (at least) that the low two bits are clear. We
2538 // then return to the caller that the low bit is unknown but that other bits
2540 if (KnownZeroLow >= 2) // ADDE
2541 KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroLow);
2544 case ISD::SREM:
2545 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2546 const APInt &RA = Rem->getAPIntValue().abs();
2547 if (RA.isPowerOf2()) {
2548 APInt LowBits = RA - 1;
2549 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2552 // The low bits of the first operand are unchanged by the srem.
2553 KnownZero = KnownZero2 & LowBits;
2554 KnownOne = KnownOne2 & LowBits;
2556 // If the first operand is non-negative or has all low bits zero, then
2557 // the upper bits are all zero.
2558 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2559 KnownZero |= ~LowBits;
2561 // If the first operand is negative and not all low bits are zero, then
2562 // the upper bits are all one.
2563 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2564 KnownOne |= ~LowBits;
2565 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2569 case ISD::UREM: {
2570 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2571 const APInt &RA = Rem->getAPIntValue();
2572 if (RA.isPowerOf2()) {
2573 APInt LowBits = (RA - 1);
2574 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2577 // The upper bits are all zero, the lower ones are unchanged.
2578 KnownZero = KnownZero2 | ~LowBits;
2579 KnownOne = KnownOne2 & LowBits;
2584 // Since the result is less than or equal to either operand, any leading
2585 // zero bits in either operand must also exist in the result.
2586 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2588 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2591 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2592 KnownZero2.countLeadingOnes());
2593 KnownOne.clearAllBits();
2594 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2597 case ISD::EXTRACT_ELEMENT: {
2598 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2599 const unsigned Index = Op.getConstantOperandVal(1);
2600 const unsigned BitWidth = Op.getValueSizeInBits();
2602 // Remove low part of known bits mask
2603 KnownZero = KnownZero.getHiBits(KnownZero.getBitWidth() - Index * BitWidth);
2604 KnownOne = KnownOne.getHiBits(KnownOne.getBitWidth() - Index * BitWidth);
2606 // Remove high part of known bit mask
2607 KnownZero = KnownZero.trunc(BitWidth);
2608 KnownOne = KnownOne.trunc(BitWidth);
2611 case ISD::EXTRACT_VECTOR_ELT: {
2612 SDValue InVec = Op.getOperand(0);
2613 SDValue EltNo = Op.getOperand(1);
2614 EVT VecVT = InVec.getValueType();
2615 const unsigned BitWidth = Op.getValueSizeInBits();
2616 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
2617 const unsigned NumSrcElts = VecVT.getVectorNumElements();
2618 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
2619 // anything about the extended bits.
2620 if (BitWidth > EltBitWidth) {
2621 KnownZero = KnownZero.trunc(EltBitWidth);
2622 KnownOne = KnownOne.trunc(EltBitWidth);
2624 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
2625 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
2626 // If we know the element index, just demand that vector element.
2627 unsigned Idx = ConstEltNo->getZExtValue();
2628 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
2629 computeKnownBits(InVec, KnownZero, KnownOne, DemandedElt, Depth + 1);
2631 // Unknown element index, so ignore DemandedElts and demand them all.
2632 computeKnownBits(InVec, KnownZero, KnownOne, Depth + 1);
2634 if (BitWidth > EltBitWidth) {
2635 KnownZero = KnownZero.zext(BitWidth);
2636 KnownOne = KnownOne.zext(BitWidth);
2640 case ISD::INSERT_VECTOR_ELT: {
2641 SDValue InVec = Op.getOperand(0);
2642 SDValue InVal = Op.getOperand(1);
2643 SDValue EltNo = Op.getOperand(2);
2645 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
2646 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
2647 // If we know the element index, split the demand between the
2648 // source vector and the inserted element.
2649 KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
2650 unsigned EltIdx = CEltNo->getZExtValue();
2652 // If we demand the inserted element then add its common known bits.
2653 if (DemandedElts[EltIdx]) {
2654 computeKnownBits(InVal, KnownZero2, KnownOne2, Depth + 1);
2655 KnownOne &= KnownOne2.zextOrTrunc(KnownOne.getBitWidth());
2656 KnownZero &= KnownZero2.zextOrTrunc(KnownZero.getBitWidth());
2659 // If we demand the source vector then add its common known bits, ensuring
2660 // that we don't demand the inserted element.
2661 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
2663 computeKnownBits(InVec, KnownZero2, KnownOne2, VectorElts, Depth + 1);
2664 KnownOne &= KnownOne2;
2665 KnownZero &= KnownZero2;
2668 // Unknown element index, so ignore DemandedElts and demand them all.
2669 computeKnownBits(InVec, KnownZero, KnownOne, Depth + 1);
2670 computeKnownBits(InVal, KnownZero2, KnownOne2, Depth + 1);
2671 KnownOne &= KnownOne2.zextOrTrunc(KnownOne.getBitWidth());
2672 KnownZero &= KnownZero2.zextOrTrunc(KnownZero.getBitWidth());
2677 computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
2679 KnownZero = KnownZero2.byteSwap();
2680 KnownOne = KnownOne2.byteSwap();
2687 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
2689 // If we don't know any bits, early out.
2690 if (!KnownOne && !KnownZero)
2692 computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
2694 KnownZero &= KnownZero2;
2695 KnownOne &= KnownOne2;
2698 case ISD::FrameIndex:
2699 case ISD::TargetFrameIndex:
2700 if (unsigned Align = InferPtrAlignment(Op)) {
2701 // The low bits are known zero if the pointer is aligned.
2702 KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
2708 if (Opcode < ISD::BUILTIN_OP_END)
2711 case ISD::INTRINSIC_WO_CHAIN:
2712 case ISD::INTRINSIC_W_CHAIN:
2713 case ISD::INTRINSIC_VOID:
2714 // Allow the target to implement this method for its nodes.
2715 TLI->computeKnownBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
2719 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
2722 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
2723 EVT OpVT = Val.getValueType();
2724 unsigned BitWidth = OpVT.getScalarSizeInBits();
2726 // Is the constant a known power of 2?
2727 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
2728 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
2730 // A left-shift of a constant one will have exactly one bit set because
2731 // shifting the bit off the end is undefined.
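// (Illustrative note, not original source:) (1 << n) equals 2^n for every
// in-range n, so exactly one bit is set; out-of-range shift amounts are
// undefined behavior and may be ignored here.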
2732 if (Val.getOpcode() == ISD::SHL) {
2733 auto *C = dyn_cast<ConstantSDNode>(Val.getOperand(0));
2734 if (C && C->getAPIntValue() == 1)
2738 // Similarly, a logical right-shift of a constant sign-bit will have exactly
2740 if (Val.getOpcode() == ISD::SRL) {
2741 auto *C = dyn_cast<ConstantSDNode>(Val.getOperand(0));
2742 if (C && C->getAPIntValue().isSignBit())
2746 // Are all operands of a build vector constant powers of two?
2747 if (Val.getOpcode() == ISD::BUILD_VECTOR)
2748 if (llvm::all_of(Val->ops(), [this, BitWidth](SDValue E) {
2749 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
2750 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
2755 // More could be done here, though the above checks are enough
2756 // to handle some common cases.
2758 // Fall back to computeKnownBits to catch other known cases.
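// (Illustrative note, not original source:) the popcount test below proves a
// power of two only when every bit is known: BitWidth-1 bits known zero plus
// exactly one bit known one.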
2759 APInt KnownZero, KnownOne;
2760 computeKnownBits(Val, KnownZero, KnownOne);
2761 return (KnownZero.countPopulation() == BitWidth - 1) &&
2762 (KnownOne.countPopulation() == 1);
2765 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
2766 EVT VT = Op.getValueType();
2767 assert(VT.isInteger() && "Invalid VT!");
2768 unsigned VTBits = VT.getScalarSizeInBits();
2769 unsigned Tmp, Tmp2;
2770 unsigned FirstAnswer = 1;
2772 if (Depth == 6)
2773 return 1; // Limit search depth.
2775 switch (Op.getOpcode()) {
2777 case ISD::AssertSext:
2778 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2779 return VTBits-Tmp+1;
2780 case ISD::AssertZext:
2781 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2782 return VTBits-Tmp;
2784 case ISD::Constant: {
2785 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2786 return Val.getNumSignBits();
2789 case ISD::SIGN_EXTEND:
2790 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
2791 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2793 case ISD::SIGN_EXTEND_INREG:
2794 // Max of the input and what this extends.
2795 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
2798 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2799 return std::max(Tmp, Tmp2);
2801 case ISD::SRA:
2802 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2803 // SRA X, C -> adds C sign bits.
2804 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
2805 APInt ShiftVal = C->getAPIntValue();
2806 ShiftVal += Tmp;
2807 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
2808 }
2809 return Tmp;
2810 case ISD::SHL:
2811 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
2812 // shl destroys sign bits.
2813 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2814 if (C->getAPIntValue().uge(VTBits) || // Bad shift.
2815 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out.
2816 return Tmp - C->getZExtValue();
2821 case ISD::XOR: // NOT is handled here.
2822 // Logical binary ops preserve the number of sign bits at the worst.
2823 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2825 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2826 FirstAnswer = std::min(Tmp, Tmp2);
2827 // We computed what we know about the sign bits as our first
2828 // answer. Now proceed to the generic code that uses
2829 // computeKnownBits, and pick whichever answer is better.
2833 case ISD::SELECT:
2834 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2835 if (Tmp == 1) return 1; // Early out.
2836 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2837 return std::min(Tmp, Tmp2);
2838 case ISD::SELECT_CC:
2839 Tmp = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2840 if (Tmp == 1) return 1; // Early out.
2841 Tmp2 = ComputeNumSignBits(Op.getOperand(3), Depth+1);
2842 return std::min(Tmp, Tmp2);
2847 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2849 return 1; // Early out.
2850 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
2851 return std::min(Tmp, Tmp2);
2858 if (Op.getResNo() != 1)
2860 // The boolean result conforms to getBooleanContents. Fall through.
2861 // If setcc returns 0/-1, all bits are sign bits.
2862 // We know that we have an integer-based boolean since these operations
2863 // are only available for integer.
2864 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2865 TargetLowering::ZeroOrNegativeOneBooleanContent)
2866 return VTBits;
2867 break;
2868 case ISD::SETCC:
2869 // If setcc returns 0/-1, all bits are sign bits.
2870 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2871 TargetLowering::ZeroOrNegativeOneBooleanContent)
2872 return VTBits;
2873 break;
2874 case ISD::ROTL:
2875 case ISD::ROTR:
2876 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2877 unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2879 // Handle rotate right by N like a rotate left by 32-N.
2880 if (Op.getOpcode() == ISD::ROTR)
2881 RotAmt = (VTBits-RotAmt) & (VTBits-1);
2883 // If we aren't rotating out all of the known-in sign bits, return the
2884 // number that are left. This handles rotl(sext(x), 1) for example.
2885 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2886 if (Tmp > RotAmt+1) return Tmp-RotAmt;
2889 case ISD::ADD:
2890 // Add can have at most one carry bit. Thus we know that the output
2891 // is, at worst, one more bit than the inputs.
2892 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2893 if (Tmp == 1) return 1; // Early out.
2895 // Special case decrementing a value (ADD X, -1):
2896 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2897 if (CRHS->isAllOnesValue()) {
2898 APInt KnownZero, KnownOne;
2899 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2901 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2903 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2906 // If we are subtracting one from a positive number, there is no carry
2907 // out of the result.
2908 if (KnownZero.isNegative())
2912 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2913 if (Tmp2 == 1) return 1;
2914 return std::min(Tmp, Tmp2)-1;
2916 case ISD::SUB:
2917 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2918 if (Tmp2 == 1) return 1;
2921 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
2922 if (CLHS->isNullValue()) {
2923 APInt KnownZero, KnownOne;
2924 computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2925 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2927 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2930 // If the input is known to be positive (the sign bit is known clear),
2931 // the output of the NEG has the same number of sign bits as the input.
2932 if (KnownZero.isNegative())
2935 // Otherwise, we treat this like a SUB.
2938 // Sub can have at most one carry bit. Thus we know that the output
2939 // is, at worst, one more bit than the inputs.
2940 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2941 if (Tmp == 1) return 1; // Early out.
2942 return std::min(Tmp, Tmp2)-1;
2943 case ISD::TRUNCATE: {
2944 // Check if the sign bits of source go down as far as the truncated value.
2945 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
2946 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2947 if (NumSrcSignBits > (NumSrcBits - VTBits))
2948 return NumSrcSignBits - (NumSrcBits - VTBits);
2951 case ISD::EXTRACT_ELEMENT: {
2952 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2953 const int BitWidth = Op.getValueSizeInBits();
2954 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
2956 // Get reverse index (starting from 1), Op1 value indexes elements from
2957 // little end. Sign starts at big end.
2958 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
2960 // If the sign portion ends in our element the subtraction gives correct
2961 // result. Otherwise it gives either negative or > bitwidth result
2962 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
2964 case ISD::EXTRACT_VECTOR_ELT: {
2965 // At the moment we keep this simple and skip tracking the specific
2966 // element. This way we get the lowest common denominator for all elements
2967 // in the vector.
2968 // TODO: get information for given vector element
2969 const unsigned BitWidth = Op.getValueSizeInBits();
2970 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
2971 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
2972 // anything about sign bits. But if the sizes match we can derive knowledge
2973 // about sign bits from the vector operand.
2974 if (BitWidth == EltBitWidth)
2975 return ComputeNumSignBits(Op.getOperand(0), Depth+1);
2978 case ISD::EXTRACT_SUBVECTOR:
2979 return ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2980 case ISD::CONCAT_VECTORS:
2981 // Determine the minimum number of sign bits across all input vectors.
2982 // Early out if the result is already 1.
2983 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2984 for (unsigned i = 1, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i)
2985 Tmp = std::min(Tmp, ComputeNumSignBits(Op.getOperand(i), Depth + 1));
2989 // If we are looking at the loaded value of the SDNode.
2990 if (Op.getResNo() == 0) {
2991 // Handle LOADX separately here. EXTLOAD case will fallthrough.
2992 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
2993 unsigned ExtType = LD->getExtensionType();
2996 case ISD::SEXTLOAD: // '17' bits known
2997 Tmp = LD->getMemoryVT().getScalarSizeInBits();
2998 return VTBits-Tmp+1;
2999 case ISD::ZEXTLOAD: // '16' bits known
3000 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3001 return VTBits-Tmp;
3006 // Allow the target to implement this method for its nodes.
3007 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3008 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3009 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3010 Op.getOpcode() == ISD::INTRINSIC_VOID) {
3011 unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, *this, Depth);
3012 if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
3015 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3016 // use this information.
3017 APInt KnownZero, KnownOne;
3018 computeKnownBits(Op, KnownZero, KnownOne, Depth);
3021 if (KnownZero.isNegative()) { // sign bit is 0
3023 } else if (KnownOne.isNegative()) { // sign bit is 1;
3030 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3031 // the number of identical bits in the top of the input value.
3033 Mask <<= Mask.getBitWidth()-VTBits;
3034 // Return # leading zeros. We use 'min' here in case Val was zero before
3035 // shifting. We don't want to return '64' as for an i32 "0".
3036 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3039 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3040 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3041 !isa<ConstantSDNode>(Op.getOperand(1)))
3042 return false;
3044 if (Op.getOpcode() == ISD::OR &&
3045 !MaskedValueIsZero(Op.getOperand(0),
3046 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3047 return false;
3049 return true;
3052 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
3053 // If we're told that NaNs won't happen, assume they won't.
3054 if (getTarget().Options.NoNaNsFPMath)
3055 return true;
3057 // If the value is a constant, we can obviously see if it is a NaN or not.
3058 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3059 return !C->getValueAPF().isNaN();
3061 // TODO: Recognize more cases here.
3066 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3067 // If the value is a constant, we can obviously see if it is a zero or not.
3068 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3069 return !C->isZero();
3071 // TODO: Recognize more cases here.
3072 switch (Op.getOpcode()) {
3073 default: break;
3074 case ISD::OR:
3075 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3076 return !C->isNullValue();
3083 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3084 // Check the obvious case.
3085 if (A == B) return true;
3087 // Check for negative and positive zero.
3088 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3089 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3090 if (CA->isZero() && CB->isZero()) return true;
3092 // Otherwise they may not be equal.
3096 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
3097 assert(A.getValueType() == B.getValueType() &&
3098 "Values must have the same type");
3099 APInt AZero, AOne;
3100 APInt BZero, BOne;
3101 computeKnownBits(A, AZero, AOne);
3102 computeKnownBits(B, BZero, BOne);
3103 return (AZero | BZero).isAllOnesValue();
3106 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
3107 ArrayRef<SDValue> Ops,
3108 llvm::SelectionDAG &DAG) {
3109 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
3110 assert(llvm::all_of(Ops,
3111 [Ops](SDValue Op) {
3112 return Ops[0].getValueType() == Op.getValueType();
3113 }) &&
3114 "Concatenation of vectors with inconsistent value types!");
3115 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
3116 VT.getVectorNumElements() &&
3117 "Incorrect element count in vector concatenation!");
3119 if (Ops.size() == 1)
3120 return Ops[0];
3122 // Concat of UNDEFs is UNDEF.
3123 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
3124 return DAG.getUNDEF(VT);
3126 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
3127 // simplified to one big BUILD_VECTOR.
3128 // FIXME: Add support for SCALAR_TO_VECTOR as well.
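// (Illustrative note, not original source:) e.g. concat_vectors(
// build_vector(a, b), undef:v2i32) can be emitted as build_vector(a, b,
// undef, undef), assuming the element types line up.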
3129 EVT SVT = VT.getScalarType();
3130 SmallVector<SDValue, 16> Elts;
3131 for (SDValue Op : Ops) {
3132 EVT OpVT = Op.getValueType();
3133 if (Op.isUndef())
3134 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
3135 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
3136 Elts.append(Op->op_begin(), Op->op_end());
3137 else
3138 return SDValue();
3141 // BUILD_VECTOR requires all inputs to be of the same type, find the
3142 // maximum type and extend them all.
3143 for (SDValue Op : Elts)
3144 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
3146 if (SVT.bitsGT(VT.getScalarType()))
3147 for (SDValue &Op : Elts)
3148 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
3149 ? DAG.getZExtOrTrunc(Op, DL, SVT)
3150 : DAG.getSExtOrTrunc(Op, DL, SVT);
3152 return DAG.getBuildVector(VT, DL, Elts);
3155 /// Gets or creates the specified node.
3156 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
3157 FoldingSetNodeID ID;
3158 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
3159 void *IP = nullptr;
3160 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3161 return SDValue(E, 0);
3163 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
3164 getVTList(VT));
3165 CSEMap.InsertNode(N, IP);
3168 return SDValue(N, 0);
3171 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3173 // Constant fold unary operations with an integer constant operand. Even
3174 // opaque constants will be folded, because the folding of unary operations
3175 // doesn't create new constants with different values. Nevertheless, the
3176 // opaque flag is preserved during folding to prevent future folding with
3177 // the same constant.
3178 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
3179 const APInt &Val = C->getAPIntValue();
3180 switch (Opcode) {
3181 default: break;
3182 case ISD::SIGN_EXTEND:
3183 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
3184 C->isTargetOpcode(), C->isOpaque());
3185 case ISD::ANY_EXTEND:
3186 case ISD::ZERO_EXTEND:
3187 case ISD::TRUNCATE:
3188 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
3189 C->isTargetOpcode(), C->isOpaque());
3190 case ISD::UINT_TO_FP:
3191 case ISD::SINT_TO_FP: {
3192 APFloat apf(EVTToAPFloatSemantics(VT),
3193 APInt::getNullValue(VT.getSizeInBits()));
3194 (void)apf.convertFromAPInt(Val,
3195 Opcode==ISD::SINT_TO_FP,
3196 APFloat::rmNearestTiesToEven);
3197 return getConstantFP(apf, DL, VT);
3199 case ISD::BITCAST:
3200 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
3201 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
3202 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
3203 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
3204 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
3205 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
3206 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
3207 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
3210 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
3213 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
3216 case ISD::CTLZ_ZERO_UNDEF:
3217 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
3220 case ISD::CTTZ_ZERO_UNDEF:
3221 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
3226 // Constant fold unary operations with a floating point constant operand.
3227 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
3228 APFloat V = C->getValueAPF(); // make copy
3229 switch (Opcode) {
3230 case ISD::FNEG:
3231 V.changeSign();
3232 return getConstantFP(V, DL, VT);
3233 case ISD::FABS:
3234 V.clearSign();
3235 return getConstantFP(V, DL, VT);
3236 case ISD::FCEIL: {
3237 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
3238 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3239 return getConstantFP(V, DL, VT);
3242 case ISD::FTRUNC: {
3243 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
3244 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3245 return getConstantFP(V, DL, VT);
3248 case ISD::FFLOOR: {
3249 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
3250 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3251 return getConstantFP(V, DL, VT);
3254 case ISD::FP_EXTEND: {
3255 bool ignored;
3256 // This can return overflow, underflow, or inexact; we don't care.
3257 // FIXME need to be more flexible about rounding mode.
3258 (void)V.convert(EVTToAPFloatSemantics(VT),
3259 APFloat::rmNearestTiesToEven, &ignored);
3260 return getConstantFP(V, DL, VT);
3262 case ISD::FP_TO_SINT:
3263 case ISD::FP_TO_UINT: {
3266 static_assert(integerPartWidth >= 64, "APFloat parts too small!");
3267 // FIXME need to be more flexible about rounding mode.
3268 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
3269 Opcode==ISD::FP_TO_SINT,
3270 APFloat::rmTowardZero, &ignored);
3271 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
3272 break;
3273 APInt api(VT.getSizeInBits(), x);
3274 return getConstant(api, DL, VT);
3276 case ISD::BITCAST:
3277 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
3278 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
3279 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
3280 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
3281 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
3282 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
3287 // Constant fold unary operations with a vector integer or float operand.
3288 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
3289 if (BV->isConstant()) {
3292 // FIXME: Entirely reasonable to perform folding of other unary
3293 // operations here as the need arises.
3300 case ISD::FP_EXTEND:
3301 case ISD::FP_TO_SINT:
3302 case ISD::FP_TO_UINT:
3304 case ISD::UINT_TO_FP:
3305 case ISD::SINT_TO_FP:
3308 case ISD::CTLZ_ZERO_UNDEF:
3310 case ISD::CTTZ_ZERO_UNDEF:
3312 SDValue Ops = { Operand };
3313 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
3320 unsigned OpOpcode = Operand.getNode()->getOpcode();
3322 case ISD::TokenFactor:
3323 case ISD::MERGE_VALUES:
3324 case ISD::CONCAT_VECTORS:
3325 return Operand; // Factor, merge or concat of one node? No need.
3326 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
3327 case ISD::FP_EXTEND:
3328 assert(VT.isFloatingPoint() &&
3329 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
3330 if (Operand.getValueType() == VT) return Operand; // noop conversion.
3331 assert((!VT.isVector() ||
3332 VT.getVectorNumElements() ==
3333 Operand.getValueType().getVectorNumElements()) &&
3334 "Vector element count mismatch!");
3335 assert(Operand.getValueType().bitsLT(VT) &&
3336 "Invalid fpext node, dst < src!");
3337 if (Operand.isUndef())
3338 return getUNDEF(VT);
3340 case ISD::SIGN_EXTEND:
3341 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3342 "Invalid SIGN_EXTEND!");
3343 if (Operand.getValueType() == VT) return Operand; // noop extension
3344 assert((!VT.isVector() ||
3345 VT.getVectorNumElements() ==
3346 Operand.getValueType().getVectorNumElements()) &&
3347 "Vector element count mismatch!");
3348 assert(Operand.getValueType().bitsLT(VT) &&
3349 "Invalid sext node, dst < src!");
3350 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
3351 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3352 else if (OpOpcode == ISD::UNDEF)
3353 // sext(undef) = 0, because the top bits will all be the same.
3354 return getConstant(0, DL, VT);
3356 case ISD::ZERO_EXTEND:
3357 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3358 "Invalid ZERO_EXTEND!");
3359 if (Operand.getValueType() == VT) return Operand; // noop extension
3360 assert((!VT.isVector() ||
3361 VT.getVectorNumElements() ==
3362 Operand.getValueType().getVectorNumElements()) &&
3363 "Vector element count mismatch!");
3364 assert(Operand.getValueType().bitsLT(VT) &&
3365 "Invalid zext node, dst < src!");
3366 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
3367 return getNode(ISD::ZERO_EXTEND, DL, VT,
3368 Operand.getNode()->getOperand(0));
3369 else if (OpOpcode == ISD::UNDEF)
3370 // zext(undef) = 0, because the top bits will be zero.
3371 return getConstant(0, DL, VT);
3373 case ISD::ANY_EXTEND:
3374 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3375 "Invalid ANY_EXTEND!");
3376 if (Operand.getValueType() == VT) return Operand; // noop extension
3377 assert((!VT.isVector() ||
3378 VT.getVectorNumElements() ==
3379 Operand.getValueType().getVectorNumElements()) &&
3380 "Vector element count mismatch!");
3381 assert(Operand.getValueType().bitsLT(VT) &&
3382 "Invalid anyext node, dst < src!");
3384 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3385 OpOpcode == ISD::ANY_EXTEND)
3386 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
3387 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3388 else if (OpOpcode == ISD::UNDEF)
3389 return getUNDEF(VT);
3391 // (ext (trunc x)) -> x
3392 if (OpOpcode == ISD::TRUNCATE) {
3393 SDValue OpOp = Operand.getNode()->getOperand(0);
3394 if (OpOp.getValueType() == VT)
3395 return OpOp;
3398 case ISD::TRUNCATE:
3399 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3400 "Invalid TRUNCATE!");
3401 if (Operand.getValueType() == VT) return Operand; // noop truncate
3402 assert((!VT.isVector() ||
3403 VT.getVectorNumElements() ==
3404 Operand.getValueType().getVectorNumElements()) &&
3405 "Vector element count mismatch!");
3406 assert(Operand.getValueType().bitsGT(VT) &&
3407 "Invalid truncate node, src < dst!");
3408 if (OpOpcode == ISD::TRUNCATE)
3409 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
3410 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3411 OpOpcode == ISD::ANY_EXTEND) {
3412 // If the source is smaller than the dest, we still need an extend.
3413 if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
3414 .bitsLT(VT.getScalarType()))
3415 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
3416 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
3417 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
3418 return Operand.getNode()->getOperand(0);
3420 if (OpOpcode == ISD::UNDEF)
3421 return getUNDEF(VT);
3423 case ISD::BSWAP:
3424 assert(VT.isInteger() && VT == Operand.getValueType() &&
3425 "Invalid BSWAP!");
3426 assert((VT.getScalarSizeInBits() % 16 == 0) &&
3427 "BSWAP types must be a multiple of 16 bits!");
3428 if (OpOpcode == ISD::UNDEF)
3429 return getUNDEF(VT);
3431 case ISD::BITREVERSE:
3432 assert(VT.isInteger() && VT == Operand.getValueType() &&
3433 "Invalid BITREVERSE!");
3434 if (OpOpcode == ISD::UNDEF)
3435 return getUNDEF(VT);
3437 case ISD::BITCAST:
3438 // Basic sanity checking.
3439 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
3440 "Cannot BITCAST between types of different sizes!");
3441 if (VT == Operand.getValueType()) return Operand; // noop conversion.
3442 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
3443 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
3444 if (OpOpcode == ISD::UNDEF)
3445 return getUNDEF(VT);
3447 case ISD::SCALAR_TO_VECTOR:
3448 assert(VT.isVector() && !Operand.getValueType().isVector() &&
3449 (VT.getVectorElementType() == Operand.getValueType() ||
3450 (VT.getVectorElementType().isInteger() &&
3451 Operand.getValueType().isInteger() &&
3452 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
3453 "Illegal SCALAR_TO_VECTOR node!");
3454 if (OpOpcode == ISD::UNDEF)
3455 return getUNDEF(VT);
3456 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
3457 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
3458 isa<ConstantSDNode>(Operand.getOperand(1)) &&
3459 Operand.getConstantOperandVal(1) == 0 &&
3460 Operand.getOperand(0).getValueType() == VT)
3461 return Operand.getOperand(0);
3464 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
3465 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
3466 // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags?
3467 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
3468 Operand.getNode()->getOperand(0),
3469 &cast<BinaryWithFlagsSDNode>(Operand.getNode())->Flags);
3470 if (OpOpcode == ISD::FNEG) // --X -> X
3471 return Operand.getNode()->getOperand(0);
3474 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
3475 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
3480 SDVTList VTs = getVTList(VT);
3481 SDValue Ops[] = {Operand};
3482 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
3483 FoldingSetNodeID ID;
3484 AddNodeIDNode(ID, Opcode, VTs, Ops);
3486 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3487 return SDValue(E, 0);
3489 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
3490 createOperands(N, Ops);
3491 CSEMap.InsertNode(N, IP);
3493 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
3494 createOperands(N, Ops);
3498 return SDValue(N, 0);
3501 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
3504 case ISD::ADD: return std::make_pair(C1 + C2, true);
3505 case ISD::SUB: return std::make_pair(C1 - C2, true);
3506 case ISD::MUL: return std::make_pair(C1 * C2, true);
3507 case ISD::AND: return std::make_pair(C1 & C2, true);
3508 case ISD::OR: return std::make_pair(C1 | C2, true);
3509 case ISD::XOR: return std::make_pair(C1 ^ C2, true);
3510 case ISD::SHL: return std::make_pair(C1 << C2, true);
3511 case ISD::SRL: return std::make_pair(C1.lshr(C2), true);
3512 case ISD::SRA: return std::make_pair(C1.ashr(C2), true);
3513 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
3514 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
3515 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true);
3516 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true);
3517 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true);
3518 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? C1 : C2, true);
3520 if (!C2.getBoolValue())
3522 return std::make_pair(C1.udiv(C2), true);
3524 if (!C2.getBoolValue())
3526 return std::make_pair(C1.urem(C2), true);
3528 if (!C2.getBoolValue())
3530 return std::make_pair(C1.sdiv(C2), true);
3532 if (!C2.getBoolValue())
3534 return std::make_pair(C1.srem(C2), true);
3536 return std::make_pair(APInt(1, 0), false);
3539 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
3540 EVT VT, const ConstantSDNode *Cst1,
3541 const ConstantSDNode *Cst2) {
3542 if (Cst1->isOpaque() || Cst2->isOpaque())
3545 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(),
3546 Cst2->getAPIntValue());
3549 return getConstant(Folded.first, DL, VT);
3552 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
3553 const GlobalAddressSDNode *GA,
3555 if (GA->getOpcode() != ISD::GlobalAddress)
3557 if (!TLI->isOffsetFoldingLegal(GA))
3559 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2);
3562 int64_t Offset = Cst2->getSExtValue();
3564 case ISD::ADD: break;
3565 case ISD::SUB: Offset = -uint64_t(Offset); break;
3566 default: return SDValue();
3568 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT,
3569 GA->getOffset() + uint64_t(Offset));
3572 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
3573 EVT VT, SDNode *Cst1,
3575 // If the opcode is a target-specific ISD node, there's nothing we can
3576 // do here and the operand rules may not line up with the below, so bail early.
3578 if (Opcode >= ISD::BUILTIN_OP_END)
3581 // Handle the case of two scalars.
3582 if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
3583 if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
3584 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2);
3585 assert((!Folded || !VT.isVector()) &&
3586 "Can't fold vector ops with scalar operands");
3591 // fold (add Sym, c) -> Sym+c
3592 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
3593 return FoldSymbolOffset(Opcode, VT, GA, Cst2);
3594 if (isCommutativeBinOp(Opcode))
3595 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
3596 return FoldSymbolOffset(Opcode, VT, GA, Cst1);
3598 // For vectors extract each constant element into Inputs so we can constant
3599 // fold them individually.
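// Worked example (illustrative values, not from the original source): folding
// (add <2 x i32> <1, 2>, <10, 20>) folds each lane to 11 and 22 and then
// rebuilds the result as the build_vector <11, 22>.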
3600 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
3601 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
3605 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
3607 EVT SVT = VT.getScalarType();
3608 SmallVector<SDValue, 4> Outputs;
3609 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
3610 SDValue V1 = BV1->getOperand(I);
3611 SDValue V2 = BV2->getOperand(I);
3613 // Avoid BUILD_VECTOR nodes that perform implicit truncation.
3614 // FIXME: This is valid and could be handled by truncation.
3615 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
3618 // Fold one vector element.
3619 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
3621 // Scalar folding only succeeded if the result is a constant or UNDEF.
3622 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
3623 ScalarResult.getOpcode() != ISD::ConstantFP)
3625 Outputs.push_back(ScalarResult);
3628 assert(VT.getVectorNumElements() == Outputs.size() &&
3629 "Vector size mismatch!");
3631 // We may have a vector type but a scalar result. Create a splat.
3632 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
3634 // Build a big vector out of the scalar elements we generated.
3635 return getBuildVector(VT, SDLoc(), Outputs);
3638 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
3639 const SDLoc &DL, EVT VT,
3640 ArrayRef<SDValue> Ops,
3641 const SDNodeFlags *Flags) {
3642 // If the opcode is a target-specific ISD node, there's nothing we can
3643 // do here and the operand rules may not line up with the below, so bail early.
3645 if (Opcode >= ISD::BUILTIN_OP_END)
3648 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
3652 unsigned NumElts = VT.getVectorNumElements();
3654 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
3655 return !Op.getValueType().isVector() ||
3656 Op.getValueType().getVectorNumElements() == NumElts;
3659 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
3660 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
3661 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
3662 (BV && BV->isConstant());
3665 // All operands must be vector types with the same number of elements as
3666 // the result type and must be either UNDEF or a build vector of constant
3667 // or UNDEF scalars.
3668 if (!all_of(Ops, IsConstantBuildVectorOrUndef) ||
3669 !all_of(Ops, IsScalarOrSameVectorSize))
3672 // If we are comparing vectors, then the result needs to be an i1 boolean
3673 // that is then sign-extended back to the legal result type.
3674 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
3676 // Find legal integer scalar type for constant promotion and
3677 // ensure that its scalar size is at least as large as the source.
3678 EVT LegalSVT = VT.getScalarType();
3679 if (LegalSVT.isInteger()) {
3680 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
3681 if (LegalSVT.bitsLT(VT.getScalarType()))
3685 // Constant fold each scalar lane separately.
3686 SmallVector<SDValue, 4> ScalarResults;
3687 for (unsigned i = 0; i != NumElts; i++) {
3688 SmallVector<SDValue, 4> ScalarOps;
3689 for (SDValue Op : Ops) {
3690 EVT InSVT = Op.getValueType().getScalarType();
3691 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
3693 // We've checked that this is UNDEF or a constant of some kind.
3695 ScalarOps.push_back(getUNDEF(InSVT));
3697 ScalarOps.push_back(Op);
3701 SDValue ScalarOp = InBV->getOperand(i);
3702 EVT ScalarVT = ScalarOp.getValueType();
3704 // Build vector (integer) scalar operands may need implicit
3705 // truncation - do this before constant folding.
3706 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
3707 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
3709 ScalarOps.push_back(ScalarOp);
3712 // Constant fold the scalar operands.
3713 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
3715 // Legalize the (integer) scalar constant if necessary.
3716 if (LegalSVT != SVT)
3717 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
3719 // Scalar folding only succeeded if the result is a constant or UNDEF.
3720 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
3721 ScalarResult.getOpcode() != ISD::ConstantFP)
3723 ScalarResults.push_back(ScalarResult);
3726 return getBuildVector(VT, DL, ScalarResults);
3729 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3730 SDValue N1, SDValue N2,
3731 const SDNodeFlags *Flags) {
3732 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
3733 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
3734 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3735 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
3737 // Canonicalize constant to RHS if commutative.
3738 if (isCommutativeBinOp(Opcode)) {
3740 std::swap(N1C, N2C);
3742 } else if (N1CFP && !N2CFP) {
3743 std::swap(N1CFP, N2CFP);
3750 case ISD::TokenFactor:
3751 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
3752 N2.getValueType() == MVT::Other && "Invalid token factor!");
3753 // Fold trivial token factors.
3754 if (N1.getOpcode() == ISD::EntryToken) return N2;
3755 if (N2.getOpcode() == ISD::EntryToken) return N1;
3756 if (N1 == N2) return N1;
3758 case ISD::CONCAT_VECTORS: {
3759 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
3760 SDValue Ops[] = {N1, N2};
3761 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
3766 assert(VT.isInteger() && "This operator does not apply to FP types!");
3767 assert(N1.getValueType() == N2.getValueType() &&
3768 N1.getValueType() == VT && "Binary operator types must match!");
3769 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
3770 // worth handling here.
3771 if (N2C && N2C->isNullValue())
3773 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
3780 assert(VT.isInteger() && "This operator does not apply to FP types!");
3781 assert(N1.getValueType() == N2.getValueType() &&
3782 N1.getValueType() == VT && "Binary operator types must match!");
3783 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
3784 // it's worth handling here.
3785 if (N2C && N2C->isNullValue())
3799 assert(VT.isInteger() && "This operator does not apply to FP types!");
3800 assert(N1.getValueType() == N2.getValueType() &&
3801 N1.getValueType() == VT && "Binary operator types must match!");
3808 if (getTarget().Options.UnsafeFPMath) {
3809 if (Opcode == ISD::FADD) {
3811 if (N2CFP && N2CFP->getValueAPF().isZero())
3813 } else if (Opcode == ISD::FSUB) {
3815 if (N2CFP && N2CFP->getValueAPF().isZero())
3817 } else if (Opcode == ISD::FMUL) {
3819 if (N2CFP && N2CFP->isZero())
3822 if (N2CFP && N2CFP->isExactlyValue(1.0))
3826 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
3827 assert(N1.getValueType() == N2.getValueType() &&
3828 N1.getValueType() == VT && "Binary operator types must match!");
3830 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
3831 assert(N1.getValueType() == VT &&
3832 N1.getValueType().isFloatingPoint() &&
3833 N2.getValueType().isFloatingPoint() &&
3834 "Invalid FCOPYSIGN!");
3841 assert(VT == N1.getValueType() &&
3842 "Shift operators' return type must be the same as their first arg");
3843 assert(VT.isInteger() && N2.getValueType().isInteger() &&
3844 "Shifts only work on integers");
3845 assert((!VT.isVector() || VT == N2.getValueType()) &&
3846 "Vector shift amounts must have the same type as their first arg");
3847 // Verify that the shift amount VT is big enough to hold valid shift
3848 // amounts. This catches things like trying to shift an i1024 value by an
3849 // i8, which is easy to fall into in generic code that uses
3850 // TLI.getShiftAmountTy().
3851 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
3852 "Invalid use of small shift amount with oversized value!");
3854 // Always fold shifts of i1 values so the code generator doesn't need to
3855 // handle them. Since we know the size of the shift has to be less than the
3856 // size of the value, the shift/rotate count is guaranteed to be zero.
3859 if (N2C && N2C->isNullValue())
3862 case ISD::FP_ROUND_INREG: {
3863 EVT EVT = cast<VTSDNode>(N2)->getVT();
3864 assert(VT == N1.getValueType() && "Not an inreg round!");
3865 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
3866 "Cannot FP_ROUND_INREG integer types");
3867 assert(EVT.isVector() == VT.isVector() &&
3868 "FP_ROUND_INREG type should be vector iff the operand type is vector!");
3870 assert((!EVT.isVector() ||
3871 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3872 "Vector element counts must match in FP_ROUND_INREG");
3873 assert(EVT.bitsLE(VT) && "Not rounding down!");
3875 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
3879 assert(VT.isFloatingPoint() &&
3880 N1.getValueType().isFloatingPoint() &&
3881 VT.bitsLE(N1.getValueType()) &&
3882 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
3883 "Invalid FP_ROUND!");
3884 if (N1.getValueType() == VT) return N1; // noop conversion.
3886 case ISD::AssertSext:
3887 case ISD::AssertZext: {
3888 EVT EVT = cast<VTSDNode>(N2)->getVT();
3889 assert(VT == N1.getValueType() && "Not an inreg extend!");
3890 assert(VT.isInteger() && EVT.isInteger() &&
3891 "Cannot *_EXTEND_INREG FP types");
3892 assert(!EVT.isVector() &&
3893 "AssertSExt/AssertZExt type should be the vector element type "
3894 "rather than the vector type!");
3895 assert(EVT.bitsLE(VT) && "Not extending!");
3896 if (VT == EVT) return N1; // noop assertion.
3899 case ISD::SIGN_EXTEND_INREG: {
3900 EVT EVT = cast<VTSDNode>(N2)->getVT();
3901 assert(VT == N1.getValueType() && "Not an inreg extend!");
3902 assert(VT.isInteger() && EVT.isInteger() &&
3903 "Cannot *_EXTEND_INREG FP types");
3904 assert(EVT.isVector() == VT.isVector() &&
3905 "SIGN_EXTEND_INREG type should be vector iff the operand type is vector!");
3907 assert((!EVT.isVector() ||
3908 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
3909 "Vector element counts must match in SIGN_EXTEND_INREG");
3910 assert(EVT.bitsLE(VT) && "Not extending!");
3911 if (EVT == VT) return N1; // Not actually extending
3913 auto SignExtendInReg = [&](APInt Val) {
3914 unsigned FromBits = EVT.getScalarSizeInBits();
3915 Val <<= Val.getBitWidth() - FromBits;
3916 Val = Val.ashr(Val.getBitWidth() - FromBits);
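// Worked example (illustrative values): sign-extending the low 8 bits of the
// 16-bit constant 0x00F0 shifts left by 8 (giving 0xF000) and then
// arithmetic-shifts right by 8, giving 0xFFF0.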
3917 return getConstant(Val, DL, VT.getScalarType());
3921 const APInt &Val = N1C->getAPIntValue();
3922 return SignExtendInReg(Val);
3924 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
3925 SmallVector<SDValue, 8> Ops;
3926 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
3927 SDValue Op = N1.getOperand(i);
3929 Ops.push_back(getUNDEF(VT.getScalarType()));
3932 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3933 APInt Val = C->getAPIntValue();
3934 Val = Val.zextOrTrunc(VT.getScalarSizeInBits());
3935 Ops.push_back(SignExtendInReg(Val));
3940 if (Ops.size() == VT.getVectorNumElements())
3941 return getBuildVector(VT, DL, Ops);
3945 case ISD::EXTRACT_VECTOR_ELT:
3946 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
3948 return getUNDEF(VT);
3950 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
3951 if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements())
3952 return getUNDEF(VT);
3954 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
3955 // expanding copies of large vectors from registers.
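// Worked example (illustrative types): extracting element 5 of
// (concat_vectors v4i32:A, v4i32:B) becomes (extract_vector_elt B, 1), since
// each concatenated operand contributes 4 elements.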
3957 N1.getOpcode() == ISD::CONCAT_VECTORS &&
3958 N1.getNumOperands() > 0) {
3960 N1.getOperand(0).getValueType().getVectorNumElements();
3961 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3962 N1.getOperand(N2C->getZExtValue() / Factor),
3963 getConstant(N2C->getZExtValue() % Factor, DL,
3964 N2.getValueType()));
3967 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3968 // expanding large vector constants.
3969 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3970 SDValue Elt = N1.getOperand(N2C->getZExtValue());
3972 if (VT != Elt.getValueType())
3973 // If the vector element type is not legal, the BUILD_VECTOR operands
3974 // are promoted and implicitly truncated, and the result implicitly
3975 // extended. Make that explicit here.
3976 Elt = getAnyExtOrTrunc(Elt, DL, VT);
3981 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3982 // operations are lowered to scalars.
3983 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3984 // If the indices are the same, return the inserted element; else, if
3985 // the indices are known to be different, extract the element from
3986 // the original vector.
3987 SDValue N1Op2 = N1.getOperand(2);
3988 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
3990 if (N1Op2C && N2C) {
3991 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3992 if (VT == N1.getOperand(1).getValueType())
3993 return N1.getOperand(1);
3995 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3998 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
4002 case ISD::EXTRACT_ELEMENT:
4003 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
4004 assert(!N1.getValueType().isVector() && !VT.isVector() &&
4005 (N1.getValueType().isInteger() == VT.isInteger()) &&
4006 N1.getValueType() != VT &&
4007 "Wrong types for EXTRACT_ELEMENT!");
4009 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
4010 // 64-bit integers into 32-bit parts. Instead of building the extract of
4011 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
4012 if (N1.getOpcode() == ISD::BUILD_PAIR)
4013 return N1.getOperand(N2C->getZExtValue());
4015 // EXTRACT_ELEMENT of a constant int is also very common.
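// Worked example (illustrative values): extracting element 1 of the i64
// constant 0x1122334455667788 as an i32 shifts right by 32 bits and
// truncates, giving 0x11223344; element 0 gives 0x55667788.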
4017 unsigned ElementSize = VT.getSizeInBits();
4018 unsigned Shift = ElementSize * N2C->getZExtValue();
4019 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
4020 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
4023 case ISD::EXTRACT_SUBVECTOR:
4024 if (VT.isSimple() && N1.getValueType().isSimple()) {
4025 assert(VT.isVector() && N1.getValueType().isVector() &&
4026 "Extract subvector VTs must be vectors!");
4027 assert(VT.getVectorElementType() ==
4028 N1.getValueType().getVectorElementType() &&
4029 "Extract subvector VTs must have the same element type!");
4030 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
4031 "Extract subvector must be from larger vector to smaller vector!");
4034 assert((VT.getVectorNumElements() + N2C->getZExtValue()
4035 <= N1.getValueType().getVectorNumElements())
4036 && "Extract subvector overflow!");
4039 // Trivial extraction.
4040 if (VT.getSimpleVT() == N1.getSimpleValueType())
4043 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
4044 // during shuffle legalization.
4045 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
4046 VT == N1.getOperand(1).getValueType())
4047 return N1.getOperand(1);
4052 // Perform trivial constant folding.
4054 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
4057 // Constant fold FP operations.
4058 bool HasFPExceptions = TLI->hasFloatingPointExceptions();
4061 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
4062 APFloat::opStatus s;
4065 s = V1.add(V2, APFloat::rmNearestTiesToEven);
4066 if (!HasFPExceptions || s != APFloat::opInvalidOp)
4067 return getConstantFP(V1, DL, VT);
4070 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
4071 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
4072 return getConstantFP(V1, DL, VT);
4075 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
4076 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
4077 return getConstantFP(V1, DL, VT);
4080 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
4081 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
4082 s!=APFloat::opDivByZero)) {
4083 return getConstantFP(V1, DL, VT);
4088 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
4089 s!=APFloat::opDivByZero)) {
4090 return getConstantFP(V1, DL, VT);
4093 case ISD::FCOPYSIGN:
4095 return getConstantFP(V1, DL, VT);
4100 if (Opcode == ISD::FP_ROUND) {
4101 APFloat V = N1CFP->getValueAPF(); // make copy
4103 // This can return overflow, underflow, or inexact; we don't care.
4104 // FIXME need to be more flexible about rounding mode.
4105 (void)V.convert(EVTToAPFloatSemantics(VT),
4106 APFloat::rmNearestTiesToEven, &ignored);
4107 return getConstantFP(V, DL, VT);
4111 // Canonicalize an UNDEF to the RHS, even over a constant.
4113 if (isCommutativeBinOp(Opcode)) {
4117 case ISD::FP_ROUND_INREG:
4118 case ISD::SIGN_EXTEND_INREG:
4124 return N1; // fold op(undef, arg2) -> undef
4132 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
4133 // For vectors, we can't easily build an all zero vector, just return
4140 // Fold a bunch of operators when the RHS is undef.
4145 // Handle undef ^ undef -> 0 special case. This is a common idiom (misuse).
4147 return getConstant(0, DL, VT);
4157 return N2; // fold op(arg1, undef) -> undef
4163 if (getTarget().Options.UnsafeFPMath)
4171 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
4172 // For vectors, we can't easily build an all zero vector, just return
4177 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
4178 // For vectors, we can't easily build an all one vector, just return
4186 // Memoize this node if possible.
4188 SDVTList VTs = getVTList(VT);
4189 if (VT != MVT::Glue) {
4190 SDValue Ops[] = {N1, N2};
4191 FoldingSetNodeID ID;
4192 AddNodeIDNode(ID, Opcode, VTs, Ops);
4194 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4196 E->intersectFlagsWith(Flags);
4197 return SDValue(E, 0);
4200 N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
4201 CSEMap.InsertNode(N, IP);
4203 N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
4207 return SDValue(N, 0);
4210 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4211 SDValue N1, SDValue N2, SDValue N3) {
4212 // Perform various simplifications.
4215 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4216 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
4217 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
4218 if (N1CFP && N2CFP && N3CFP) {
4219 APFloat V1 = N1CFP->getValueAPF();
4220 const APFloat &V2 = N2CFP->getValueAPF();
4221 const APFloat &V3 = N3CFP->getValueAPF();
4222 APFloat::opStatus s =
4223 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
4224 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp)
4225 return getConstantFP(V1, DL, VT);
4229 case ISD::CONCAT_VECTORS: {
4230 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
4231 SDValue Ops[] = {N1, N2, N3};
4232 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
4237 // Use FoldSetCC to simplify SETCC's.
4238 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
4240 // Vector constant folding.
4241 SDValue Ops[] = {N1, N2, N3};
4242 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4247 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
4248 if (N1C->getZExtValue())
4249 return N2; // select true, X, Y -> X
4250 return N3; // select false, X, Y -> Y
4253 if (N2 == N3) return N2; // select C, X, X -> X
4255 case ISD::VECTOR_SHUFFLE:
4256 llvm_unreachable("should use getVectorShuffle constructor!");
4257 case ISD::INSERT_VECTOR_ELT: {
4258 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
4259 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
4260 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
4261 return getUNDEF(VT);
4264 case ISD::INSERT_SUBVECTOR: {
4266 if (VT.isSimple() && N1.getValueType().isSimple()
4267 && N2.getValueType().isSimple()) {
4268 assert(VT.isVector() && N1.getValueType().isVector() &&
4269 N2.getValueType().isVector() &&
4270 "Insert subvector VTs must be vectors");
4271 assert(VT == N1.getValueType() &&
4272 "Dest and insert subvector source types must match!");
4273 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
4274 "Insert subvector must be from smaller vector to larger vector!");
4275 if (isa<ConstantSDNode>(Index)) {
4276 assert((N2.getValueType().getVectorNumElements() +
4277 cast<ConstantSDNode>(Index)->getZExtValue()
4278 <= VT.getVectorNumElements())
4279 && "Insert subvector overflow!");
4282 // Trivial insertion.
4283 if (VT.getSimpleVT() == N2.getSimpleValueType())
4289 // Fold bit_convert nodes from a type to themselves.
4290 if (N1.getValueType() == VT)
4295 // Memoize node if it doesn't produce a flag.
4297 SDVTList VTs = getVTList(VT);
4298 SDValue Ops[] = {N1, N2, N3};
4299 if (VT != MVT::Glue) {
4300 FoldingSetNodeID ID;
4301 AddNodeIDNode(ID, Opcode, VTs, Ops);
4303 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4304 return SDValue(E, 0);
4306 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4307 createOperands(N, Ops);
4308 CSEMap.InsertNode(N, IP);
4310 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4311 createOperands(N, Ops);
4315 return SDValue(N, 0);
4318 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4319 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
4320 SDValue Ops[] = { N1, N2, N3, N4 };
4321 return getNode(Opcode, DL, VT, Ops);
4324 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4325 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
4327 SDValue Ops[] = { N1, N2, N3, N4, N5 };
4328 return getNode(Opcode, DL, VT, Ops);
4331 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
4332 /// the incoming stack arguments to be loaded from the stack.
4333 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
4334 SmallVector<SDValue, 8> ArgChains;
4336 // Include the original chain at the beginning of the list. When this is
4337 // used by target LowerCall hooks, this helps legalize find the
4338 // CALLSEQ_BEGIN node.
4339 ArgChains.push_back(Chain);
4341 // Add a chain value for each stack argument.
4342 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
4343 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
4344 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
4345 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
4346 if (FI->getIndex() < 0)
4347 ArgChains.push_back(SDValue(L, 1));
4349 // Build a tokenfactor for all the chains.
4350 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
4353 /// getMemsetValue - Vectorized representation of the memset value operand.
4355 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
4357 assert(!Value.isUndef());
4359 unsigned NumBits = VT.getScalarSizeInBits();
4360 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
4361 assert(C->getAPIntValue().getBitWidth() == 8);
4362 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
4364 return DAG.getConstant(Val, dl, VT);
4365 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
4369 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
4370 EVT IntVT = VT.getScalarType();
4371 if (!IntVT.isInteger())
4372 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
4374 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
4376 // Use a multiplication with 0x010101... to extend the input to the required length.
4378 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
4379 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
4380 DAG.getConstant(Magic, dl, IntVT));
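// Worked example (illustrative width): with IntVT == i32, a fill byte of
// 0xAB is multiplied by 0x01010101 to produce the splat value 0xABABABAB.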
4383 if (VT != Value.getValueType() && !VT.isInteger())
4384 Value = DAG.getBitcast(VT.getScalarType(), Value);
4385 if (VT != Value.getValueType())
4386 Value = DAG.getSplatBuildVector(VT, dl, Value);
4391 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
4392 /// used when a memcpy is turned into a memset when the source is a constant
4394 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
4395 const TargetLowering &TLI, StringRef Str) {
4396 // Handle vector with all elements zero.
4399 return DAG.getConstant(0, dl, VT);
4400 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
4401 return DAG.getConstantFP(0.0, dl, VT);
4402 else if (VT.isVector()) {
4403 unsigned NumElts = VT.getVectorNumElements();
4404 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
4405 return DAG.getNode(ISD::BITCAST, dl, VT,
4406 DAG.getConstant(0, dl,
4407 EVT::getVectorVT(*DAG.getContext(),
4410 llvm_unreachable("Expected type!");
4413 assert(!VT.isVector() && "Can't handle vector type here!");
4414 unsigned NumVTBits = VT.getSizeInBits();
4415 unsigned NumVTBytes = NumVTBits / 8;
4416 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
4418 APInt Val(NumVTBits, 0);
4419 if (DAG.getDataLayout().isLittleEndian()) {
4420 for (unsigned i = 0; i != NumBytes; ++i)
4421 Val |= (uint64_t)(unsigned char)Str[i] << i*8;
4423 for (unsigned i = 0; i != NumBytes; ++i)
4424 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
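// Worked example (illustrative values): for Str == "abcd" and VT == MVT::i32,
// the little-endian loop packs 0x64636261 while the big-endian loop packs
// 0x61626364.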
4427 // If the "cost" of materializing the integer immediate is less than the cost
4428 // of a load, then it is cost effective to turn the load into the immediate.
4429 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
4430 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
4431 return DAG.getConstant(Val, dl, VT);
4432 return SDValue(nullptr, 0);
4435 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset,
4437 EVT VT = Base.getValueType();
4438 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
4441 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
4443 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
4444 uint64_t SrcDelta = 0;
4445 GlobalAddressSDNode *G = nullptr;
4446 if (Src.getOpcode() == ISD::GlobalAddress)
4447 G = cast<GlobalAddressSDNode>(Src);
4448 else if (Src.getOpcode() == ISD::ADD &&
4449 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
4450 Src.getOperand(1).getOpcode() == ISD::Constant) {
4451 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
4452 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
4457 return getConstantStringInfo(G->getGlobal(), Str,
4458 SrcDelta + G->getOffset(), false);
4461 /// Determines the optimal series of memory ops to replace the memset / memcpy.
4462 /// Return true if the number of memory ops is below the threshold (Limit).
4463 /// It returns, via the MemOps out parameter, the types of the sequence of
4464 /// memory ops used to perform the memset / memcpy.
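/// For illustration only (actual results depend on the target hooks queried
/// below): a 7-byte request on a target where i32, i16 and i8 are all legal,
/// with no overlapping accesses, might produce MemOps == { i32, i16, i8 }.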
4465 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
4466 unsigned Limit, uint64_t Size,
4467 unsigned DstAlign, unsigned SrcAlign,
4472 unsigned DstAS, unsigned SrcAS,
4474 const TargetLowering &TLI) {
4475 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
4476 "Expecting memcpy / memset source to meet alignment requirement!");
4477 // If 'SrcAlign' is zero, that means the memory operation does not need to
4478 // load the value, i.e. memset or memcpy from constant string. Otherwise,
4479 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
4480 // is the specified alignment of the memory operation. If it is zero, that
4481 // means it's possible to change the alignment of the destination.
4482 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
4483 // not need to be loaded.
4484 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
4485 IsMemset, ZeroMemset, MemcpyStrSrc,
4486 DAG.getMachineFunction());
4488 if (VT == MVT::Other) {
4489 if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(DstAS) ||
4490 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign)) {
4491 VT = TLI.getPointerTy(DAG.getDataLayout(), DstAS);
4493 switch (DstAlign & 7) {
4494 case 0: VT = MVT::i64; break;
4495 case 4: VT = MVT::i32; break;
4496 case 2: VT = MVT::i16; break;
4497 default: VT = MVT::i8; break;
4502 while (!TLI.isTypeLegal(LVT))
4503 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
4504 assert(LVT.isInteger());
4510 unsigned NumMemOps = 0;
4512 unsigned VTSize = VT.getSizeInBits() / 8;
4513 while (VTSize > Size) {
4514 // For now, only use non-vector loads / stores for the left-over pieces.
4519 if (VT.isVector() || VT.isFloatingPoint()) {
4520 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
4521 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
4522 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
4524 else if (NewVT == MVT::i64 &&
4525 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
4526 TLI.isSafeMemOpType(MVT::f64)) {
4527 // i64 is usually not legal on 32-bit targets, but f64 may be.
4535 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
4536 if (NewVT == MVT::i8)
4538 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
4540 NewVTSize = NewVT.getSizeInBits() / 8;
4542 // If the new VT cannot cover all of the remaining bits, then consider
4543 // issuing one (or a pair of) unaligned and overlapping loads / stores.
4544 // FIXME: Only does this for 64-bit or more since we don't have proper
4545 // cost model for unaligned load / store.
4547 if (NumMemOps && AllowOverlap &&
4548 VTSize >= 8 && NewVTSize < Size &&
4549 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast)
4557 if (++NumMemOps > Limit)
4560 MemOps.push_back(VT);
4567 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
4568 // On Darwin, -Os means optimize for size without hurting performance, so
4569 // only really optimize for size when -Oz (MinSize) is used.
4570 if (MF.getTarget().getTargetTriple().isOSDarwin())
4571 return MF.getFunction()->optForMinSize();
4572 return MF.getFunction()->optForSize();
4575 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
4576 SDValue Chain, SDValue Dst, SDValue Src,
4577 uint64_t Size, unsigned Align,
4578 bool isVol, bool AlwaysInline,
4579 MachinePointerInfo DstPtrInfo,
4580 MachinePointerInfo SrcPtrInfo) {
4581 // Turn a memcpy of undef to nop.
4585 // Expand memcpy to a series of load and store ops if the size operand falls
4586 // below a certain threshold.
4587 // TODO: In the AlwaysInline case, if the size is big then generate a loop
4588 // rather than maybe a humongous number of loads and stores.
4589 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4590 std::vector<EVT> MemOps;
4591 bool DstAlignCanChange = false;
4592 MachineFunction &MF = DAG.getMachineFunction();
4593 MachineFrameInfo &MFI = MF.getFrameInfo();
4594 bool OptSize = shouldLowerMemFuncForSize(MF);
4595 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4596 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
4597 DstAlignCanChange = true;
4598 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
4599 if (Align > SrcAlign)
4602 bool CopyFromStr = isMemSrcFromString(Src, Str);
4603 bool isZeroStr = CopyFromStr && Str.empty();
4604 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
4606 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
4607 (DstAlignCanChange ? 0 : Align),
4608 (isZeroStr ? 0 : SrcAlign),
4609 false, false, CopyFromStr, true,
4610 DstPtrInfo.getAddrSpace(),
4611 SrcPtrInfo.getAddrSpace(),
4615 if (DstAlignCanChange) {
4616 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4617 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4619 // Don't promote to an alignment that would require dynamic stack realignment.
4621 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
4622 if (!TRI->needsStackRealignment(MF))
4623 while (NewAlign > Align &&
4624 DAG.getDataLayout().exceedsNaturalStackAlignment(NewAlign))
4627 if (NewAlign > Align) {
4628 // Give the stack frame object a larger alignment if needed.
4629 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
4630 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
4635 MachineMemOperand::Flags MMOFlags =
4636 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
4637 SmallVector<SDValue, 8> OutChains;
4638 unsigned NumMemOps = MemOps.size();
4639 uint64_t SrcOff = 0, DstOff = 0;
4640 for (unsigned i = 0; i != NumMemOps; ++i) {
4642 unsigned VTSize = VT.getSizeInBits() / 8;
4643 SDValue Value, Store;
4645 if (VTSize > Size) {
4646 // Issuing an unaligned load / store pair that overlaps with the previous
4647 // pair. Adjust the offset accordingly.
4648 assert(i == NumMemOps-1 && i != 0);
4649 SrcOff -= VTSize - Size;
4650 DstOff -= VTSize - Size;
4654 (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
4655 // It's unlikely a store of a vector immediate can be done in a single
4656 // instruction. It would require a load from a constant pool first.
4657 // We only handle zero vectors here.
4658 // FIXME: Handle other cases where store of vector immediate is done in
4659 // a single instruction.
4660 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
4661 if (Value.getNode())
4662 Store = DAG.getStore(Chain, dl, Value,
4663 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4664 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
4667 if (!Store.getNode()) {
4668 // The type might not be legal for the target. This should only happen
4669 // if the type is smaller than a legal type, as on PPC, so the right
4670 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
4671 // to Load/Store if NVT==VT.
4672 // FIXME does the case above also need this?
4673 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
4674 assert(NVT.bitsGE(VT));
4675 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
4676 DAG.getMemBasePlusOffset(Src, SrcOff, dl),
4677 SrcPtrInfo.getWithOffset(SrcOff), VT,
4678 MinAlign(SrcAlign, SrcOff), MMOFlags);
4679 OutChains.push_back(Value.getValue(1));
4680 Store = DAG.getTruncStore(
4681 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4682 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
4684 OutChains.push_back(Store);
4690 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4693 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
4694 SDValue Chain, SDValue Dst, SDValue Src,
4695 uint64_t Size, unsigned Align,
4696 bool isVol, bool AlwaysInline,
4697 MachinePointerInfo DstPtrInfo,
4698 MachinePointerInfo SrcPtrInfo) {
4699 // Turn a memmove of undef to nop.
4703 // Expand memmove to a series of load and store ops if the size operand falls
4704 // below a certain threshold.
4705 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4706 std::vector<EVT> MemOps;
4707 bool DstAlignCanChange = false;
4708 MachineFunction &MF = DAG.getMachineFunction();
4709 MachineFrameInfo &MFI = MF.getFrameInfo();
4710 bool OptSize = shouldLowerMemFuncForSize(MF);
4711 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4712 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
4713 DstAlignCanChange = true;
4714 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
4715 if (Align > SrcAlign)
4717 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
4719 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
4720 (DstAlignCanChange ? 0 : Align), SrcAlign,
4721 false, false, false, false,
4722 DstPtrInfo.getAddrSpace(),
4723 SrcPtrInfo.getAddrSpace(),
4727 if (DstAlignCanChange) {
4728 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4729 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4730 if (NewAlign > Align) {
4731 // Give the stack frame object a larger alignment if needed.
4732 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
4733 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
4738 MachineMemOperand::Flags MMOFlags =
4739 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
4740 uint64_t SrcOff = 0, DstOff = 0;
4741 SmallVector<SDValue, 8> LoadValues;
4742 SmallVector<SDValue, 8> LoadChains;
4743 SmallVector<SDValue, 8> OutChains;
4744 unsigned NumMemOps = MemOps.size();
4745 for (unsigned i = 0; i < NumMemOps; i++) {
4747 unsigned VTSize = VT.getSizeInBits() / 8;
4751 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
4752 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, MMOFlags);
4753 LoadValues.push_back(Value);
4754 LoadChains.push_back(Value.getValue(1));
4757 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
4759 for (unsigned i = 0; i < NumMemOps; i++) {
4761 unsigned VTSize = VT.getSizeInBits() / 8;
4764 Store = DAG.getStore(Chain, dl, LoadValues[i],
4765 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4766 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
4767 OutChains.push_back(Store);
4771 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4774 /// \brief Lower the call to the 'memset' intrinsic function into a series of store operations.
4777 /// \param DAG Selection DAG where lowered code is placed.
4778 /// \param dl Link to corresponding IR location.
4779 /// \param Chain Control flow dependency.
4780 /// \param Dst Pointer to destination memory location.
4781 /// \param Src Value of byte to write into the memory.
4782 /// \param Size Number of bytes to write.
4783 /// \param Align Alignment of the destination in bytes.
4784 /// \param isVol True if destination is volatile.
4785 /// \param DstPtrInfo IR information on the memory pointer.
4786 /// \returns New head in the control flow, if lowering was successful, empty
4787 /// SDValue otherwise.
4789 /// The function tries to replace 'llvm.memset' intrinsic with several store
4790 /// operations and value calculation code. This is usually profitable for small memory sizes.
4792 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
4793 SDValue Chain, SDValue Dst, SDValue Src,
4794 uint64_t Size, unsigned Align, bool isVol,
4795 MachinePointerInfo DstPtrInfo) {
4796 // Turn a memset of undef to nop.
4800 // Expand memset to a series of load/store ops if the size operand
4801 // falls below a certain threshold.
4802 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4803 std::vector<EVT> MemOps;
4804 bool DstAlignCanChange = false;
4805 MachineFunction &MF = DAG.getMachineFunction();
4806 MachineFrameInfo &MFI = MF.getFrameInfo();
4807 bool OptSize = shouldLowerMemFuncForSize(MF);
4808 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
4809 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
4810 DstAlignCanChange = true;
4812 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
4813 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
4814 Size, (DstAlignCanChange ? 0 : Align), 0,
4815 true, IsZeroVal, false, true,
4816 DstPtrInfo.getAddrSpace(), ~0u,
4820 if (DstAlignCanChange) {
4821 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
4822 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
4823 if (NewAlign > Align) {
4824 // Give the stack frame object a larger alignment if needed.
4825 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
4826 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
4831 SmallVector<SDValue, 8> OutChains;
4832 uint64_t DstOff = 0;
4833 unsigned NumMemOps = MemOps.size();
4835 // Find the largest store and generate the bit pattern for it.
4836 EVT LargestVT = MemOps[0];
4837 for (unsigned i = 1; i < NumMemOps; i++)
4838 if (MemOps[i].bitsGT(LargestVT))
4839 LargestVT = MemOps[i];
4840 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
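// Worked example (illustrative sizes): an 11-byte memset of the byte 0xAB
// with MemOps == { i64, i16, i8 } emits an i64 store of 0xABABABABABABABAB at
// offset 0, an i16 store of 0xABAB at offset 8 and an i8 store of 0xAB at
// offset 10.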
4842 for (unsigned i = 0; i < NumMemOps; i++) {
4844 unsigned VTSize = VT.getSizeInBits() / 8;
4845 if (VTSize > Size) {
4846 // Issuing an unaligned load / store pair that overlaps with the previous
4847 // pair. Adjust the offset accordingly.
4848 assert(i == NumMemOps-1 && i != 0);
4849 DstOff -= VTSize - Size;
4852 // If this store is smaller than the largest store, see whether we can get
4853 // the smaller value for free with a truncate.
4854 SDValue Value = MemSetValue;
4855 if (VT.bitsLT(LargestVT)) {
4856 if (!LargestVT.isVector() && !VT.isVector() &&
4857 TLI.isTruncateFree(LargestVT, VT))
4858 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
4860 Value = getMemsetValue(Src, VT, DAG, dl);
4862 assert(Value.getValueType() == VT && "Value with wrong type.");
4863 SDValue Store = DAG.getStore(
4864 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
4865 DstPtrInfo.getWithOffset(DstOff), Align,
4866 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
4867 OutChains.push_back(Store);
4868 DstOff += VT.getSizeInBits() / 8;
4872 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
4875 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
4877 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
4878 // pointer operands can be losslessly bitcasted to pointers of address space 0
4879 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
4880 report_fatal_error("cannot lower memory intrinsic in address space " +
4885 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
4886 SDValue Src, SDValue Size, unsigned Align,
4887 bool isVol, bool AlwaysInline, bool isTailCall,
4888 MachinePointerInfo DstPtrInfo,
4889 MachinePointerInfo SrcPtrInfo) {
4890 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4892 // Check to see if we should lower the memcpy to loads and stores first.
4893 // For cases within the target-specified limits, this is the best choice.
4894 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4896 // Memcpy with size zero? Just return the original chain.
4897 if (ConstantSize->isNullValue())
4900 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4901 ConstantSize->getZExtValue(),Align,
4902 isVol, false, DstPtrInfo, SrcPtrInfo);
4903 if (Result.getNode())
4907 // Then check to see if we should lower the memcpy with target-specific
4908 // code. If the target chooses to do this, this is the next best.
4910 SDValue Result = TSI->EmitTargetCodeForMemcpy(
4911 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
4912 DstPtrInfo, SrcPtrInfo);
4913 if (Result.getNode())
4917 // If we really need inline code and the target declined to provide it,
4918 // use a (potentially long) sequence of loads and stores.
4920 assert(ConstantSize && "AlwaysInline requires a constant size!");
4921 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
4922 ConstantSize->getZExtValue(), Align, isVol,
4923 true, DstPtrInfo, SrcPtrInfo);
4926 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
4927 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
4929 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
4930 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
4931 // respect volatile, so they may do things like read or write memory
4932 // beyond the given memory regions. But fixing this isn't easy, and most
4933 // people don't care.
4935 // Emit a library call.
4936 TargetLowering::ArgListTy Args;
4937 TargetLowering::ArgListEntry Entry;
4938 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
4939 Entry.Node = Dst; Args.push_back(Entry);
4940 Entry.Node = Src; Args.push_back(Entry);
4941 Entry.Node = Size; Args.push_back(Entry);
4942 // FIXME: pass in SDLoc
4943 TargetLowering::CallLoweringInfo CLI(*this);
4946 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
4947 Dst.getValueType().getTypeForEVT(*getContext()),
4948 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
4949 TLI->getPointerTy(getDataLayout())),
4952 .setTailCall(isTailCall);
4954 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4955 return CallResult.second;
4958 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
4959 SDValue Src, SDValue Size, unsigned Align,
4960 bool isVol, bool isTailCall,
4961 MachinePointerInfo DstPtrInfo,
4962 MachinePointerInfo SrcPtrInfo) {
4963 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
4965 // Check to see if we should lower the memmove to loads and stores first.
4966 // For cases within the target-specified limits, this is the best choice.
4967 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4969 // Memmove with size zero? Just return the original chain.
4970 if (ConstantSize->isNullValue())
4974 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
4975 ConstantSize->getZExtValue(), Align, isVol,
4976 false, DstPtrInfo, SrcPtrInfo);
4977 if (Result.getNode())
4981 // Then check to see if we should lower the memmove with target-specific
4982 // code. If the target chooses to do this, this is the next best.
4984 SDValue Result = TSI->EmitTargetCodeForMemmove(
4985 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
4986 if (Result.getNode())
4990 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
4991 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
4993 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
4994 // not be safe. See memcpy above for more details.
4996 // Emit a library call.
4997 TargetLowering::ArgListTy Args;
4998 TargetLowering::ArgListEntry Entry;
4999 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
5000 Entry.Node = Dst; Args.push_back(Entry);
5001 Entry.Node = Src; Args.push_back(Entry);
5002 Entry.Node = Size; Args.push_back(Entry);
5003 // FIXME: pass in SDLoc
5004 TargetLowering::CallLoweringInfo CLI(*this);
5007 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
5008 Dst.getValueType().getTypeForEVT(*getContext()),
5009 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
5010 TLI->getPointerTy(getDataLayout())),
5013 .setTailCall(isTailCall);
5015 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
5016 return CallResult.second;
5019 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
5020 SDValue Src, SDValue Size, unsigned Align,
5021 bool isVol, bool isTailCall,
5022 MachinePointerInfo DstPtrInfo) {
5023 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
5025 // Check to see if we should lower the memset to stores first.
5026 // For cases within the target-specified limits, this is the best choice.
5027 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
5029 // Memset with size zero? Just return the original chain.
5030 if (ConstantSize->isNullValue())
5034 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
5035 Align, isVol, DstPtrInfo);
5037 if (Result.getNode())
5041 // Then check to see if we should lower the memset with target-specific
5042 // code. If the target chooses to do this, this is the next best.
5044 SDValue Result = TSI->EmitTargetCodeForMemset(
5045 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
5046 if (Result.getNode())
5050 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
5052 // Emit a library call.
5053 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
5054 TargetLowering::ArgListTy Args;
5055 TargetLowering::ArgListEntry Entry;
5056 Entry.Node = Dst; Entry.Ty = IntPtrTy;
5057 Args.push_back(Entry);
5059 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
5060 Args.push_back(Entry);
5062 Entry.Ty = IntPtrTy;
5063 Args.push_back(Entry);
5065 // FIXME: pass in SDLoc
5066 TargetLowering::CallLoweringInfo CLI(*this);
5069 .setCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
5070 Dst.getValueType().getTypeForEVT(*getContext()),
5071 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
5072 TLI->getPointerTy(getDataLayout())),
5075 .setTailCall(isTailCall);
5077 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
5078 return CallResult.second;
5081 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5082 SDVTList VTList, ArrayRef<SDValue> Ops,
5083 MachineMemOperand *MMO) {
5084 FoldingSetNodeID ID;
5085 ID.AddInteger(MemVT.getRawBits());
5086 AddNodeIDNode(ID, Opcode, VTList, Ops);
5087 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5089 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5090 cast<AtomicSDNode>(E)->refineAlignment(MMO);
5091 return SDValue(E, 0);
5094 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5095 VTList, MemVT, MMO);
5096 createOperands(N, Ops);
5098 CSEMap.InsertNode(N, IP);
5100 return SDValue(N, 0);
5103 SDValue SelectionDAG::getAtomicCmpSwap(
5104 unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain,
5105 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
5106 unsigned Alignment, AtomicOrdering SuccessOrdering,
5107 AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
5108 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
5109 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
5110 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
5112 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5113 Alignment = getEVTAlignment(MemVT);
5115 MachineFunction &MF = getMachineFunction();
5117 // FIXME: Volatile isn't really correct; we should keep track of atomic
5118 // orderings in the memoperand.
5119 auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad |
5120 MachineMemOperand::MOStore;
5121 MachineMemOperand *MMO =
5122 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
5123 AAMDNodes(), nullptr, SynchScope, SuccessOrdering,
5126 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO);
5129 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
5130 EVT MemVT, SDVTList VTs, SDValue Chain,
5131 SDValue Ptr, SDValue Cmp, SDValue Swp,
5132 MachineMemOperand *MMO) {
5133 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
5134 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
5135 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
5137 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
5138 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
5141 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5142 SDValue Chain, SDValue Ptr, SDValue Val,
5143 const Value *PtrVal, unsigned Alignment,
5144 AtomicOrdering Ordering,
5145 SynchronizationScope SynchScope) {
5146 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5147 Alignment = getEVTAlignment(MemVT);
5149 MachineFunction &MF = getMachineFunction();
5150 // An atomic store does not load. An atomic load does not store.
5151 // (An atomicrmw obviously both loads and stores.)
5152 // For now, atomics are considered to be volatile always, and they are chained as such.
5154 // FIXME: Volatile isn't really correct; we should keep track of atomic
5155 // orderings in the memoperand.
5156 auto Flags = MachineMemOperand::MOVolatile;
5157 if (Opcode != ISD::ATOMIC_STORE)
5158 Flags |= MachineMemOperand::MOLoad;
5159 if (Opcode != ISD::ATOMIC_LOAD)
5160 Flags |= MachineMemOperand::MOStore;
5162 MachineMemOperand *MMO =
5163 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
5164 MemVT.getStoreSize(), Alignment, AAMDNodes(),
5165 nullptr, SynchScope, Ordering);
5167 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
5170 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5171 SDValue Chain, SDValue Ptr, SDValue Val,
5172 MachineMemOperand *MMO) {
5173 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
5174 Opcode == ISD::ATOMIC_LOAD_SUB ||
5175 Opcode == ISD::ATOMIC_LOAD_AND ||
5176 Opcode == ISD::ATOMIC_LOAD_OR ||
5177 Opcode == ISD::ATOMIC_LOAD_XOR ||
5178 Opcode == ISD::ATOMIC_LOAD_NAND ||
5179 Opcode == ISD::ATOMIC_LOAD_MIN ||
5180 Opcode == ISD::ATOMIC_LOAD_MAX ||
5181 Opcode == ISD::ATOMIC_LOAD_UMIN ||
5182 Opcode == ISD::ATOMIC_LOAD_UMAX ||
5183 Opcode == ISD::ATOMIC_SWAP ||
5184 Opcode == ISD::ATOMIC_STORE) &&
5185 "Invalid Atomic Op");
5187 EVT VT = Val.getValueType();
5189 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
5190 getVTList(VT, MVT::Other);
5191 SDValue Ops[] = {Chain, Ptr, Val};
5192 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
5195 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5196 EVT VT, SDValue Chain, SDValue Ptr,
5197 MachineMemOperand *MMO) {
5198 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
5200 SDVTList VTs = getVTList(VT, MVT::Other);
5201 SDValue Ops[] = {Chain, Ptr};
5202 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
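// Illustrative usage sketch (assumed names, not from this file): an atomic
// 32-bit load can be built as
//   SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MVT::i32, MVT::i32,
//                             Chain, Ptr, MMO);
// where result 0 is the loaded value and result 1 is the output chain.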
5205 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
5206 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
5207 if (Ops.size() == 1)
5208 return Ops[0];
5210 SmallVector<EVT, 4> VTs;
5211 VTs.reserve(Ops.size());
5212 for (unsigned i = 0; i < Ops.size(); ++i)
5213 VTs.push_back(Ops[i].getValueType());
5214 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
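// Illustrative usage sketch: a lowering hook that must hand back a value
// together with its chain as a single SDValue might write, assuming Lo and Ch
// are already computed:
//   SDValue Parts[] = { Lo, Ch };
//   return DAG.getMergeValues(Parts, dl);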
5217 SDValue SelectionDAG::getMemIntrinsicNode(
5218 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
5219 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align, bool Vol,
5220 bool ReadMem, bool WriteMem, unsigned Size) {
5221 if (Align == 0) // Ensure that codegen never sees alignment 0
5222 Align = getEVTAlignment(MemVT);
5224 MachineFunction &MF = getMachineFunction();
5225 auto Flags = MachineMemOperand::MONone;
5226 if (WriteMem)
5227 Flags |= MachineMemOperand::MOStore;
5228 if (ReadMem)
5229 Flags |= MachineMemOperand::MOLoad;
5230 if (Vol)
5231 Flags |= MachineMemOperand::MOVolatile;
5232 if (!Size)
5233 Size = MemVT.getStoreSize();
5234 MachineMemOperand *MMO =
5235 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);
5237 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
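// Illustrative usage sketch (IntrID and Ptr are placeholders): a target
// intrinsic that only reads memory could be built roughly as
//   SDValue Ops[] = { Chain, DAG.getTargetConstant(IntrID, dl, MVT::i32), Ptr };
//   SDValue R = DAG.getMemIntrinsicNode(
//       ISD::INTRINSIC_W_CHAIN, dl, DAG.getVTList(MVT::v4i32, MVT::Other),
//       Ops, MVT::v4i32, MachinePointerInfo(), /*Align=*/16, /*Vol=*/false,
//       /*ReadMem=*/true, /*WriteMem=*/false, /*Size=*/0);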
5240 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
5242 ArrayRef<SDValue> Ops, EVT MemVT,
5243 MachineMemOperand *MMO) {
5244 assert((Opcode == ISD::INTRINSIC_VOID ||
5245 Opcode == ISD::INTRINSIC_W_CHAIN ||
5246 Opcode == ISD::PREFETCH ||
5247 Opcode == ISD::LIFETIME_START ||
5248 Opcode == ISD::LIFETIME_END ||
5249 (Opcode <= INT_MAX &&
5250 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
5251 "Opcode is not a memory-accessing opcode!");
5253 // Memoize the node unless it returns a flag.
5254 MemIntrinsicSDNode *N;
5255 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5256 FoldingSetNodeID ID;
5257 AddNodeIDNode(ID, Opcode, VTList, Ops);
5258 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5260 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5261 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
5262 return SDValue(E, 0);
5265 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5266 VTList, MemVT, MMO);
5267 createOperands(N, Ops);
5269 CSEMap.InsertNode(N, IP);
5271 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5272 VTList, MemVT, MMO);
5273 createOperands(N, Ops);
5276 return SDValue(N, 0);
5279 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
5280 /// MachinePointerInfo record from it. This is particularly useful because the
5281 /// code generator has many cases where it doesn't bother passing in a
5282 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
5283 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr,
5284 int64_t Offset = 0) {
5285 // If this is FI+Offset, we can model it.
5286 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
5287 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
5288 FI->getIndex(), Offset);
5290 // If this is (FI+Offset1)+Offset2, we can model it.
5291 if (Ptr.getOpcode() != ISD::ADD ||
5292 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
5293 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
5294 return MachinePointerInfo();
5296 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5297 return MachinePointerInfo::getFixedStack(
5298 DAG.getMachineFunction(), FI,
5299 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
5302 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
5303 /// MachinePointerInfo record from it. This is particularly useful because the
5304 /// code generator has many cases where it doesn't bother passing in a
5305 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
5306 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr,
5307 SDValue OffsetOp) {
5308 // If the 'Offset' value isn't a constant, we can't handle this.
5309 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
5310 return InferPointerInfo(DAG, Ptr, OffsetNode->getSExtValue());
5311 if (OffsetOp.isUndef())
5312 return InferPointerInfo(DAG, Ptr);
5313 return MachinePointerInfo();
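// For example, a pointer of the form (add FrameIndex<1>, Constant<8>) yields
// MachinePointerInfo::getFixedStack(MF, /*FI=*/1, /*Offset=*/8); any other
// pointer shape falls back to an empty MachinePointerInfo.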
5316 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
5317 EVT VT, const SDLoc &dl, SDValue Chain,
5318 SDValue Ptr, SDValue Offset,
5319 MachinePointerInfo PtrInfo, EVT MemVT,
5321 MachineMemOperand::Flags MMOFlags,
5322 const AAMDNodes &AAInfo, const MDNode *Ranges) {
5323 assert(Chain.getValueType() == MVT::Other &&
5324 "Invalid chain type");
5325 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5326 Alignment = getEVTAlignment(MemVT);
5328 MMOFlags |= MachineMemOperand::MOLoad;
5329 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
5330 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
5331 // clients.
5332 if (PtrInfo.V.isNull())
5333 PtrInfo = InferPointerInfo(*this, Ptr, Offset);
5335 MachineFunction &MF = getMachineFunction();
5336 MachineMemOperand *MMO = MF.getMachineMemOperand(
5337 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
5338 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
5341 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
5342 EVT VT, const SDLoc &dl, SDValue Chain,
5343 SDValue Ptr, SDValue Offset, EVT MemVT,
5344 MachineMemOperand *MMO) {
5345 if (VT == MemVT) {
5346 ExtType = ISD::NON_EXTLOAD;
5347 } else if (ExtType == ISD::NON_EXTLOAD) {
5348 assert(VT == MemVT && "Non-extending load from different memory type!");
5349 } else {
5350 // Extending load.
5351 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
5352 "Should only be an extending load, not truncating!");
5353 assert(VT.isInteger() == MemVT.isInteger() &&
5354 "Cannot convert from FP to Int or Int -> FP!");
5355 assert(VT.isVector() == MemVT.isVector() &&
5356 "Cannot use an ext load to convert to or from a vector!");
5357 assert((!VT.isVector() ||
5358 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
5359 "Cannot use an ext load to change the number of vector elements!");
5362 bool Indexed = AM != ISD::UNINDEXED;
5363 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
5365 SDVTList VTs = Indexed ?
5366 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
5367 SDValue Ops[] = { Chain, Ptr, Offset };
5368 FoldingSetNodeID ID;
5369 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
5370 ID.AddInteger(MemVT.getRawBits());
5371 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
5372 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
5373 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5375 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5376 cast<LoadSDNode>(E)->refineAlignment(MMO);
5377 return SDValue(E, 0);
5379 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
5380 ExtType, MemVT, MMO);
5381 createOperands(N, Ops);
5383 CSEMap.InsertNode(N, IP);
5385 return SDValue(N, 0);
5388 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5389 SDValue Ptr, MachinePointerInfo PtrInfo,
5391 MachineMemOperand::Flags MMOFlags,
5392 const AAMDNodes &AAInfo, const MDNode *Ranges) {
5393 SDValue Undef = getUNDEF(Ptr.getValueType());
5394 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5395 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
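// Illustrative usage sketch (FIPtr, MF and FI are placeholders): loading a
// 32-bit value from a stack slot typically looks like
//   SDValue V = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
//                           MachinePointerInfo::getFixedStack(MF, FI));
// relying on the declaration's defaults for alignment, flags and AA info.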
5398 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5399 SDValue Ptr, MachineMemOperand *MMO) {
5400 SDValue Undef = getUNDEF(Ptr.getValueType());
5401 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5402 VT, MMO);
5405 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5406 EVT VT, SDValue Chain, SDValue Ptr,
5407 MachinePointerInfo PtrInfo, EVT MemVT,
5409 MachineMemOperand::Flags MMOFlags,
5410 const AAMDNodes &AAInfo) {
5411 SDValue Undef = getUNDEF(Ptr.getValueType());
5412 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
5413 MemVT, Alignment, MMOFlags, AAInfo);
5416 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5417 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
5418 MachineMemOperand *MMO) {
5419 SDValue Undef = getUNDEF(Ptr.getValueType());
5420 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
5421 MemVT, MMO);
5424 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
5425 SDValue Base, SDValue Offset,
5426 ISD::MemIndexedMode AM) {
5427 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
5428 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
5429 // Don't propagate the invariant or dereferenceable flags.
5430 auto MMOFlags =
5431 LD->getMemOperand()->getFlags() &
5432 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
5433 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
5434 LD->getChain(), Base, Offset, LD->getPointerInfo(),
5435 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
5436 LD->getAAInfo());
5439 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
5440 SDValue Ptr, MachinePointerInfo PtrInfo,
5442 MachineMemOperand::Flags MMOFlags,
5443 const AAMDNodes &AAInfo) {
5444 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
5445 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5446 Alignment = getEVTAlignment(Val.getValueType());
5448 MMOFlags |= MachineMemOperand::MOStore;
5449 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
5451 if (PtrInfo.V.isNull())
5452 PtrInfo = InferPointerInfo(*this, Ptr);
5454 MachineFunction &MF = getMachineFunction();
5455 MachineMemOperand *MMO = MF.getMachineMemOperand(
5456 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
5457 return getStore(Chain, dl, Val, Ptr, MMO);
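// Illustrative usage sketch (FIPtr, MF and FI are placeholders): spilling a
// value to a stack slot is commonly written as
//   SDValue St = DAG.getStore(Chain, dl, Val, FIPtr,
//                             MachinePointerInfo::getFixedStack(MF, FI));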
5460 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
5461 SDValue Ptr, MachineMemOperand *MMO) {
5462 assert(Chain.getValueType() == MVT::Other &&
5463 "Invalid chain type");
5464 EVT VT = Val.getValueType();
5465 SDVTList VTs = getVTList(MVT::Other);
5466 SDValue Undef = getUNDEF(Ptr.getValueType());
5467 SDValue Ops[] = { Chain, Val, Ptr, Undef };
5468 FoldingSetNodeID ID;
5469 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5470 ID.AddInteger(VT.getRawBits());
5471 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
5472 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
5473 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5475 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5476 cast<StoreSDNode>(E)->refineAlignment(MMO);
5477 return SDValue(E, 0);
5479 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5480 ISD::UNINDEXED, false, VT, MMO);
5481 createOperands(N, Ops);
5483 CSEMap.InsertNode(N, IP);
5485 return SDValue(N, 0);
5488 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
5489 SDValue Ptr, MachinePointerInfo PtrInfo,
5490 EVT SVT, unsigned Alignment,
5491 MachineMemOperand::Flags MMOFlags,
5492 const AAMDNodes &AAInfo) {
5493 assert(Chain.getValueType() == MVT::Other &&
5494 "Invalid chain type");
5495 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5496 Alignment = getEVTAlignment(SVT);
5498 MMOFlags |= MachineMemOperand::MOStore;
5499 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
5501 if (PtrInfo.V.isNull())
5502 PtrInfo = InferPointerInfo(*this, Ptr);
5504 MachineFunction &MF = getMachineFunction();
5505 MachineMemOperand *MMO = MF.getMachineMemOperand(
5506 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
5507 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
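// Illustrative usage sketch: storing only the low 8 bits of an i32 value,
// assuming Val32 and Ptr are in scope:
//   SDValue St = DAG.getTruncStore(Chain, dl, Val32, Ptr,
//                                  MachinePointerInfo(), MVT::i8);
// The in-register type stays i32; only an i8's worth of bytes reach memory.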
5510 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
5511 SDValue Ptr, EVT SVT,
5512 MachineMemOperand *MMO) {
5513 EVT VT = Val.getValueType();
5515 assert(Chain.getValueType() == MVT::Other &&
5516 "Invalid chain type");
5517 if (VT == SVT)
5518 return getStore(Chain, dl, Val, Ptr, MMO);
5520 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
5521 "Should only be a truncating store, not extending!");
5522 assert(VT.isInteger() == SVT.isInteger() &&
5523 "Can't do FP-INT conversion!");
5524 assert(VT.isVector() == SVT.isVector() &&
5525 "Cannot use trunc store to convert to or from a vector!");
5526 assert((!VT.isVector() ||
5527 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
5528 "Cannot use trunc store to change the number of vector elements!");
5530 SDVTList VTs = getVTList(MVT::Other);
5531 SDValue Undef = getUNDEF(Ptr.getValueType());
5532 SDValue Ops[] = { Chain, Val, Ptr, Undef };
5533 FoldingSetNodeID ID;
5534 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5535 ID.AddInteger(SVT.getRawBits());
5536 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
5537 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
5538 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5540 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5541 cast<StoreSDNode>(E)->refineAlignment(MMO);
5542 return SDValue(E, 0);
5544 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5545 ISD::UNINDEXED, true, SVT, MMO);
5546 createOperands(N, Ops);
5548 CSEMap.InsertNode(N, IP);
5550 return SDValue(N, 0);
5553 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
5554 SDValue Base, SDValue Offset,
5555 ISD::MemIndexedMode AM) {
5556 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
5557 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
5558 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
5559 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
5560 FoldingSetNodeID ID;
5561 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
5562 ID.AddInteger(ST->getMemoryVT().getRawBits());
5563 ID.AddInteger(ST->getRawSubclassData());
5564 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
5566 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
5567 return SDValue(E, 0);
5569 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
5570 ST->isTruncatingStore(), ST->getMemoryVT(),
5571 ST->getMemOperand());
5572 createOperands(N, Ops);
5574 CSEMap.InsertNode(N, IP);
5576 return SDValue(N, 0);
5579 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5580 SDValue Ptr, SDValue Mask, SDValue Src0,
5581 EVT MemVT, MachineMemOperand *MMO,
5582 ISD::LoadExtType ExtTy, bool isExpanding) {
5584 SDVTList VTs = getVTList(VT, MVT::Other);
5585 SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
5586 FoldingSetNodeID ID;
5587 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
5588 ID.AddInteger(VT.getRawBits());
5589 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
5590 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
5591 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5593 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5594 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
5595 return SDValue(E, 0);
5597 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5598 ExtTy, isExpanding, MemVT, MMO);
5599 createOperands(N, Ops);
5601 CSEMap.InsertNode(N, IP);
5603 return SDValue(N, 0);
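// Note on operand order: a masked load takes { Chain, Ptr, Mask, Src0 },
// where Src0 supplies the value for lanes whose mask bit is clear.
// Illustrative sketch (assumed names) with PassThru as that fallback vector:
//   SDValue ML = DAG.getMaskedLoad(MVT::v4i32, dl, Chain, Ptr, Mask, PassThru,
//                                  MVT::v4i32, MMO, ISD::NON_EXTLOAD,
//                                  /*isExpanding=*/false);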
5606 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
5607 SDValue Val, SDValue Ptr, SDValue Mask,
5608 EVT MemVT, MachineMemOperand *MMO,
5609 bool IsTruncating, bool IsCompressing) {
5610 assert(Chain.getValueType() == MVT::Other &&
5611 "Invalid chain type");
5612 EVT VT = Val.getValueType();
5613 SDVTList VTs = getVTList(MVT::Other);
5614 SDValue Ops[] = { Chain, Ptr, Mask, Val };
5615 FoldingSetNodeID ID;
5616 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
5617 ID.AddInteger(VT.getRawBits());
5618 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
5619 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO));
5620 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5622 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5623 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
5624 return SDValue(E, 0);
5626 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
5627 IsTruncating, IsCompressing, MemVT, MMO);
5628 createOperands(N, Ops);
5630 CSEMap.InsertNode(N, IP);
5632 return SDValue(N, 0);
5635 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
5636 ArrayRef<SDValue> Ops,
5637 MachineMemOperand *MMO) {
5638 assert(Ops.size() == 5 && "Incompatible number of operands");
5640 FoldingSetNodeID ID;
5641 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
5642 ID.AddInteger(VT.getRawBits());
5643 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
5644 dl.getIROrder(), VTs, VT, MMO));
5645 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5647 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5648 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
5649 return SDValue(E, 0);
5652 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
5653 VTs, VT, MMO);
5654 createOperands(N, Ops);
5656 assert(N->getValue().getValueType() == N->getValueType(0) &&
5657 "Incompatible type of the PassThru value in MaskedGatherSDNode");
5658 assert(N->getMask().getValueType().getVectorNumElements() ==
5659 N->getValueType(0).getVectorNumElements() &&
5660 "Vector width mismatch between mask and data");
5661 assert(N->getIndex().getValueType().getVectorNumElements() ==
5662 N->getValueType(0).getVectorNumElements() &&
5663 "Vector width mismatch between index and data");
5665 CSEMap.InsertNode(N, IP);
5667 return SDValue(N, 0);
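// The five gather operands are { Chain, PassThru, Mask, BasePtr, Index }, as
// reflected by the getValue()/getMask()/getIndex() accessors used in the
// asserts above; all three vector operands must match the result's element
// count.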
5670 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
5671 ArrayRef<SDValue> Ops,
5672 MachineMemOperand *MMO) {
5673 assert(Ops.size() == 5 && "Incompatible number of operands");
5675 FoldingSetNodeID ID;
5676 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
5677 ID.AddInteger(VT.getRawBits());
5678 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
5679 dl.getIROrder(), VTs, VT, MMO));
5680 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5682 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5683 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
5684 return SDValue(E, 0);
5686 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
5687 VTs, VT, MMO);
5688 createOperands(N, Ops);
5690 assert(N->getMask().getValueType().getVectorNumElements() ==
5691 N->getValue().getValueType().getVectorNumElements() &&
5692 "Vector width mismatch between mask and data");
5693 assert(N->getIndex().getValueType().getVectorNumElements() ==
5694 N->getValue().getValueType().getVectorNumElements() &&
5695 "Vector width mismatch between index and data");
5697 CSEMap.InsertNode(N, IP);
5699 return SDValue(N, 0);
5702 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
5703 SDValue Ptr, SDValue SV, unsigned Align) {
5704 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
5705 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
5708 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5709 ArrayRef<SDUse> Ops) {
5710 switch (Ops.size()) {
5711 case 0: return getNode(Opcode, DL, VT);
5712 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
5713 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
5714 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
5718 // Copy from an SDUse array into an SDValue array for use with
5719 // the regular getNode logic.
5720 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
5721 return getNode(Opcode, DL, VT, NewOps);
5724 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5725 ArrayRef<SDValue> Ops, const SDNodeFlags *Flags) {
5726 unsigned NumOps = Ops.size();
5727 switch (NumOps) {
5728 case 0: return getNode(Opcode, DL, VT);
5729 case 1: return getNode(Opcode, DL, VT, Ops[0]);
5730 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
5731 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
5732 default: break;
5733 }
5735 switch (Opcode) {
5736 default: break;
5737 case ISD::CONCAT_VECTORS: {
5738 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
5739 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
5743 case ISD::SELECT_CC: {
5744 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
5745 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
5746 "LHS and RHS of condition must have same type!");
5747 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5748 "True and False arms of SelectCC must have same type!");
5749 assert(Ops[2].getValueType() == VT &&
5750 "select_cc node must be of same type as true and false value!");
5754 assert(NumOps == 5 && "BR_CC takes 5 operands!");
5755 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
5756 "LHS/RHS of comparison should match types!");
5763 SDVTList VTs = getVTList(VT);
5764 SDNode *N;
5765 if (VT != MVT::Glue) {
5766 FoldingSetNodeID ID;
5767 AddNodeIDNode(ID, Opcode, VTs, Ops);
5770 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5771 return SDValue(E, 0);
5773 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5774 createOperands(N, Ops);
5776 CSEMap.InsertNode(N, IP);
5778 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5779 createOperands(N, Ops);
5783 return SDValue(N, 0);
5786 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
5787 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
5788 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
5791 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5792 ArrayRef<SDValue> Ops) {
5793 if (VTList.NumVTs == 1)
5794 return getNode(Opcode, DL, VTList.VTs[0], Ops);
5796 #if 0
5797 switch (Opcode) {
5798 // FIXME: figure out how to safely handle things like
5799 // int foo(int x) { return 1 << (x & 255); }
5800 // int bar() { return foo(256); }
5801 case ISD::SRA_PARTS:
5802 case ISD::SRL_PARTS:
5803 case ISD::SHL_PARTS:
5804 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
5805 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
5806 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5807 else if (N3.getOpcode() == ISD::AND)
5808 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
5809 // If the and is only masking out bits that cannot affect the shift,
5810 // eliminate the and.
5811 unsigned NumBits = VT.getScalarSizeInBits()*2;
5812 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
5813 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
5814 }
5815 break;
5816 }
5817 #endif
5819 // Memoize the node unless it returns a flag.
5820 SDNode *N;
5821 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5822 FoldingSetNodeID ID;
5823 AddNodeIDNode(ID, Opcode, VTList, Ops);
5825 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5826 return SDValue(E, 0);
5828 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
5829 createOperands(N, Ops);
5830 CSEMap.InsertNode(N, IP);
5832 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
5833 createOperands(N, Ops);
5836 return SDValue(N, 0);
5839 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
5841 return getNode(Opcode, DL, VTList, None);
5844 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5846 SDValue Ops[] = { N1 };
5847 return getNode(Opcode, DL, VTList, Ops);
5850 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5851 SDValue N1, SDValue N2) {
5852 SDValue Ops[] = { N1, N2 };
5853 return getNode(Opcode, DL, VTList, Ops);
5856 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5857 SDValue N1, SDValue N2, SDValue N3) {
5858 SDValue Ops[] = { N1, N2, N3 };
5859 return getNode(Opcode, DL, VTList, Ops);
5862 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5863 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5864 SDValue Ops[] = { N1, N2, N3, N4 };
5865 return getNode(Opcode, DL, VTList, Ops);
5868 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
5869 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
5871 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5872 return getNode(Opcode, DL, VTList, Ops);
5875 SDVTList SelectionDAG::getVTList(EVT VT) {
5876 return makeVTList(SDNode::getValueTypeList(VT), 1);
5879 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
5880 FoldingSetNodeID ID;
5882 ID.AddInteger(VT1.getRawBits());
5883 ID.AddInteger(VT2.getRawBits());
5886 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5888 EVT *Array = Allocator.Allocate<EVT>(2);
5891 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
5892 VTListMap.InsertNode(Result, IP);
5894 return Result->getSDVTList();
5897 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
5898 FoldingSetNodeID ID;
5900 ID.AddInteger(VT1.getRawBits());
5901 ID.AddInteger(VT2.getRawBits());
5902 ID.AddInteger(VT3.getRawBits());
5905 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5907 EVT *Array = Allocator.Allocate<EVT>(3);
5911 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
5912 VTListMap.InsertNode(Result, IP);
5914 return Result->getSDVTList();
5917 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
5918 FoldingSetNodeID ID;
5920 ID.AddInteger(VT1.getRawBits());
5921 ID.AddInteger(VT2.getRawBits());
5922 ID.AddInteger(VT3.getRawBits());
5923 ID.AddInteger(VT4.getRawBits());
5926 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5928 EVT *Array = Allocator.Allocate<EVT>(4);
5933 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
5934 VTListMap.InsertNode(Result, IP);
5936 return Result->getSDVTList();
5939 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
5940 unsigned NumVTs = VTs.size();
5941 FoldingSetNodeID ID;
5942 ID.AddInteger(NumVTs);
5943 for (unsigned index = 0; index < NumVTs; index++) {
5944 ID.AddInteger(VTs[index].getRawBits());
5948 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
5950 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
5951 std::copy(VTs.begin(), VTs.end(), Array);
5952 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
5953 VTListMap.InsertNode(Result, IP);
5955 return Result->getSDVTList();
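// Illustrative note: value type lists are uniqued in VTListMap, so repeated
// requests such as
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
// hand back the same interned array and can therefore be compared cheaply.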
5959 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
5960 /// specified operands. If the resultant node already exists in the DAG,
5961 /// this does not modify the specified node, instead it returns the node that
5962 /// already exists. If the resultant node does not exist in the DAG, the
5963 /// input node is returned. As a degenerate case, if you specify the same
5964 /// input operands as the node already has, the input node is returned.
5965 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
5966 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
5968 // Check to see if there is no change.
5969 if (Op == N->getOperand(0)) return N;
5971 // See if the modified node already exists.
5972 void *InsertPos = nullptr;
5973 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
5976 // Nope it doesn't. Remove the node from its current place in the maps.
5978 if (!RemoveNodeFromCSEMaps(N))
5979 InsertPos = nullptr;
5981 // Now we update the operands.
5982 N->OperandList[0].set(Op);
5984 // If this gets put into a CSE map, add it.
5985 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5989 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
5990 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
5992 // Check to see if there is no change.
5993 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
5994 return N; // No operands changed, just return the input node.
5996 // See if the modified node already exists.
5997 void *InsertPos = nullptr;
5998 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
6001 // Nope it doesn't. Remove the node from its current place in the maps.
6003 if (!RemoveNodeFromCSEMaps(N))
6004 InsertPos = nullptr;
6006 // Now we update the operands.
6007 if (N->OperandList[0] != Op1)
6008 N->OperandList[0].set(Op1);
6009 if (N->OperandList[1] != Op2)
6010 N->OperandList[1].set(Op2);
6012 // If this gets put into a CSE map, add it.
6013 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
6017 SDNode *SelectionDAG::
6018 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
6019 SDValue Ops[] = { Op1, Op2, Op3 };
6020 return UpdateNodeOperands(N, Ops);
6023 SDNode *SelectionDAG::
6024 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
6025 SDValue Op3, SDValue Op4) {
6026 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
6027 return UpdateNodeOperands(N, Ops);
6030 SDNode *SelectionDAG::
6031 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
6032 SDValue Op3, SDValue Op4, SDValue Op5) {
6033 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
6034 return UpdateNodeOperands(N, Ops);
6037 SDNode *SelectionDAG::
6038 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
6039 unsigned NumOps = Ops.size();
6040 assert(N->getNumOperands() == NumOps &&
6041 "Update with wrong number of operands");
6043 // If no operands changed just return the input node.
6044 if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
6047 // See if the modified node already exists.
6048 void *InsertPos = nullptr;
6049 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
6052 // Nope it doesn't. Remove the node from its current place in the maps.
6054 if (!RemoveNodeFromCSEMaps(N))
6055 InsertPos = nullptr;
6057 // Now we update the operands.
6058 for (unsigned i = 0; i != NumOps; ++i)
6059 if (N->OperandList[i] != Ops[i])
6060 N->OperandList[i].set(Ops[i]);
6062 // If this gets put into a CSE map, add it.
6063 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
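// Illustrative usage sketch: a combine that only rewires the chain of a
// two-operand node N (names assumed) can update it in place with
//   SDNode *Res = DAG.UpdateNodeOperands(N, NewChain, N->getOperand(1));
// If an equivalent node already existed, Res is that node rather than N.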
6067 /// DropOperands - Release the operands and set this node to have
6068 /// zero operands.
6069 void SDNode::DropOperands() {
6070 // Unlike the code in MorphNodeTo that does this, we don't need to
6071 // watch for dead nodes here.
6072 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
6078 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
6081 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6083 SDVTList VTs = getVTList(VT);
6084 return SelectNodeTo(N, MachineOpc, VTs, None);
6087 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6088 EVT VT, SDValue Op1) {
6089 SDVTList VTs = getVTList(VT);
6090 SDValue Ops[] = { Op1 };
6091 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6094 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6095 EVT VT, SDValue Op1,
6097 SDVTList VTs = getVTList(VT);
6098 SDValue Ops[] = { Op1, Op2 };
6099 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6102 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6103 EVT VT, SDValue Op1,
6104 SDValue Op2, SDValue Op3) {
6105 SDVTList VTs = getVTList(VT);
6106 SDValue Ops[] = { Op1, Op2, Op3 };
6107 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6110 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6111 EVT VT, ArrayRef<SDValue> Ops) {
6112 SDVTList VTs = getVTList(VT);
6113 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6116 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6117 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
6118 SDVTList VTs = getVTList(VT1, VT2);
6119 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6122 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6124 SDVTList VTs = getVTList(VT1, VT2);
6125 return SelectNodeTo(N, MachineOpc, VTs, None);
6128 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6129 EVT VT1, EVT VT2, EVT VT3,
6130 ArrayRef<SDValue> Ops) {
6131 SDVTList VTs = getVTList(VT1, VT2, VT3);
6132 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6135 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6137 SDValue Op1, SDValue Op2) {
6138 SDVTList VTs = getVTList(VT1, VT2);
6139 SDValue Ops[] = { Op1, Op2 };
6140 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6143 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6144 SDVTList VTs, ArrayRef<SDValue> Ops) {
6145 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
6146 // Reset the NodeID to -1.
6147 New->setNodeId(-1);
6148 if (New != N) {
6149 ReplaceAllUsesWith(N, New);
6155 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
6156 /// the line number information on the merged node since it is not possible to
6157 /// preserve the fact that the operation is associated with multiple lines.
6158 /// This makes the debugger work better at -O0, where there is a higher
6159 /// probability of having other instructions associated with that line.
6161 /// For IROrder, we keep the smaller of the two.
6162 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
6163 DebugLoc NLoc = N->getDebugLoc();
6164 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
6165 N->setDebugLoc(DebugLoc());
6167 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
6168 N->setIROrder(Order);
6172 /// MorphNodeTo - This *mutates* the specified node to have the specified
6173 /// return type, opcode, and operands.
6175 /// Note that MorphNodeTo returns the resultant node. If there is already a
6176 /// node of the specified opcode and operands, it returns that node instead of
6177 /// the current one. Note that the SDLoc need not be the same.
6179 /// Using MorphNodeTo is faster than creating a new node and swapping it in
6180 /// with ReplaceAllUsesWith both because it often avoids allocating a new
6181 /// node, and because it doesn't require CSE recalculation for any of
6182 /// the node's users.
6184 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
6185 /// As a consequence, it isn't appropriate to use it from within the DAG combiner or
6186 /// the legalizer which maintain worklists that would need to be updated when
6187 /// deleting things.
6188 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
6189 SDVTList VTs, ArrayRef<SDValue> Ops) {
6190 // If an identical node already exists, use it.
6192 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
6193 FoldingSetNodeID ID;
6194 AddNodeIDNode(ID, Opc, VTs, Ops);
6195 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
6196 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
6199 if (!RemoveNodeFromCSEMaps(N))
6202 // Start the morphing.
6204 N->ValueList = VTs.VTs;
6205 N->NumValues = VTs.NumVTs;
6207 // Clear the operands list, updating used nodes to remove this from their
6208 // use list. Keep track of any operands that become dead as a result.
6209 SmallPtrSet<SDNode*, 16> DeadNodeSet;
6210 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
6212 SDNode *Used = Use.getNode();
6214 if (Used->use_empty())
6215 DeadNodeSet.insert(Used);
6218 // For MachineNode, initialize the memory references information.
6219 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
6220 MN->setMemRefs(nullptr, nullptr);
6222 // Swap for an appropriately sized array from the recycler.
6224 createOperands(N, Ops);
6226 // Delete any nodes that are still dead after adding the uses for the
6227 // new operands.
6228 if (!DeadNodeSet.empty()) {
6229 SmallVector<SDNode *, 16> DeadNodes;
6230 for (SDNode *N : DeadNodeSet)
6232 DeadNodes.push_back(N);
6233 RemoveDeadNodes(DeadNodes);
6237 CSEMap.InsertNode(N, IP); // Memoize the new node.
6242 /// getMachineNode - These are used for target selectors to create a new node
6243 /// with specified return type(s), MachineInstr opcode, and operands.
6245 /// Note that getMachineNode returns the resultant node. If there is already a
6246 /// node of the specified opcode and operands, it returns that node instead of
6247 /// the current one.
6248 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6250 SDVTList VTs = getVTList(VT);
6251 return getMachineNode(Opcode, dl, VTs, None);
6254 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6255 EVT VT, SDValue Op1) {
6256 SDVTList VTs = getVTList(VT);
6257 SDValue Ops[] = { Op1 };
6258 return getMachineNode(Opcode, dl, VTs, Ops);
6261 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6262 EVT VT, SDValue Op1, SDValue Op2) {
6263 SDVTList VTs = getVTList(VT);
6264 SDValue Ops[] = { Op1, Op2 };
6265 return getMachineNode(Opcode, dl, VTs, Ops);
6268 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6269 EVT VT, SDValue Op1, SDValue Op2,
6271 SDVTList VTs = getVTList(VT);
6272 SDValue Ops[] = { Op1, Op2, Op3 };
6273 return getMachineNode(Opcode, dl, VTs, Ops);
6276 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6277 EVT VT, ArrayRef<SDValue> Ops) {
6278 SDVTList VTs = getVTList(VT);
6279 return getMachineNode(Opcode, dl, VTs, Ops);
6282 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6283 EVT VT1, EVT VT2, SDValue Op1,
6285 SDVTList VTs = getVTList(VT1, VT2);
6286 SDValue Ops[] = { Op1, Op2 };
6287 return getMachineNode(Opcode, dl, VTs, Ops);
6290 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6291 EVT VT1, EVT VT2, SDValue Op1,
6292 SDValue Op2, SDValue Op3) {
6293 SDVTList VTs = getVTList(VT1, VT2);
6294 SDValue Ops[] = { Op1, Op2, Op3 };
6295 return getMachineNode(Opcode, dl, VTs, Ops);
6298 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6300 ArrayRef<SDValue> Ops) {
6301 SDVTList VTs = getVTList(VT1, VT2);
6302 return getMachineNode(Opcode, dl, VTs, Ops);
6305 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6306 EVT VT1, EVT VT2, EVT VT3,
6307 SDValue Op1, SDValue Op2) {
6308 SDVTList VTs = getVTList(VT1, VT2, VT3);
6309 SDValue Ops[] = { Op1, Op2 };
6310 return getMachineNode(Opcode, dl, VTs, Ops);
6313 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6314 EVT VT1, EVT VT2, EVT VT3,
6315 SDValue Op1, SDValue Op2,
6317 SDVTList VTs = getVTList(VT1, VT2, VT3);
6318 SDValue Ops[] = { Op1, Op2, Op3 };
6319 return getMachineNode(Opcode, dl, VTs, Ops);
6322 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6323 EVT VT1, EVT VT2, EVT VT3,
6324 ArrayRef<SDValue> Ops) {
6325 SDVTList VTs = getVTList(VT1, VT2, VT3);
6326 return getMachineNode(Opcode, dl, VTs, Ops);
6329 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6330 ArrayRef<EVT> ResultTys,
6331 ArrayRef<SDValue> Ops) {
6332 SDVTList VTs = getVTList(ResultTys);
6333 return getMachineNode(Opcode, dl, VTs, Ops);
6336 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
6338 ArrayRef<SDValue> Ops) {
6339 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
6344 FoldingSetNodeID ID;
6345 AddNodeIDNode(ID, ~Opcode, VTs, Ops);
6347 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
6348 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
6352 // Allocate a new MachineSDNode.
6353 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6354 createOperands(N, Ops);
6357 CSEMap.InsertNode(N, IP);
6363 /// getTargetExtractSubreg - A convenience function for creating
6364 /// TargetOpcode::EXTRACT_SUBREG nodes.
6365 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
6367 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
6368 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
6369 VT, Operand, SRIdxVal);
6370 return SDValue(Subreg, 0);
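// Illustrative usage sketch (the subregister index name is hypothetical):
// extracting the low half of a 64-bit register-class value might look like
//   SDValue Lo = DAG.getTargetExtractSubreg(MyTarget::sub_lo, DL, MVT::i32,
//                                           Val64);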
6373 /// getTargetInsertSubreg - A convenience function for creating
6374 /// TargetOpcode::INSERT_SUBREG nodes.
6375 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
6376 SDValue Operand, SDValue Subreg) {
6377 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
6378 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
6379 VT, Operand, Subreg, SRIdxVal);
6380 return SDValue(Result, 0);
6383 /// getNodeIfExists - Get the specified node if it's already available, or
6384 /// else return NULL.
6385 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
6386 ArrayRef<SDValue> Ops,
6387 const SDNodeFlags *Flags) {
6388 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
6389 FoldingSetNodeID ID;
6390 AddNodeIDNode(ID, Opcode, VTList, Ops);
6392 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
6394 E->intersectFlagsWith(Flags);
6401 /// getDbgValue - Creates an SDDbgValue node.
6404 SDDbgValue *SelectionDAG::getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N,
6405 unsigned R, bool IsIndirect, uint64_t Off,
6406 const DebugLoc &DL, unsigned O) {
6407 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6408 "Expected inlined-at fields to agree");
6409 return new (DbgInfo->getAlloc())
6410 SDDbgValue(Var, Expr, N, R, IsIndirect, Off, DL, O);
6414 SDDbgValue *SelectionDAG::getConstantDbgValue(MDNode *Var, MDNode *Expr,
6415 const Value *C, uint64_t Off,
6416 const DebugLoc &DL, unsigned O) {
6417 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6418 "Expected inlined-at fields to agree");
6419 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, Off, DL, O);
6423 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(MDNode *Var, MDNode *Expr,
6424 unsigned FI, uint64_t Off,
6427 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
6428 "Expected inlined-at fields to agree");
6429 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, Off, DL, O);
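// The three factories above correspond to the three SDDbgValue kinds: a value
// produced by an SDNode, a constant, and a frame index. Illustrative sketch
// for the SDNode case, assuming Var/Expr describe the source variable:
//   SDDbgValue *DV = DAG.getDbgValue(Var, Expr, N, /*ResNo=*/0,
//                                    /*IsIndirect=*/false, /*Offset=*/0, DL,
//                                    Order);
//   DAG.AddDbgValue(DV, N, /*isParameter=*/false);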
6434 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
6435 /// pointed to by a use iterator is deleted, increment the use iterator
6436 /// so that it doesn't dangle.
6438 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
6439 SDNode::use_iterator &UI;
6440 SDNode::use_iterator &UE;
6442 void NodeDeleted(SDNode *N, SDNode *E) override {
6443 // Increment the iterator as needed.
6444 while (UI != UE && N == *UI)
6445 ++UI;
6446 }
6448 public:
6449 RAUWUpdateListener(SelectionDAG &d,
6450 SDNode::use_iterator &ui,
6451 SDNode::use_iterator &ue)
6452 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
6457 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6458 /// This can cause recursive merging of nodes in the DAG.
6460 /// This version assumes From has a single result value.
6462 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
6463 SDNode *From = FromN.getNode();
6464 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
6465 "Cannot replace with this method!");
6466 assert(From != To.getNode() && "Cannot replace uses of with self");
6468 // Preserve Debug Values
6469 TransferDbgValues(FromN, To);
6471 // Iterate over all the existing uses of From. New uses will be added
6472 // to the beginning of the use list, which we avoid visiting.
6473 // This specifically avoids visiting uses of From that arise while the
6474 // replacement is happening, because any such uses would be the result
6475 // of CSE: If an existing node looks like From after one of its operands
6476 // is replaced by To, we don't want to replace all of its uses with To
6477 // as well. See PR3018 for more info.
6478 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6479 RAUWUpdateListener Listener(*this, UI, UE);
6483 // This node is about to morph, remove its old self from the CSE maps.
6484 RemoveNodeFromCSEMaps(User);
6486 // A user can appear in a use list multiple times, and when this
6487 // happens the uses are usually next to each other in the list.
6488 // To help reduce the number of CSE recomputations, process all
6489 // the uses of this user that we can find this way.
6490 do {
6491 SDUse &Use = UI.getUse();
6492 ++UI;
6493 Use.set(To);
6494 } while (UI != UE && *UI == User);
6496 // Now that we have modified User, add it back to the CSE maps. If it
6497 // already exists there, recursively merge the results together.
6498 AddModifiedNodeToCSEMaps(User);
6502 // If we just RAUW'd the root, take note.
6503 if (FromN == getRoot())
6504 setRoot(To);
6507 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6508 /// This can cause recursive merging of nodes in the DAG.
6510 /// This version assumes that for each value of From, there is a
6511 /// corresponding value in To in the same position with the same type.
6513 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
6515 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6516 assert((!From->hasAnyUseOfValue(i) ||
6517 From->getValueType(i) == To->getValueType(i)) &&
6518 "Cannot use this version of ReplaceAllUsesWith!");
6521 // Handle the trivial case.
6525 // Preserve Debug Info. Only do this if there's a use.
6526 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6527 if (From->hasAnyUseOfValue(i)) {
6528 assert((i < To->getNumValues()) && "Invalid To location");
6529 TransferDbgValues(SDValue(From, i), SDValue(To, i));
6532 // Iterate over just the existing users of From. See the comments in
6533 // the ReplaceAllUsesWith above.
6534 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6535 RAUWUpdateListener Listener(*this, UI, UE);
6539 // This node is about to morph, remove its old self from the CSE maps.
6540 RemoveNodeFromCSEMaps(User);
6542 // A user can appear in a use list multiple times, and when this
6543 // happens the uses are usually next to each other in the list.
6544 // To help reduce the number of CSE recomputations, process all
6545 // the uses of this user that we can find this way.
6546 do {
6547 SDUse &Use = UI.getUse();
6548 ++UI;
6549 Use.setNode(To);
6550 } while (UI != UE && *UI == User);
6552 // Now that we have modified User, add it back to the CSE maps. If it
6553 // already exists there, recursively merge the results together.
6554 AddModifiedNodeToCSEMaps(User);
6557 // If we just RAUW'd the root, take note.
6558 if (From == getRoot().getNode())
6559 setRoot(SDValue(To, getRoot().getResNo()));
6562 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
6563 /// This can cause recursive merging of nodes in the DAG.
6565 /// This version can replace From with any result values. To must match the
6566 /// number and types of values returned by From.
6567 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
6568 if (From->getNumValues() == 1) // Handle the simple case efficiently.
6569 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
6571 // Preserve Debug Info.
6572 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
6573 TransferDbgValues(SDValue(From, i), *To);
6575 // Iterate over just the existing users of From. See the comments in
6576 // the ReplaceAllUsesWith above.
6577 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
6578 RAUWUpdateListener Listener(*this, UI, UE);
6582 // This node is about to morph, remove its old self from the CSE maps.
6583 RemoveNodeFromCSEMaps(User);
6585 // A user can appear in a use list multiple times, and when this
6586 // happens the uses are usually next to each other in the list.
6587 // To help reduce the number of CSE recomputations, process all
6588 // the uses of this user that we can find this way.
6589 do {
6590 SDUse &Use = UI.getUse();
6591 const SDValue &ToOp = To[Use.getResNo()];
6592 ++UI;
6593 Use.set(ToOp);
6594 } while (UI != UE && *UI == User);
6596 // Now that we have modified User, add it back to the CSE maps. If it
6597 // already exists there, recursively merge the results together.
6598 AddModifiedNodeToCSEMaps(User);
6601 // If we just RAUW'd the root, take note.
6602 if (From == getRoot().getNode())
6603 setRoot(SDValue(To[getRoot().getResNo()]));
6606 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
6607 /// uses of other values produced by From.getNode() alone. The Deleted
6608 /// vector is handled the same way as for ReplaceAllUsesWith.
6609 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
6610 // Handle the really simple, really trivial case efficiently.
6611 if (From == To) return;
6613 // Handle the simple, trivial, case efficiently.
6614 if (From.getNode()->getNumValues() == 1) {
6615 ReplaceAllUsesWith(From, To);
6619 // Preserve Debug Info.
6620 TransferDbgValues(From, To);
6622 // Iterate over just the existing users of From. See the comments in
6623 // the ReplaceAllUsesWith above.
6624 SDNode::use_iterator UI = From.getNode()->use_begin(),
6625 UE = From.getNode()->use_end();
6626 RAUWUpdateListener Listener(*this, UI, UE);
6629 bool UserRemovedFromCSEMaps = false;
6631 // A user can appear in a use list multiple times, and when this
6632 // happens the uses are usually next to each other in the list.
6633 // To help reduce the number of CSE recomputations, process all
6634 // the uses of this user that we can find this way.
6636 SDUse &Use = UI.getUse();
6638 // Skip uses of different values from the same node.
6639 if (Use.getResNo() != From.getResNo()) {
6644 // If this node hasn't been modified yet, it's still in the CSE maps,
6645 // so remove its old self from the CSE maps.
6646 if (!UserRemovedFromCSEMaps) {
6647 RemoveNodeFromCSEMaps(User);
6648 UserRemovedFromCSEMaps = true;
6649 }
6651 ++UI;
6652 Use.set(To);
6653 } while (UI != UE && *UI == User);
6655 // We are iterating over all uses of the From node, so if a use
6656 // doesn't use the specific value, no changes are made.
6657 if (!UserRemovedFromCSEMaps)
6660 // Now that we have modified User, add it back to the CSE maps. If it
6661 // already exists there, recursively merge the results together.
6662 AddModifiedNodeToCSEMaps(User);
6665 // If we just RAUW'd the root, take note.
6666 if (From == getRoot())
6667 setRoot(To);
6671 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
6672 /// to record information about a use.
6673 struct UseMemo {
6674 SDNode *User;
6675 unsigned Index;
6676 SDUse *Use;
6677 };
6679 /// operator< - Sort Memos by User.
6680 bool operator<(const UseMemo &L, const UseMemo &R) {
6681 return (intptr_t)L.User < (intptr_t)R.User;
6685 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
6686 /// uses of other values produced by From.getNode() alone. The same value
6687 /// may appear in both the From and To list. The Deleted vector is
6688 /// handled the same way as for ReplaceAllUsesWith.
6689 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
6690 const SDValue *To,
6691 unsigned Num) {
6692 // Handle the simple, trivial case efficiently.
6693 if (Num == 1)
6694 return ReplaceAllUsesOfValueWith(*From, *To);
6696 TransferDbgValues(*From, *To);
6698 // Record all the uses up front. This makes it easier to cope with
6699 // new uses that are introduced during the
6700 // replacement process.
6701 SmallVector<UseMemo, 4> Uses;
6702 for (unsigned i = 0; i != Num; ++i) {
6703 unsigned FromResNo = From[i].getResNo();
6704 SDNode *FromNode = From[i].getNode();
6705 for (SDNode::use_iterator UI = FromNode->use_begin(),
6706 E = FromNode->use_end(); UI != E; ++UI) {
6707 SDUse &Use = UI.getUse();
6708 if (Use.getResNo() == FromResNo) {
6709 UseMemo Memo = { *UI, i, &Use };
6710 Uses.push_back(Memo);
6715 // Sort the uses, so that all the uses from a given User are together.
6716 std::sort(Uses.begin(), Uses.end());
6718 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
6719 UseIndex != UseIndexEnd; ) {
6720 // We know that this user uses some value of From. If it is the right
6721 // value, update it.
6722 SDNode *User = Uses[UseIndex].User;
6724 // This node is about to morph, remove its old self from the CSE maps.
6725 RemoveNodeFromCSEMaps(User);
6727 // The Uses array is sorted, so all the uses for a given User
6728 // are next to each other in the list.
6729 // To help reduce the number of CSE recomputations, process all
6730 // the uses of this user that we can find this way.
6731 do {
6732 unsigned i = Uses[UseIndex].Index;
6733 SDUse &Use = *Uses[UseIndex].Use;
6734 ++UseIndex;
6736 Use.set(To[i]);
6737 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
6739 // Now that we have modified User, add it back to the CSE maps. If it
6740 // already exists there, recursively merge the results together.
6741 AddModifiedNodeToCSEMaps(User);
6745 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
6746 /// based on their topological order. It returns the maximum id.
6748 unsigned SelectionDAG::AssignTopologicalOrder() {
6750 unsigned DAGSize = 0;
6752 // SortedPos tracks the progress of the algorithm. Nodes before it are
6753 // sorted, nodes after it are unsorted. When the algorithm completes
6754 // it is at the end of the list.
6755 allnodes_iterator SortedPos = allnodes_begin();
6757 // Visit all the nodes. Move nodes with no operands to the front of
6758 // the list immediately. Annotate nodes that do have operands with their
6759 // operand count. Before we do this, the Node Id fields of the nodes
6760 // may contain arbitrary values. After, the Node Id fields for nodes
6761 // before SortedPos will contain the topological sort index, and the
6762 // Node Id fields for nodes At SortedPos and after will contain the
6763 // count of outstanding operands.
6764 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
6766 checkForCycles(N, this);
6767 unsigned Degree = N->getNumOperands();
6768 if (Degree == 0) {
6769 // A node with no operands, add it to the result array immediately.
6770 N->setNodeId(DAGSize++);
6771 allnodes_iterator Q(N);
6773 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
6774 assert(SortedPos != AllNodes.end() && "Overran node list");
6777 // Temporarily use the Node Id as scratch space for the degree count.
6778 N->setNodeId(Degree);
6782 // Visit all the nodes. As we iterate, move nodes into sorted order,
6783 // such that by the time the end is reached all nodes will be sorted.
6784 for (SDNode &Node : allnodes()) {
6786 checkForCycles(N, this);
6787 // N is in sorted position, so all its uses have one less operand
6788 // that needs to be sorted.
6789 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
6790 UI != UE; ++UI) {
6791 SDNode *P = *UI;
6792 unsigned Degree = P->getNodeId();
6793 assert(Degree != 0 && "Invalid node degree");
6794 --Degree;
6795 if (Degree == 0) {
6796 // All of P's operands are sorted, so P may be sorted now.
6797 P->setNodeId(DAGSize++);
6798 if (P->getIterator() != SortedPos)
6799 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
6800 assert(SortedPos != AllNodes.end() && "Overran node list");
6803 // Update P's outstanding operand count.
6804 P->setNodeId(Degree);
6807 if (Node.getIterator() == SortedPos) {
6809 allnodes_iterator I(N);
6810 SDNode *S = &*++I;
6811 dbgs() << "Overran sorted position:\n";
6812 S->dumprFull(this); dbgs() << "\n";
6813 dbgs() << "Checking if this is due to cycles\n";
6814 checkForCycles(this, true);
6816 llvm_unreachable(nullptr);
6820 assert(SortedPos == AllNodes.end() &&
6821 "Topological sort incomplete!");
6822 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
6823 "First node in topological sort is not the entry token!");
6824 assert(AllNodes.front().getNodeId() == 0 &&
6825 "First node in topological sort has non-zero id!");
6826 assert(AllNodes.front().getNumOperands() == 0 &&
6827 "First node in topological sort has operands!");
6828 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
6829 "Last node in topologic sort has unexpected id!");
6830 assert(AllNodes.back().use_empty() &&
6831 "Last node in topologic sort has users!");
6832 assert(DAGSize == allnodes_size() && "Node count mismatch!");
6836 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
6837 /// value is produced by SD.
6838 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
6840 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
6841 SD->setHasDebugValue(true);
6843 DbgInfo->add(DB, SD, isParameter);
6846 /// TransferDbgValues - Transfer SDDbgValues. Called in replace nodes.
6847 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
6848 if (From == To || !From.getNode()->getHasDebugValue())
6850 SDNode *FromNode = From.getNode();
6851 SDNode *ToNode = To.getNode();
6852 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
6853 SmallVector<SDDbgValue *, 2> ClonedDVs;
6854 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
6856 SDDbgValue *Dbg = *I;
6857 // Only add DbgValues attached to the same ResNo.
6858 if (Dbg->getKind() == SDDbgValue::SDNODE &&
6859 Dbg->getSDNode() == From.getNode() &&
6860 Dbg->getResNo() == From.getResNo() && !Dbg->isInvalidated()) {
6861 assert(FromNode != ToNode &&
6862 "Should not transfer Debug Values intranode");
6864 getDbgValue(Dbg->getVariable(), Dbg->getExpression(), ToNode,
6865 To.getResNo(), Dbg->isIndirect(), Dbg->getOffset(),
6866 Dbg->getDebugLoc(), Dbg->getOrder());
6867 ClonedDVs.push_back(Clone);
6868 Dbg->setIsInvalidated();
6871 for (SDDbgValue *I : ClonedDVs)
6872 AddDbgValue(I, ToNode, false);
6873 }
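// Illustration (not part of the original source): a caller that replaces one
// value with another typically moves the debug info along with it, e.g. with
// hypothetical SDValues OldVal and NewVal of the same type:
// \code
//   DAG.TransferDbgValues(OldVal, NewVal);
//   DAG.ReplaceAllUsesOfValueWith(OldVal, NewVal);
// \endcode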
6875 //===----------------------------------------------------------------------===//
6877 //===----------------------------------------------------------------------===//
6879 bool llvm::isNullConstant(SDValue V) {
6880 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
6881 return Const != nullptr && Const->isNullValue();
6882 }
6884 bool llvm::isNullFPConstant(SDValue V) {
6885 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
6886 return Const != nullptr && Const->isZero() && !Const->isNegative();
6887 }
6889 bool llvm::isAllOnesConstant(SDValue V) {
6890 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
6891 return Const != nullptr && Const->isAllOnesValue();
6892 }
6894 bool llvm::isOneConstant(SDValue V) {
6895 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
6896 return Const != nullptr && Const->isOne();
6897 }
6899 bool llvm::isBitwiseNot(SDValue V) {
6900 return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1));
6901 }
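// Illustration (not part of the original source): a target combine might use
// these predicates to match canonical scalar constants and the (xor X, -1)
// "not" pattern, e.g. with N0 and N1 being the operands of some node:
// \code
//   if (isBitwiseNot(N0) && isNullConstant(N1))
//     ;  // N0 is (xor X, all-ones) and N1 is the integer constant 0
// \endcode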
6903 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) {
6904 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
6905 return CN;
6907 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
6908 BitVector UndefElements;
6909 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
6911 // BuildVectors can truncate their operands. Ignore that case here.
6912 // FIXME: We blindly ignore splats which include undef which is overly
6913 // pessimistic.
6914 if (CN && UndefElements.none() &&
6915 CN->getValueType(0) == N.getValueType().getScalarType())
6916 return CN;
6917 }
6919 return nullptr;
6920 }
6922 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) {
6923 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
6924 return CN;
6926 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
6927 BitVector UndefElements;
6928 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
6930 if (CN && UndefElements.none())
6931 return CN;
6932 }
6934 return nullptr;
6935 }
6937 HandleSDNode::~HandleSDNode() {
6938 DropOperands();
6939 }
6941 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
6942 const DebugLoc &DL,
6943 const GlobalValue *GA, EVT VT,
6944 int64_t o, unsigned char TF)
6945 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
6946 TheGlobal = GA;
6947 }
6949 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
6950 EVT VT, unsigned SrcAS,
6951 unsigned DestAS)
6952 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
6953 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
6955 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
6956 SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
6957 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
6958 MemSDNodeBits.IsVolatile = MMO->isVolatile();
6959 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
6960 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
6961 MemSDNodeBits.IsInvariant = MMO->isInvariant();
6963 // We check here that the size of the memory operand fits within the size of
6964 // the MMO. This is because the MMO might indicate only a possible address
6965 // range instead of specifying the affected memory addresses precisely.
6966 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
6969 /// Profile - Gather unique data for the node.
6971 void SDNode::Profile(FoldingSetNodeID &ID) const {
6972 AddNodeIDNode(ID, this);
6977 std::vector<EVT> VTs;
6980 VTs.reserve(MVT::LAST_VALUETYPE);
6981 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
6982 VTs.push_back(MVT((MVT::SimpleValueType)i));
6987 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
6988 static ManagedStatic<EVTArray> SimpleVTArray;
6989 static ManagedStatic<sys::SmartMutex<true> > VTMutex;
6991 /// getValueTypeList - Return a pointer to the specified value type.
6993 const EVT *SDNode::getValueTypeList(EVT VT) {
6994 if (VT.isExtended()) {
6995 sys::SmartScopedLock<true> Lock(*VTMutex);
6996 return &(*EVTs->insert(VT).first);
6998 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
6999 "Value type out of range!");
7000 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
7004 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
7005 /// indicated value. This method ignores uses of other values defined by this
7006 /// operation.
7007 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
7008 assert(Value < getNumValues() && "Bad value!");
7010 // TODO: Only iterate over uses of a given value of the node
7011 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
7012 if (UI.getUse().getResNo() == Value) {
7013 if (NUses == 0)
7014 return false;
7015 --NUses;
7016 }
7017 }
7019 // Found exactly the right number of uses?
7020 return NUses == 0;
7021 }
7024 /// hasAnyUseOfValue - Return true if there are any uses of the indicated
7025 /// value. This method ignores uses of other values defined by this operation.
7026 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
7027 assert(Value < getNumValues() && "Bad value!");
7029 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
7030 if (UI.getUse().getResNo() == Value)
7031 return true;
7033 return false;
7034 }
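// Illustration (not part of the original source): these per-result queries are
// useful for multi-result nodes; for a hypothetical two-result SDNode *N one
// might check that result 0 has a single use while result 1 is dead:
// \code
//   if (N->hasNUsesOfValue(1, 0) && !N->hasAnyUseOfValue(1))
//     ;  // exactly one use of result 0, and result 1 is unused
// \endcode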
7037 /// isOnlyUserOf - Return true if this node is the only use of N.
7039 bool SDNode::isOnlyUserOf(const SDNode *N) const {
7041 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
7052 /// isOperandOf - Return true if this node is an operand of N.
7054 bool SDValue::isOperandOf(const SDNode *N) const {
7055 for (const SDValue &Op : N->op_values())
7056 if (*this == Op)
7057 return true;
7058 return false;
7059 }
7061 bool SDNode::isOperandOf(const SDNode *N) const {
7062 for (const SDValue &Op : N->op_values())
7063 if (this == Op.getNode())
7064 return true;
7065 return false;
7066 }
7068 /// reachesChainWithoutSideEffects - Return true if this operand (which must
7069 /// be a chain) reaches the specified operand without crossing any
7070 /// side-effecting instructions on any chain path. In practice, this looks
7071 /// through token factors and non-volatile loads. In order to remain efficient,
7072 /// this only looks a couple of nodes in; it does not do an exhaustive search.
7073 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
7074 unsigned Depth) const {
7075 if (*this == Dest) return true;
7077 // Don't search too deeply, we just want to be able to see through
7078 // TokenFactor's etc.
7079 if (Depth == 0) return false;
7081 // If this is a token factor, all inputs to the TF happen in parallel. If any
7082 // of the operands of the TF does not reach dest, then we cannot do the xform.
7083 if (getOpcode() == ISD::TokenFactor) {
7084 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
7085 if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
7086 return false;
7087 return true;
7088 }
7090 // Loads don't have side effects, look through them.
7091 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
7092 if (!Ld->isVolatile())
7093 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
7094 }
7095 return false;
7096 }
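// Illustration (not part of the original source): a combine that wants to fold
// across a chain can verify that nothing with side effects sits between two
// chained operations, e.g. with hypothetical StoreSDNode *St and LoadSDNode *Ld:
// \code
//   SDValue LdChain = SDValue(Ld, 1);   // the load's chain result
//   if (St->getChain().reachesChainWithoutSideEffects(LdChain))
//     ;  // only TokenFactors / non-volatile loads in between
// \endcode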
7098 bool SDNode::hasPredecessor(const SDNode *N) const {
7099 SmallPtrSet<const SDNode *, 32> Visited;
7100 SmallVector<const SDNode *, 16> Worklist;
7101 Worklist.push_back(this);
7102 return hasPredecessorHelper(N, Visited, Worklist);
7105 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
7106 assert(Num < NumOperands && "Invalid child # of SDNode!");
7107 return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
7110 const SDNodeFlags *SDNode::getFlags() const {
7111 if (auto *FlagsNode = dyn_cast<BinaryWithFlagsSDNode>(this))
7112 return &FlagsNode->Flags;
7113 return nullptr;
7114 }
7116 void SDNode::intersectFlagsWith(const SDNodeFlags *Flags) {
7117 if (auto *FlagsNode = dyn_cast<BinaryWithFlagsSDNode>(this))
7118 FlagsNode->Flags.intersectWith(Flags);
7121 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
7122 assert(N->getNumValues() == 1 &&
7123 "Can't unroll a vector with multiple results!");
7125 EVT VT = N->getValueType(0);
7126 unsigned NE = VT.getVectorNumElements();
7127 EVT EltVT = VT.getVectorElementType();
7128 SDLoc dl(N);
7130 SmallVector<SDValue, 8> Scalars;
7131 SmallVector<SDValue, 4> Operands(N->getNumOperands());
7133 // If ResNE is 0, fully unroll the vector op.
7134 if (ResNE == 0)
7135 ResNE = NE;
7136 else if (NE > ResNE)
7137 NE = ResNE;
7139 unsigned i;
7140 for (i = 0; i != NE; ++i) {
7141 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
7142 SDValue Operand = N->getOperand(j);
7143 EVT OperandVT = Operand.getValueType();
7144 if (OperandVT.isVector()) {
7145 // A vector operand; extract a single element.
7146 EVT OperandEltVT = OperandVT.getVectorElementType();
7148 getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
7149 getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
7151 // A scalar operand; just use it as is.
7152 Operands[j] = Operand;
7156 switch (N->getOpcode()) {
7158 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
7163 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
7170 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
7171 getShiftAmountOperand(Operands[0].getValueType(),
7174 case ISD::SIGN_EXTEND_INREG:
7175 case ISD::FP_ROUND_INREG: {
7176 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
7177 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
7179 getValueType(ExtVT)));
7184 for (; i < ResNE; ++i)
7185 Scalars.push_back(getUNDEF(EltVT));
7187 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
7188 return getBuildVector(VecVT, dl, Scalars);
7189 }
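// Illustration (not part of the original source): a target that cannot handle
// some single-result vector node N (an SDNode*) could scalarize it fully, or
// widen/trim it to a hypothetical result width of four elements:
// \code
//   SDValue FullyUnrolled = DAG.UnrollVectorOp(N);
//   SDValue FourWide      = DAG.UnrollVectorOp(N, /*ResNE=*/4);
// \endcode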
7191 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
7192 LoadSDNode *Base,
7193 unsigned Bytes,
7194 int Dist) const {
7195 if (LD->isVolatile() || Base->isVolatile())
7196 return false;
7197 if (LD->isIndexed() || Base->isIndexed())
7198 return false;
7199 if (LD->getChain() != Base->getChain())
7200 return false;
7201 EVT VT = LD->getValueType(0);
7202 if (VT.getSizeInBits() / 8 != Bytes)
7203 return false;
7205 SDValue Loc = LD->getOperand(1);
7206 SDValue BaseLoc = Base->getOperand(1);
7207 if (Loc.getOpcode() == ISD::FrameIndex) {
7208 if (BaseLoc.getOpcode() != ISD::FrameIndex)
7209 return false;
7210 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
7211 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
7212 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
7213 int FS = MFI.getObjectSize(FI);
7214 int BFS = MFI.getObjectSize(BFI);
7215 if (FS != BFS || FS != (int)Bytes) return false;
7216 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
7220 if (isBaseWithConstantOffset(Loc)) {
7221 int64_t LocOffset = cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
7222 if (Loc.getOperand(0) == BaseLoc) {
7223 // If the base location is a simple address with no offset itself, then
7224 // the second load's first add operand should be the base address.
7225 if (LocOffset == Dist * (int)Bytes)
7226 return true;
7227 } else if (isBaseWithConstantOffset(BaseLoc)) {
7228 // The base location itself has an offset, so subtract that value from the
7229 // second load's offset before comparing to distance * size.
7230 int64_t BOffset =
7231 cast<ConstantSDNode>(BaseLoc.getOperand(1))->getSExtValue();
7232 if (Loc.getOperand(0) == BaseLoc.getOperand(0)) {
7233 if ((LocOffset - BOffset) == Dist * (int)Bytes)
7234 return true;
7235 }
7236 }
7237 }
7238 const GlobalValue *GV1 = nullptr;
7239 const GlobalValue *GV2 = nullptr;
7240 int64_t Offset1 = 0;
7241 int64_t Offset2 = 0;
7242 bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1);
7243 bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
7244 if (isGA1 && isGA2 && GV1 == GV2)
7245 return Offset1 == (Offset2 + Dist*Bytes);
7246 return false;
7247 }
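// Illustration (not part of the original source): given two hypothetical
// LoadSDNode pointers LoLd and HiLd that each load four bytes, this answers
// whether HiLd reads the bytes immediately after LoLd:
// \code
//   if (DAG.areNonVolatileConsecutiveLoads(HiLd, LoLd, /*Bytes=*/4, /*Dist=*/1))
//     ;  // the two loads are adjacent and may be merged
// \endcode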
7250 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
7251 /// it cannot be inferred.
7252 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
7253 // If this is a GlobalAddress + cst, return the alignment.
7254 const GlobalValue *GV;
7255 int64_t GVOffset = 0;
7256 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
7257 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
7258 APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
7259 llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
7260 getDataLayout());
7261 unsigned AlignBits = KnownZero.countTrailingOnes();
7262 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
7263 if (Align)
7264 return MinAlign(Align, GVOffset);
7265 }
7267 // If this is a direct reference to a stack slot, use information about the
7268 // stack slot's alignment.
7269 int FrameIdx = 1 << 31;
7270 int64_t FrameOffset = 0;
7271 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
7272 FrameIdx = FI->getIndex();
7273 } else if (isBaseWithConstantOffset(Ptr) &&
7274 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
7276 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7277 FrameOffset = Ptr.getConstantOperandVal(1);
7280 if (FrameIdx != (1 << 31)) {
7281 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
7282 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
7283 FrameOffset);
7284 return FIInfoAlign;
7285 }
7287 return 0;
7288 }
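// Illustration (not part of the original source): callers typically use the
// inferred alignment as a lower bound when emitting a memory operation for a
// hypothetical pointer SDValue Ptr:
// \code
//   if (unsigned Align = DAG.InferPtrAlignment(Ptr))
//     ;  // Ptr is known to be at least Align bytes aligned
// \endcode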
7290 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
7291 /// which is split (or expanded) into two not necessarily identical pieces.
7292 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
7293 // Currently all types are split in half.
7294 EVT LoVT, HiVT;
7295 if (!VT.isVector()) {
7296 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
7297 } else {
7298 unsigned NumElements = VT.getVectorNumElements();
7299 assert(!(NumElements & 1) && "Splitting vector, but not in half!");
7300 LoVT = HiVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
7301 NumElements/2);
7302 }
7303 return std::make_pair(LoVT, HiVT);
7304 }
7306 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
7308 std::pair<SDValue, SDValue>
7309 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
7310 const EVT &HiVT) {
7311 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
7312 N.getValueType().getVectorNumElements() &&
7313 "More vector elements requested than available!");
7314 SDValue Lo, Hi;
7315 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
7316 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
7317 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
7318 getConstant(LoVT.getVectorNumElements(), DL,
7319 TLI->getVectorIdxTy(getDataLayout())));
7320 return std::make_pair(Lo, Hi);
7321 }
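// Illustration (not part of the original source): splitting a wide vector
// value Vec (an SDValue, with DL an SDLoc) in half typically combines the two
// helpers above:
// \code
//   EVT LoVT, HiVT;
//   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(Vec.getValueType());
//   SDValue Lo, Hi;
//   std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL, LoVT, HiVT);
// \endcode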
7323 void SelectionDAG::ExtractVectorElements(SDValue Op,
7324 SmallVectorImpl<SDValue> &Args,
7325 unsigned Start, unsigned Count) {
7326 EVT VT = Op.getValueType();
7327 if (Count == 0)
7328 Count = VT.getVectorNumElements();
7330 EVT EltVT = VT.getVectorElementType();
7331 EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
7332 SDLoc SL(Op);
7333 for (unsigned i = Start, e = Start + Count; i != e; ++i) {
7334 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
7335 Op, getConstant(i, SL, IdxTy)));
7336 }
7337 }
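// Illustration (not part of the original source): extracting every element, or
// just a leading slice, of a vector value Vec into scalars:
// \code
//   SmallVector<SDValue, 8> Elts;
//   DAG.ExtractVectorElements(Vec, Elts);                           // all elements
//   DAG.ExtractVectorElements(Vec, Elts, /*Start=*/0, /*Count=*/2); // first two
// \endcode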
7339 // getAddressSpace - Return the address space this GlobalAddress belongs to.
7340 unsigned GlobalAddressSDNode::getAddressSpace() const {
7341 return getGlobal()->getType()->getAddressSpace();
7345 Type *ConstantPoolSDNode::getType() const {
7346 if (isMachineConstantPoolEntry())
7347 return Val.MachineCPVal->getType();
7348 return Val.ConstVal->getType();
7351 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
7352 APInt &SplatUndef,
7353 unsigned &SplatBitSize,
7354 bool &HasAnyUndefs,
7355 unsigned MinSplatBits,
7356 bool isBigEndian) const {
7357 EVT VT = getValueType(0);
7358 assert(VT.isVector() && "Expected a vector type");
7359 unsigned sz = VT.getSizeInBits();
7360 if (MinSplatBits > sz)
7361 return false;
7363 SplatValue = APInt(sz, 0);
7364 SplatUndef = APInt(sz, 0);
7366 // Get the bits. Bits with undefined values (when the corresponding element
7367 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
7368 // in SplatValue. If any of the values are not constant, give up and return
7369 // false.
7370 unsigned int nOps = getNumOperands();
7371 assert(nOps > 0 && "isConstantSplat has 0-size build vector");
7372 unsigned EltBitSize = VT.getScalarSizeInBits();
7374 for (unsigned j = 0; j < nOps; ++j) {
7375 unsigned i = isBigEndian ? nOps-1-j : j;
7376 SDValue OpVal = getOperand(i);
7377 unsigned BitPos = j * EltBitSize;
7379 if (OpVal.isUndef())
7380 SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
7381 else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
7382 SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
7383 zextOrTrunc(sz) << BitPos;
7384 else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
7385 SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
7386 else
7387 return false;
7388 }
7390 // The build_vector is all constants or undefs. Find the smallest element
7391 // size that splats the vector.
7393 HasAnyUndefs = (SplatUndef != 0);
7396 unsigned HalfSize = sz / 2;
7397 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
7398 APInt LowValue = SplatValue.trunc(HalfSize);
7399 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
7400 APInt LowUndef = SplatUndef.trunc(HalfSize);
7402 // If the two halves do not match (ignoring undef bits), stop here.
7403 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
7404 MinSplatBits > HalfSize)
7407 SplatValue = HighValue | LowValue;
7408 SplatUndef = HighUndef & LowUndef;
7417 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
7418 if (UndefElements) {
7419 UndefElements->clear();
7420 UndefElements->resize(getNumOperands());
7421 }
7422 SDValue Splatted;
7423 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
7424 SDValue Op = getOperand(i);
7425 if (Op.isUndef()) {
7426 if (UndefElements)
7427 (*UndefElements)[i] = true;
7428 } else if (!Splatted) {
7429 Splatted = Op;
7430 } else if (Splatted != Op) {
7431 return SDValue();
7432 }
7433 }
7435 if (!Splatted) {
7436 assert(getOperand(0).isUndef() &&
7437 "Can only have a splat without a constant for all undefs.");
7438 return getOperand(0);
7439 }
7441 return Splatted;
7442 }
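// Illustration (not part of the original source): querying a hypothetical
// BuildVectorSDNode *BV for a splat while tolerating undef lanes:
// \code
//   BitVector Undefs;
//   if (SDValue Splat = BV->getSplatValue(&Undefs))
//     ;  // every defined element of BV equals Splat; Undefs marks undef lanes
// \endcode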
7445 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
7446 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
7450 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
7451 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
7454 int32_t
7455 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
7456 uint32_t BitWidth) const {
7457 if (ConstantFPSDNode *CN =
7458 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
7459 bool IsExact;
7460 APSInt IntVal(BitWidth);
7461 const APFloat &APF = CN->getValueAPF();
7462 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
7463 APFloat::opOK ||
7464 !IsExact)
7465 return -1;
7467 return IntVal.exactLogBase2();
7468 }
7469 return -1;
7470 }
7472 bool BuildVectorSDNode::isConstant() const {
7473 for (const SDValue &Op : op_values()) {
7474 unsigned Opc = Op.getOpcode();
7475 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
7476 return false;
7477 }
7478 return true;
7479 }
7481 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
7482 // Find the first non-undef value in the shuffle mask.
7483 unsigned i, e;
7484 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
7485 /* search */;
7487 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
7489 // Make sure all remaining elements are either undef or the same as the first
7490 // non-undef value.
7491 for (int Idx = Mask[i]; i != e; ++i)
7492 if (Mask[i] >= 0 && Mask[i] != Idx)
7493 return false;
7494 return true;
7495 }
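// Illustration (not part of the original source): a mask splats if every
// non-undef entry selects the same source element, e.g.:
// \code
//   int Mask[] = {1, -1, 1, 1};   // -1 means undef
//   bool IsSplat = ShuffleVectorSDNode::isSplatMask(Mask, MVT::v4i32);  // true
// \endcode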
7497 // \brief Returns the SDNode if it is a constant integer BuildVector
7498 // or constant integer.
7499 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
7500 if (isa<ConstantSDNode>(N))
7501 return N.getNode();
7502 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
7503 return N.getNode();
7504 // Treat a GlobalAddress supporting constant offset folding as a
7505 // constant integer.
7506 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
7507 if (GA->getOpcode() == ISD::GlobalAddress &&
7508 TLI->isOffsetFoldingLegal(GA))
7509 return N.getNode();
7510 return nullptr;
7511 }
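// Illustration (not part of the original source): getNode() uses this style of
// query to canonicalize constants to the right-hand side of commutative
// operations; a hypothetical caller inside SelectionDAG with operands N1 and
// N2 might do:
// \code
//   if (isConstantIntBuildVectorOrConstantInt(N1) &&
//       !isConstantIntBuildVectorOrConstantInt(N2))
//     std::swap(N1, N2);   // keep the constant on the RHS
// \endcode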
7513 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
7514 if (isa<ConstantFPSDNode>(N))
7515 return N.getNode();
7517 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
7518 return N.getNode();
7520 return nullptr;
7521 }
7524 static void checkForCyclesHelper(const SDNode *N,
7525 SmallPtrSetImpl<const SDNode*> &Visited,
7526 SmallPtrSetImpl<const SDNode*> &Checked,
7527 const llvm::SelectionDAG *DAG) {
7528 // If this node has already been checked, don't check it again.
7529 if (Checked.count(N))
7530 return;
7532 // If a node has already been visited on this depth-first walk, reject it as
7533 // a cycle.
7534 if (!Visited.insert(N).second) {
7535 errs() << "Detected cycle in SelectionDAG\n";
7536 dbgs() << "Offending node:\n";
7537 N->dumprFull(DAG); dbgs() << "\n";
7538 abort();
7539 }
7541 for (const SDValue &Op : N->op_values())
7542 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);
7544 Checked.insert(N);
7545 Visited.erase(N);
7546 }
7549 void llvm::checkForCycles(const llvm::SDNode *N,
7550 const llvm::SelectionDAG *DAG,
7551 bool force) {
7552 #ifndef NDEBUG
7553 bool check = force;
7554 #ifdef EXPENSIVE_CHECKS
7555 check = true;
7556 #endif // EXPENSIVE_CHECKS
7557 if (check) {
7558 assert(N && "Checking nonexistent SDNode");
7559 SmallPtrSet<const SDNode*, 32> visited;
7560 SmallPtrSet<const SDNode*, 32> checked;
7561 checkForCyclesHelper(N, visited, checked, DAG);
7562 }
7563 #endif // !NDEBUG
7564 }
7566 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
7567 checkForCycles(DAG->getRoot().getNode(), DAG, force);
7568 }