//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//
14 #include "llvm/CodeGen/SelectionDAG.h"
15 #include "SDNodeDbgValue.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/APSInt.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/BitVector.h"
21 #include "llvm/ADT/FoldingSet.h"
22 #include "llvm/ADT/None.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Triple.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/CodeGen/ISDOpcodes.h"
30 #include "llvm/CodeGen/MachineBasicBlock.h"
31 #include "llvm/CodeGen/MachineConstantPool.h"
32 #include "llvm/CodeGen/MachineFrameInfo.h"
33 #include "llvm/CodeGen/MachineFunction.h"
34 #include "llvm/CodeGen/MachineMemOperand.h"
35 #include "llvm/CodeGen/MachineValueType.h"
36 #include "llvm/CodeGen/RuntimeLibcalls.h"
37 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
38 #include "llvm/CodeGen/SelectionDAGNodes.h"
39 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
40 #include "llvm/CodeGen/TargetLowering.h"
41 #include "llvm/CodeGen/TargetRegisterInfo.h"
42 #include "llvm/CodeGen/TargetSubtargetInfo.h"
43 #include "llvm/CodeGen/ValueTypes.h"
44 #include "llvm/IR/Constant.h"
45 #include "llvm/IR/Constants.h"
46 #include "llvm/IR/DataLayout.h"
47 #include "llvm/IR/DebugInfoMetadata.h"
48 #include "llvm/IR/DebugLoc.h"
49 #include "llvm/IR/DerivedTypes.h"
50 #include "llvm/IR/Function.h"
51 #include "llvm/IR/GlobalValue.h"
52 #include "llvm/IR/Metadata.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/Value.h"
55 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/CodeGen.h"
57 #include "llvm/Support/Compiler.h"
58 #include "llvm/Support/Debug.h"
59 #include "llvm/Support/ErrorHandling.h"
60 #include "llvm/Support/KnownBits.h"
61 #include "llvm/Support/ManagedStatic.h"
62 #include "llvm/Support/MathExtras.h"
63 #include "llvm/Support/Mutex.h"
64 #include "llvm/Support/raw_ostream.h"
65 #include "llvm/Target/TargetMachine.h"
66 #include "llvm/Target/TargetOptions.h"
/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}
86 // Default null implementations of the callbacks.
87 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
88 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
90 #define DEBUG_TYPE "selectiondag"
static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  DEBUG(
    dbgs() << Msg;
    V.getNode()->dump(G);
  );
}
99 //===----------------------------------------------------------------------===//
100 // ConstantFPSDNode Class
101 //===----------------------------------------------------------------------===//
103 /// isExactlyValue - We don't rely on operator== working on double values, as
104 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
105 /// As such, this method can be used to do an exact bit-for-bit comparison of
106 /// two floating point values.
107 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}
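
// Illustrative example: for a node holding -0.0, isExactlyValue(APFloat(+0.0))
// returns false because the sign bits differ, even though (-0.0 == +0.0) is
// true for host doubles. Conversely, two NaNs with identical bit patterns do
// compare equal here, although operator== on the doubles would return false.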
bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//
bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}
142 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
143 // specializations of the more general isConstantSplatVector()?
145 bool ISD::isBuildVectorAllOnes(const SDNode *N) {
146 // Look through a bit convert.
147 while (N->getOpcode() == ISD::BITCAST)
148 N = N->getOperand(0).getNode();
150 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
152 unsigned i = 0, e = N->getNumOperands();
154 // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;
158 // Do not accept an all-undef vector.
159 if (i == e) return false;
161 // Do not accept build_vectors that aren't all constants or which have non-~0
162 // elements. We have to be a bit careful here, as the type of the constant
163 // may not be the same as the type of the vector elements due to type
164 // legalization (the elements are promoted to a legal type for the target and
165 // a vector of a type may be legal when the base element type is not).
166 // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // bits are set in every element.
169 SDValue NotZero = N->getOperand(i);
170 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;
  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}
189 bool ISD::isBuildVectorAllZeros(const SDNode *N) {
190 // Look through a bit convert.
191 while (N->getOpcode() == ISD::BITCAST)
192 N = N->getOperand(0).getNode();
194 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
201 // Do not accept build_vectors that aren't all constants or which have non-0
202 // elements. We have to be a bit careful here, as the type of the constant
203 // may not be the same as the type of the vector elements due to type
204 // legalization (the elements are promoted to a legal type for the target
205 // and a vector of a type may be legal when the base element type is not).
206 // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // bits are set in every element.
209 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;

  return true;
}
bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}
bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}
252 bool ISD::allOperandsUndef(const SDNode *N) {
253 // Return false if the node has no operands.
254 // This is "logically inconsistent" with the definition of "all" but
255 // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (const SDValue &Op : N->op_values())
    if (!Op.isUndef())
      return false;

  return true;
}
ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
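
// Worked example (illustrative): SETLT has L=1, G=0, so swapping the bits
// yields SETGT; i.e. (setlt x, y) with its operands swapped is (setgt y, x).
// Similarly SETULE <-> SETUGE, while SETEQ and SETNE are unchanged because
// neither has the L or G bit set.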
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
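
// Worked example (illustrative): for an integer compare, SETEQ ^ 7 gives
// SETNE and SETULT ^ 7 gives SETUGE. For a floating-point compare the U bit
// is flipped as well, so the inverse of SETOLT is SETUGE, which is correctly
// true whenever either operand is a NaN.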
304 /// For an integer comparison, return 1 if the comparison is a signed operation
305 /// and 2 if the result is an unsigned comparison. Return zero if the operation
306 /// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETUGT:
  case ISD::SETULE:
  case ISD::SETUGE: return 2;
  }
}
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool IsInteger) {
325 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
326 // Cannot fold a signed integer setcc with an unsigned integer setcc.
327 return ISD::SETCC_INVALID;
329 unsigned Op = Op1 | Op2; // Combine all of the condition bits.
331 // If the N and U bits get set, then the resultant comparison DOES suddenly
332 // care about orderedness, and it is true when ordered.
333 if (Op > ISD::SETTRUE2)
334 Op &= ~16; // Clear the U bit if the N bit is set.
  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
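
// Worked example (illustrative): OR-ing the condition bits of SETLT and SETEQ
// produces SETLE, so (setlt x, y) | (seteq x, y) folds to (setle x, y).
// SETUGT | SETULT yields SETUNE, which the integer canonicalization above
// rewrites to SETNE.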
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool IsInteger) {
345 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
346 // Cannot fold a signed setcc with an unsigned setcc.
347 return ISD::SETCC_INVALID;
349 // Combine all of the condition bits.
350 ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                  // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
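
// Worked example (illustrative): AND-ing the condition bits of SETLT and
// SETLE gives SETLT, so (setlt x, y) & (setle x, y) folds to (setlt x, y).
// SETUGT & SETULT leaves only the U bit (SETUO), which the integer
// canonicalization above turns into SETFALSE.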
367 //===----------------------------------------------------------------------===//
368 // SDNode Profile Support
369 //===----------------------------------------------------------------------===//
371 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}
376 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
377 /// solely with their pointer.
378 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}
382 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
383 static void AddNodeIDOperands(FoldingSetNodeID &ID,
384 ArrayRef<SDValue> Ops) {
385 for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}
391 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
392 static void AddNodeIDOperands(FoldingSetNodeID &ID,
393 ArrayRef<SDUse> Ops) {
394 for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}
400 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
401 SDVTList VTList, ArrayRef<SDValue> OpList) {
402 AddNodeIDOpcode(ID, OpC);
403 AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}
407 /// If this is an SDNode with special info, add this info to the NodeID data.
408 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
409 switch (N->getOpcode()) {
410 case ISD::TargetExternalSymbol:
411 case ISD::ExternalSymbol:
413 llvm_unreachable("Should only be used on nodes with operands");
414 default: break; // Normal nodes don't need extra info.
415 case ISD::TargetConstant:
416 case ISD::Constant: {
417 const ConstantSDNode *C = cast<ConstantSDNode>(N);
418 ID.AddPointer(C->getConstantIntValue());
419 ID.AddBoolean(C->isOpaque());
422 case ISD::TargetConstantFP:
423 case ISD::ConstantFP:
424 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
426 case ISD::TargetGlobalAddress:
427 case ISD::GlobalAddress:
428 case ISD::TargetGlobalTLSAddress:
429 case ISD::GlobalTLSAddress: {
430 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
431 ID.AddPointer(GA->getGlobal());
432 ID.AddInteger(GA->getOffset());
433 ID.AddInteger(GA->getTargetFlags());
436 case ISD::BasicBlock:
437 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
440 ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
442 case ISD::RegisterMask:
443 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
446 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
448 case ISD::FrameIndex:
449 case ISD::TargetFrameIndex:
450 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
453 case ISD::TargetJumpTable:
454 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
455 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
457 case ISD::ConstantPool:
458 case ISD::TargetConstantPool: {
459 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
460 ID.AddInteger(CP->getAlignment());
461 ID.AddInteger(CP->getOffset());
462 if (CP->isMachineConstantPoolEntry())
463 CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
465 ID.AddPointer(CP->getConstVal());
466 ID.AddInteger(CP->getTargetFlags());
469 case ISD::TargetIndex: {
470 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
471 ID.AddInteger(TI->getIndex());
472 ID.AddInteger(TI->getOffset());
473 ID.AddInteger(TI->getTargetFlags());
477 const LoadSDNode *LD = cast<LoadSDNode>(N);
478 ID.AddInteger(LD->getMemoryVT().getRawBits());
479 ID.AddInteger(LD->getRawSubclassData());
480 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
484 const StoreSDNode *ST = cast<StoreSDNode>(N);
485 ID.AddInteger(ST->getMemoryVT().getRawBits());
486 ID.AddInteger(ST->getRawSubclassData());
487 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
490 case ISD::ATOMIC_CMP_SWAP:
491 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
492 case ISD::ATOMIC_SWAP:
493 case ISD::ATOMIC_LOAD_ADD:
494 case ISD::ATOMIC_LOAD_SUB:
495 case ISD::ATOMIC_LOAD_AND:
496 case ISD::ATOMIC_LOAD_OR:
497 case ISD::ATOMIC_LOAD_XOR:
498 case ISD::ATOMIC_LOAD_NAND:
499 case ISD::ATOMIC_LOAD_MIN:
500 case ISD::ATOMIC_LOAD_MAX:
501 case ISD::ATOMIC_LOAD_UMIN:
502 case ISD::ATOMIC_LOAD_UMAX:
503 case ISD::ATOMIC_LOAD:
504 case ISD::ATOMIC_STORE: {
505 const AtomicSDNode *AT = cast<AtomicSDNode>(N);
506 ID.AddInteger(AT->getMemoryVT().getRawBits());
507 ID.AddInteger(AT->getRawSubclassData());
508 ID.AddInteger(AT->getPointerInfo().getAddrSpace());
511 case ISD::PREFETCH: {
512 const MemSDNode *PF = cast<MemSDNode>(N);
513 ID.AddInteger(PF->getPointerInfo().getAddrSpace());
516 case ISD::VECTOR_SHUFFLE: {
517 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
518 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
520 ID.AddInteger(SVN->getMaskElt(i));
523 case ISD::TargetBlockAddress:
524 case ISD::BlockAddress: {
525 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
526 ID.AddPointer(BA->getBlockAddress());
527 ID.AddInteger(BA->getOffset());
528 ID.AddInteger(BA->getTargetFlags());
531 } // end switch (N->getOpcode())
533 // Target specific memory nodes could also have address spaces to check.
534 if (N->isTargetMemoryOpcode())
535 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
538 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
540 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
541 AddNodeIDOpcode(ID, N->getOpcode());
542 // Add the return value info.
543 AddNodeIDValueTypes(ID, N->getVTList());
544 // Add the operand info.
545 AddNodeIDOperands(ID, N->ops());
547 // Handle SDNode leafs with special info.
548 AddNodeIDCustom(ID, N);
551 //===----------------------------------------------------------------------===//
552 // SelectionDAG Class
553 //===----------------------------------------------------------------------===//
555 /// doNotCSE - Return true if CSE should not be performed for this node.
556 static bool doNotCSE(SDNode *N) {
557 if (N->getValueType(0) == MVT::Glue)
558 return true; // Never CSE anything that produces a flag.
560 switch (N->getOpcode()) {
562 case ISD::HANDLENODE:
564 return true; // Never CSE these nodes.
567 // Check that remaining values produced are not flags.
568 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
569 if (N->getValueType(i) == MVT::Glue)
570 return true; // Never CSE anything that produces a flag.
575 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
577 void SelectionDAG::RemoveDeadNodes() {
578 // Create a dummy node (which is not added to allnodes), that adds a reference
579 // to the root node, preventing it from being deleted.
580 HandleSDNode Dummy(getRoot());
582 SmallVector<SDNode*, 128> DeadNodes;
584 // Add all obviously-dead nodes to the DeadNodes worklist.
585 for (SDNode &Node : allnodes())
586 if (Node.use_empty())
587 DeadNodes.push_back(&Node);
589 RemoveDeadNodes(DeadNodes);
591 // If the root changed (e.g. it was a dead load, update the root).
592 setRoot(Dummy.getValue());
595 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
596 /// given list, and any nodes that become unreachable as a result.
597 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
599 // Process the worklist, deleting the nodes and adding their uses to the
601 while (!DeadNodes.empty()) {
602 SDNode *N = DeadNodes.pop_back_val();
603 // Skip to next node if we've already managed to delete the node. This could
604 // happen if replacing a node causes a node previously added to the node to
606 if (N->getOpcode() == ISD::DELETED_NODE)
609 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
610 DUL->NodeDeleted(N, nullptr);
612 // Take the node out of the appropriate CSE map.
613 RemoveNodeFromCSEMaps(N);
615 // Next, brutally remove the operand list. This is safe to do, as there are
616 // no cycles in the graph.
617 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
619 SDNode *Operand = Use.getNode();
622 // Now that we removed this operand, see if there are no uses of it left.
623 if (Operand->use_empty())
624 DeadNodes.push_back(Operand);
631 void SelectionDAG::RemoveDeadNode(SDNode *N){
632 SmallVector<SDNode*, 16> DeadNodes(1, N);
634 // Create a dummy node that adds a reference to the root node, preventing
635 // it from being deleted. (This matters if the root is an operand of the
637 HandleSDNode Dummy(getRoot());
639 RemoveDeadNodes(DeadNodes);
642 void SelectionDAG::DeleteNode(SDNode *N) {
643 // First take this out of the appropriate CSE map.
644 RemoveNodeFromCSEMaps(N);
646 // Finally, remove uses due to operands of this node, remove from the
647 // AllNodes list, and delete the node.
648 DeleteNodeNotInCSEMaps(N);
651 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
652 assert(N->getIterator() != AllNodes.begin() &&
653 "Cannot delete the entry node!");
654 assert(N->use_empty() && "Cannot delete a node that is not dead!");
656 // Drop all of the operands and decrement used node's use counts.
662 void SDDbgInfo::erase(const SDNode *Node) {
663 DbgValMapType::iterator I = DbgValMap.find(Node);
664 if (I == DbgValMap.end())
666 for (auto &Val: I->second)
667 Val->setIsInvalidated();
671 void SelectionDAG::DeallocateNode(SDNode *N) {
672 // If we have operands, deallocate them.
675 NodeAllocator.Deallocate(AllNodes.remove(N));
677 // Set the opcode to DELETED_NODE to help catch bugs when node
678 // memory is reallocated.
679 // FIXME: There are places in SDag that have grown a dependency on the opcode
680 // value in the released node.
681 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
682 N->NodeType = ISD::DELETED_NODE;
684 // If any of the SDDbgValue nodes refer to this SDNode, invalidate
685 // them and forget about that node.
690 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
691 static void VerifySDNode(SDNode *N) {
692 switch (N->getOpcode()) {
695 case ISD::BUILD_PAIR: {
696 EVT VT = N->getValueType(0);
697 assert(N->getNumValues() == 1 && "Too many results!");
698 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
699 "Wrong return type!");
700 assert(N->getNumOperands() == 2 && "Wrong number of operands!");
701 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
702 "Mismatched operand types!");
703 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
704 "Wrong operand type!");
705 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
706 "Wrong return type size");
709 case ISD::BUILD_VECTOR: {
710 assert(N->getNumValues() == 1 && "Too many results!");
711 assert(N->getValueType(0).isVector() && "Wrong return type!");
712 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
713 "Wrong number of operands!");
714 EVT EltVT = N->getValueType(0).getVectorElementType();
715 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
716 assert((I->getValueType() == EltVT ||
717 (EltVT.isInteger() && I->getValueType().isInteger() &&
718 EltVT.bitsLE(I->getValueType()))) &&
719 "Wrong operand type!");
720 assert(I->getValueType() == N->getOperand(0).getValueType() &&
721 "Operands must all have the same type");
729 /// \brief Insert a newly allocated node into the DAG.
731 /// Handles insertion into the all nodes list and CSE map, as well as
732 /// verification and other common operations when a new node is allocated.
733 void SelectionDAG::InsertNode(SDNode *N) {
734 AllNodes.push_back(N);
736 N->PersistentId = NextPersistentId++;
741 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
742 /// correspond to it. This is useful when we're about to delete or repurpose
743 /// the node. We don't want future request for structurally identical nodes
744 /// to return N anymore.
745 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
747 switch (N->getOpcode()) {
748 case ISD::HANDLENODE: return false; // noop.
750 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
751 "Cond code doesn't exist!");
752 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
753 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
755 case ISD::ExternalSymbol:
756 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
758 case ISD::TargetExternalSymbol: {
759 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
760 Erased = TargetExternalSymbols.erase(
761 std::pair<std::string,unsigned char>(ESN->getSymbol(),
762 ESN->getTargetFlags()));
765 case ISD::MCSymbol: {
766 auto *MCSN = cast<MCSymbolSDNode>(N);
767 Erased = MCSymbols.erase(MCSN->getMCSymbol());
770 case ISD::VALUETYPE: {
771 EVT VT = cast<VTSDNode>(N)->getVT();
772 if (VT.isExtended()) {
773 Erased = ExtendedValueTypeNodes.erase(VT);
775 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
776 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
781 // Remove it from the CSE Map.
782 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
783 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
784 Erased = CSEMap.RemoveNode(N);
788 // Verify that the node was actually in one of the CSE maps, unless it has a
789 // flag result (which cannot be CSE'd) or is one of the special cases that are
790 // not subject to CSE.
791 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
792 !N->isMachineOpcode() && !doNotCSE(N)) {
795 llvm_unreachable("Node is not in map!");
801 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
802 /// maps and modified in place. Add it back to the CSE maps, unless an identical
803 /// node already exists, in which case transfer all its users to the existing
804 /// node. This transfer can potentially trigger recursive merging.
806 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
807 // For node types that aren't CSE'd, just act as if no identical node
810 SDNode *Existing = CSEMap.GetOrInsertNode(N);
812 // If there was already an existing matching node, use ReplaceAllUsesWith
813 // to replace the dead one with the existing one. This can cause
814 // recursive merging of other unrelated nodes down the line.
815 ReplaceAllUsesWith(N, Existing);
817 // N is now dead. Inform the listeners and delete it.
818 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
819 DUL->NodeDeleted(N, Existing);
820 DeleteNodeNotInCSEMaps(N);
825 // If the node doesn't already exist, we updated it. Inform listeners.
826 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
830 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
831 /// were replaced with those specified. If this node is never memoized,
832 /// return null, otherwise return a pointer to the slot it would take. If a
833 /// node already exists with these operands, the slot will be non-null.
834 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
839 SDValue Ops[] = { Op };
841 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
842 AddNodeIDCustom(ID, N);
843 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
845 Node->intersectFlagsWith(N->getFlags());
849 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
850 /// were replaced with those specified. If this node is never memoized,
851 /// return null, otherwise return a pointer to the slot it would take. If a
852 /// node already exists with these operands, the slot will be non-null.
853 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
854 SDValue Op1, SDValue Op2,
859 SDValue Ops[] = { Op1, Op2 };
861 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
862 AddNodeIDCustom(ID, N);
863 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
865 Node->intersectFlagsWith(N->getFlags());
869 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
870 /// were replaced with those specified. If this node is never memoized,
871 /// return null, otherwise return a pointer to the slot it would take. If a
872 /// node already exists with these operands, the slot will be non-null.
873 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
879 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
880 AddNodeIDCustom(ID, N);
881 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
883 Node->intersectFlagsWith(N->getFlags());
887 unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
888 Type *Ty = VT == MVT::iPTR ?
889 PointerType::get(Type::getInt8Ty(*getContext()), 0) :
890 VT.getTypeForEVT(*getContext());
892 return getDataLayout().getABITypeAlignment(Ty);
895 // EntryNode could meaningfully have debug info if we can find it...
896 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
897 : TM(tm), OptLevel(OL),
898 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
899 Root(getEntryNode()) {
900 InsertNode(&EntryNode);
901 DbgInfo = new SDDbgInfo();
904 void SelectionDAG::init(MachineFunction &NewMF,
905 OptimizationRemarkEmitter &NewORE,
908 SDAGISelPass = PassPtr;
910 TLI = getSubtarget().getTargetLowering();
911 TSI = getSubtarget().getSelectionDAGInfo();
912 Context = &MF->getFunction().getContext();
915 SelectionDAG::~SelectionDAG() {
916 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
918 OperandRecycler.clear(OperandAllocator);
922 void SelectionDAG::allnodes_clear() {
923 assert(&*AllNodes.begin() == &EntryNode);
924 AllNodes.remove(AllNodes.begin());
925 while (!AllNodes.empty())
926 DeallocateNode(&AllNodes.front());
928 NextPersistentId = 0;
932 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
934 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
936 switch (N->getOpcode()) {
939 case ISD::ConstantFP:
940 llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
941 "debug location. Use another overload.");
947 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
948 const SDLoc &DL, void *&InsertPos) {
949 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
951 switch (N->getOpcode()) {
953 case ISD::ConstantFP:
954 // Erase debug location from the node if the node is used at several
955 // different places. Do not propagate one location to all uses as it
956 // will cause a worse single stepping debugging experience.
957 if (N->getDebugLoc() != DL.getDebugLoc())
958 N->setDebugLoc(DebugLoc());
961 // When the node's point of use is located earlier in the instruction
962 // sequence than its prior point of use, update its debug info to the
964 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
965 N->setDebugLoc(DL.getDebugLoc());
972 void SelectionDAG::clear() {
974 OperandRecycler.clear(OperandAllocator);
975 OperandAllocator.Reset();
978 ExtendedValueTypeNodes.clear();
979 ExternalSymbols.clear();
980 TargetExternalSymbols.clear();
982 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
983 static_cast<CondCodeSDNode*>(nullptr));
984 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
985 static_cast<SDNode*>(nullptr));
987 EntryNode.UseList = nullptr;
988 InsertNode(&EntryNode);
989 Root = getEntryNode();
993 SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
994 return VT.bitsGT(Op.getValueType())
995 ? getNode(ISD::FP_EXTEND, DL, VT, Op)
996 : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
999 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1000 return VT.bitsGT(Op.getValueType()) ?
1001 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1002 getNode(ISD::TRUNCATE, DL, VT, Op);
1005 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1006 return VT.bitsGT(Op.getValueType()) ?
1007 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1008 getNode(ISD::TRUNCATE, DL, VT, Op);
1011 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1012 return VT.bitsGT(Op.getValueType()) ?
1013 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1014 getNode(ISD::TRUNCATE, DL, VT, Op);
1017 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
1019 if (VT.bitsLE(Op.getValueType()))
1020 return getNode(ISD::TRUNCATE, SL, VT, Op);
1022 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1023 return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1026 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1027 assert(!VT.isVector() &&
1028 "getZeroExtendInReg should use the vector element type instead of "
1029 "the vector type!");
1030 if (Op.getValueType().getScalarType() == VT) return Op;
1031 unsigned BitWidth = Op.getScalarValueSizeInBits();
1032 APInt Imm = APInt::getLowBitsSet(BitWidth,
1033 VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}
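
// Illustrative use, assuming a SelectionDAG instance named DAG: zero extending
// the low 16 bits of an i32 value in-register masks off the high half, i.e.
// DAG.getZeroExtendInReg(Op, DL, MVT::i16) builds the same node as
//   SDValue Mask = DAG.getConstant(APInt::getLowBitsSet(32, 16), DL, MVT::i32);
//   SDValue Res  = DAG.getNode(ISD::AND, DL, MVT::i32, Op, Mask);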
1038 SDValue SelectionDAG::getAnyExtendVectorInReg(SDValue Op, const SDLoc &DL,
1040 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1041 assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1042 "The sizes of the input and result must match in order to perform the "
1043 "extend in-register.");
1044 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1045 "The destination vector type must have fewer lanes than the input.");
1046 return getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Op);
1049 SDValue SelectionDAG::getSignExtendVectorInReg(SDValue Op, const SDLoc &DL,
1051 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1052 assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1053 "The sizes of the input and result must match in order to perform the "
1054 "extend in-register.");
1055 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1056 "The destination vector type must have fewer lanes than the input.");
1057 return getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, VT, Op);
1060 SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
1062 assert(VT.isVector() && "This DAG node is restricted to vector types.");
1063 assert(VT.getSizeInBits() == Op.getValueSizeInBits() &&
1064 "The sizes of the input and result must match in order to perform the "
1065 "extend in-register.");
1066 assert(VT.getVectorNumElements() < Op.getValueType().getVectorNumElements() &&
1067 "The destination vector type must have fewer lanes than the input.");
1068 return getNode(ISD::ZERO_EXTEND_VECTOR_INREG, DL, VT, Op);
1071 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1072 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
      getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}
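
// Illustrative expansion: getNOT(DL, Val, MVT::i32) produces
// (xor Val, 0xFFFFFFFF); for a vector type the all-ones constant is splatted
// across the elements by the getConstant call above.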
1079 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1080 EVT EltVT = VT.getScalarType();
1082 switch (TLI->getBooleanContents(VT)) {
1083 case TargetLowering::ZeroOrOneBooleanContent:
1084 case TargetLowering::UndefinedBooleanContent:
1085 TrueValue = getConstant(1, DL, VT);
1087 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1088 TrueValue = getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL,
1092 return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1095 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1096 bool isT, bool isO) {
1097 EVT EltVT = VT.getScalarType();
1098 assert((EltVT.getSizeInBits() >= 64 ||
1099 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1100 "getConstant with a uint64_t value that doesn't fit in the type!");
1101 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1104 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
1105 bool isT, bool isO) {
1106 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1109 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
1110 EVT VT, bool isT, bool isO) {
1111 assert(VT.isInteger() && "Cannot create FP integer constant!");
1113 EVT EltVT = VT.getScalarType();
1114 const ConstantInt *Elt = &Val;
1116 // In some cases the vector type is legal but the element type is illegal and
1117 // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1118 // inserted value (the type does not need to match the vector element type).
1119 // Any extra bits introduced will be truncated away.
1120 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1121 TargetLowering::TypePromoteInteger) {
1122 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1123 APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1124 Elt = ConstantInt::get(*getContext(), NewVal);
1126 // In other cases the element type is illegal and needs to be expanded, for
1127 // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1128 // the value into n parts and use a vector type with n-times the elements.
1129 // Then bitcast to the type requested.
1130 // Legalizing constants too early makes the DAGCombiner's job harder so we
1131 // only legalize if the DAG tells us we must produce legal types.
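  // Illustrative example (assumed target behaviour): a v2i64 splat of
  // 0x0000000100000002 on a 32-bit target where i64 must be expanded is built
  // as a v4i32 BUILD_VECTOR <2, 1, 2, 1> (low part first on little-endian,
  // reversed below for big-endian) and then bitcast back to v2i64.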
1132 else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1133 TLI->getTypeAction(*getContext(), EltVT) ==
1134 TargetLowering::TypeExpandInteger) {
1135 const APInt &NewVal = Elt->getValue();
1136 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1137 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1138 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1139 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1141 // Check the temporary vector is the correct size. If this fails then
1142 // getTypeToTransformTo() probably returned a type whose size (in bits)
1143 // isn't a power-of-2 factor of the requested type size.
1144 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1146 SmallVector<SDValue, 2> EltParts;
1147 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
1148 EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
1149 .zextOrTrunc(ViaEltSizeInBits), DL,
1150 ViaEltVT, isT, isO));
1153 // EltParts is currently in little endian order. If we actually want
1154 // big-endian order then reverse it now.
1155 if (getDataLayout().isBigEndian())
1156 std::reverse(EltParts.begin(), EltParts.end());
1158 // The elements must be reversed when the element order is different
1159 // to the endianness of the elements (because the BITCAST is itself a
1160 // vector shuffle in this situation). However, we do not need any code to
1161 // perform this reversal because getConstant() is producing a vector
1163 // This situation occurs in MIPS MSA.
1165 SmallVector<SDValue, 8> Ops;
1166 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1167 Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
1169 SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1173 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1174 "APInt size does not match type size!");
1175 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1176 FoldingSetNodeID ID;
1177 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1181 SDNode *N = nullptr;
1182 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1184 return SDValue(N, 0);
1187 N = newSDNode<ConstantSDNode>(isT, isO, Elt, DL.getDebugLoc(), EltVT);
1188 CSEMap.InsertNode(N, IP);
1190 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1193 SDValue Result(N, 0);
1195 Result = getSplatBuildVector(VT, DL, Result);
1200 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1202 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1205 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1207 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1210 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1211 EVT VT, bool isTarget) {
1212 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1214 EVT EltVT = VT.getScalarType();
1216 // Do the map lookup using the actual bit pattern for the floating point
1217 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1218 // we don't have issues with SNANs.
1219 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1220 FoldingSetNodeID ID;
1221 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1224 SDNode *N = nullptr;
1225 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1227 return SDValue(N, 0);
1230 N = newSDNode<ConstantFPSDNode>(isTarget, &V, DL.getDebugLoc(), EltVT);
1231 CSEMap.InsertNode(N, IP);
1235 SDValue Result(N, 0);
1237 Result = getSplatBuildVector(VT, DL, Result);
1238 NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1242 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1244 EVT EltVT = VT.getScalarType();
1245 if (EltVT == MVT::f32)
1246 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1247 else if (EltVT == MVT::f64)
1248 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1249 else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1250 EltVT == MVT::f16) {
1252 APFloat APF = APFloat(Val);
1253 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1255 return getConstantFP(APF, DL, VT, isTarget);
1257 llvm_unreachable("Unsupported type in getConstantFP");
1260 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1261 EVT VT, int64_t Offset, bool isTargetGA,
1262 unsigned char TargetFlags) {
1263 assert((TargetFlags == 0 || isTargetGA) &&
1264 "Cannot set target flags on target-independent globals");
1266 // Truncate (with sign-extension) the offset value to the pointer size.
1267 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1269 Offset = SignExtend64(Offset, BitWidth);
1272 if (GV->isThreadLocal())
1273 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1275 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1277 FoldingSetNodeID ID;
1278 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1280 ID.AddInteger(Offset);
1281 ID.AddInteger(TargetFlags);
1283 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1284 return SDValue(E, 0);
1286 auto *N = newSDNode<GlobalAddressSDNode>(
1287 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1288 CSEMap.InsertNode(N, IP);
1290 return SDValue(N, 0);
1293 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1294 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1295 FoldingSetNodeID ID;
1296 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1299 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1300 return SDValue(E, 0);
1302 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1303 CSEMap.InsertNode(N, IP);
1305 return SDValue(N, 0);
1308 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1309 unsigned char TargetFlags) {
1310 assert((TargetFlags == 0 || isTarget) &&
1311 "Cannot set target flags on target-independent jump tables");
1312 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1313 FoldingSetNodeID ID;
1314 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1316 ID.AddInteger(TargetFlags);
1318 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1319 return SDValue(E, 0);
1321 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1322 CSEMap.InsertNode(N, IP);
1324 return SDValue(N, 0);
1327 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1328 unsigned Alignment, int Offset,
1330 unsigned char TargetFlags) {
1331 assert((TargetFlags == 0 || isTarget) &&
1332 "Cannot set target flags on target-independent globals");
1334 Alignment = MF->getFunction().optForSize()
1335 ? getDataLayout().getABITypeAlignment(C->getType())
1336 : getDataLayout().getPrefTypeAlignment(C->getType());
1337 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1338 FoldingSetNodeID ID;
1339 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1340 ID.AddInteger(Alignment);
1341 ID.AddInteger(Offset);
1343 ID.AddInteger(TargetFlags);
1345 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1346 return SDValue(E, 0);
1348 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1350 CSEMap.InsertNode(N, IP);
1352 return SDValue(N, 0);
1355 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1356 unsigned Alignment, int Offset,
1358 unsigned char TargetFlags) {
1359 assert((TargetFlags == 0 || isTarget) &&
1360 "Cannot set target flags on target-independent globals");
1362 Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
1363 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1364 FoldingSetNodeID ID;
1365 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1366 ID.AddInteger(Alignment);
1367 ID.AddInteger(Offset);
1368 C->addSelectionDAGCSEId(ID);
1369 ID.AddInteger(TargetFlags);
1371 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1372 return SDValue(E, 0);
1374 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1376 CSEMap.InsertNode(N, IP);
1378 return SDValue(N, 0);
1381 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1382 unsigned char TargetFlags) {
1383 FoldingSetNodeID ID;
1384 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1385 ID.AddInteger(Index);
1386 ID.AddInteger(Offset);
1387 ID.AddInteger(TargetFlags);
1389 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1390 return SDValue(E, 0);
1392 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1393 CSEMap.InsertNode(N, IP);
1395 return SDValue(N, 0);
1398 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1399 FoldingSetNodeID ID;
1400 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1403 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1404 return SDValue(E, 0);
1406 auto *N = newSDNode<BasicBlockSDNode>(MBB);
1407 CSEMap.InsertNode(N, IP);
1409 return SDValue(N, 0);
1412 SDValue SelectionDAG::getValueType(EVT VT) {
1413 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1414 ValueTypeNodes.size())
1415 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1417 SDNode *&N = VT.isExtended() ?
1418 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1420 if (N) return SDValue(N, 0);
1421 N = newSDNode<VTSDNode>(VT);
1423 return SDValue(N, 0);
1426 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1427 SDNode *&N = ExternalSymbols[Sym];
1428 if (N) return SDValue(N, 0);
1429 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1431 return SDValue(N, 0);
1434 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1435 SDNode *&N = MCSymbols[Sym];
1437 return SDValue(N, 0);
1438 N = newSDNode<MCSymbolSDNode>(Sym, VT);
1440 return SDValue(N, 0);
1443 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1444 unsigned char TargetFlags) {
1446 TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
1448 if (N) return SDValue(N, 0);
1449 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1451 return SDValue(N, 0);
1454 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1455 if ((unsigned)Cond >= CondCodeNodes.size())
1456 CondCodeNodes.resize(Cond+1);
1458 if (!CondCodeNodes[Cond]) {
1459 auto *N = newSDNode<CondCodeSDNode>(Cond);
1460 CondCodeNodes[Cond] = N;
1464 return SDValue(CondCodeNodes[Cond], 0);
1467 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1468 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
1469 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1471 ShuffleVectorSDNode::commuteMask(M);
1474 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1475 SDValue N2, ArrayRef<int> Mask) {
1476 assert(VT.getVectorNumElements() == Mask.size() &&
1477 "Must have the same number of vector elements as mask elements!");
1478 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1479 "Invalid VECTOR_SHUFFLE");
1481 // Canonicalize shuffle undef, undef -> undef
1482 if (N1.isUndef() && N2.isUndef())
1483 return getUNDEF(VT);
1485 // Validate that all indices in Mask are within the range of the elements
1486 // input to the shuffle.
1487 int NElts = Mask.size();
1488 assert(llvm::all_of(Mask,
1489 [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1490 "Index out of range");
1492 // Copy the mask so we can do any needed cleanup.
1493 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (int i = 0; i != NElts; ++i)
      if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
  }
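
  // Worked example (illustrative): with 4 elements,
  // getVectorShuffle(VT, dl, V, V, <0, 5, 2, 7>) is canonicalized here to
  // shuffle(V, undef, <0, 1, 2, 3>), because indices >= NElts select from the
  // second operand, which is the same value as the first.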
1502 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1504 commuteShuffle(N1, N2, MaskVec);
1506 // If shuffling a splat, try to blend the splat instead. We do this here so
1507 // that even when this arises during lowering we don't have to re-handle it.
1508 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1509 BitVector UndefElements;
1510 SDValue Splat = BV->getSplatValue(&UndefElements);
1514 for (int i = 0; i < NElts; ++i) {
1515 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1518 // If this input comes from undef, mark it as such.
1519 if (UndefElements[MaskVec[i] - Offset]) {
1524 // If we can blend a non-undef lane, use that instead.
1525 if (!UndefElements[i])
1526 MaskVec[i] = i + Offset;
1529 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1530 BlendSplat(N1BV, 0);
1531 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1532 BlendSplat(N2BV, NElts);
1534 // Canonicalize all index into lhs, -> shuffle lhs, undef
1535 // Canonicalize all index into rhs, -> shuffle rhs, undef
1536 bool AllLHS = true, AllRHS = true;
1537 bool N2Undef = N2.isUndef();
1538 for (int i = 0; i != NElts; ++i) {
1539 if (MaskVec[i] >= NElts) {
1544 } else if (MaskVec[i] >= 0) {
1548 if (AllLHS && AllRHS)
1549 return getUNDEF(VT);
1550 if (AllLHS && !N2Undef)
1554 commuteShuffle(N1, N2, MaskVec);
1556 // Reset our undef status after accounting for the mask.
1557 N2Undef = N2.isUndef();
1558 // Re-check whether both sides ended up undef.
1559 if (N1.isUndef() && N2Undef)
1560 return getUNDEF(VT);
1562 // If Identity shuffle return that node.
1563 bool Identity = true, AllSame = true;
1564 for (int i = 0; i != NElts; ++i) {
1565 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1566 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1568 if (Identity && NElts)
1571 // Shuffling a constant splat doesn't change the result.
1575 // Look through any bitcasts. We check that these don't change the number
1576 // (and size) of elements and just changes their types.
1577 while (V.getOpcode() == ISD::BITCAST)
1578 V = V->getOperand(0);
1580 // A splat should always show up as a build vector node.
1581 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1582 BitVector UndefElements;
1583 SDValue Splat = BV->getSplatValue(&UndefElements);
1584 // If this is a splat of an undef, shuffling it is also undef.
1585 if (Splat && Splat.isUndef())
1586 return getUNDEF(VT);
1589 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1591 // We only have a splat which can skip shuffles if there is a splatted
1592 // value and no undef lanes rearranged by the shuffle.
1593 if (Splat && UndefElements.none()) {
1594 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1595 // number of elements match or the value splatted is a zero constant.
1598 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1599 if (C->isNullValue())
1603 // If the shuffle itself creates a splat, build the vector directly.
1604 if (AllSame && SameNumElts) {
1605 EVT BuildVT = BV->getValueType(0);
1606 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1607 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1609 // We may have jumped through bitcasts, so the type of the
1610 // BUILD_VECTOR may not match the type of the shuffle.
1612 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1618 FoldingSetNodeID ID;
1619 SDValue Ops[2] = { N1, N2 };
1620 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1621 for (int i = 0; i != NElts; ++i)
1622 ID.AddInteger(MaskVec[i]);
1625 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1626 return SDValue(E, 0);
1628 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1629 // SDNode doesn't have access to it. This memory will be "leaked" when
1630 // the node is deallocated, but recovered when the NodeAllocator is released.
1631 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1632 std::copy(MaskVec.begin(), MaskVec.end(), MaskAlloc);
1634 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1635 dl.getDebugLoc(), MaskAlloc);
1636 createOperands(N, Ops);
1638 CSEMap.InsertNode(N, IP);
1640 SDValue V = SDValue(N, 0);
1641 NewSDValueDbgMsg(V, "Creating new node: ", this);
1645 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1646 MVT VT = SV.getSimpleValueType(0);
1647 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1648 ShuffleVectorSDNode::commuteMask(MaskVec);
1650 SDValue Op0 = SV.getOperand(0);
1651 SDValue Op1 = SV.getOperand(1);
1652 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1655 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1656 FoldingSetNodeID ID;
1657 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1658 ID.AddInteger(RegNo);
1660 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1661 return SDValue(E, 0);
1663 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1664 CSEMap.InsertNode(N, IP);
1666 return SDValue(N, 0);
1669 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1670 FoldingSetNodeID ID;
1671 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1672 ID.AddPointer(RegMask);
1674 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1675 return SDValue(E, 0);
1677 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1678 CSEMap.InsertNode(N, IP);
1680 return SDValue(N, 0);
1683 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
1685 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
1688 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
1689 SDValue Root, MCSymbol *Label) {
1690 FoldingSetNodeID ID;
1691 SDValue Ops[] = { Root };
1692 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
1693 ID.AddPointer(Label);
1695 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1696 return SDValue(E, 0);
1698 auto *N = newSDNode<LabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label);
1699 createOperands(N, Ops);
1701 CSEMap.InsertNode(N, IP);
1703 return SDValue(N, 0);
1706 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1709 unsigned char TargetFlags) {
1710 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1712 FoldingSetNodeID ID;
1713 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1715 ID.AddInteger(Offset);
1716 ID.AddInteger(TargetFlags);
1718 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1719 return SDValue(E, 0);
1721 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
1722 CSEMap.InsertNode(N, IP);
1724 return SDValue(N, 0);
1727 SDValue SelectionDAG::getSrcValue(const Value *V) {
1728 assert((!V || V->getType()->isPointerTy()) &&
1729 "SrcValue is not a pointer?");
1731 FoldingSetNodeID ID;
1732 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1736 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1737 return SDValue(E, 0);
1739 auto *N = newSDNode<SrcValueSDNode>(V);
1740 CSEMap.InsertNode(N, IP);
1742 return SDValue(N, 0);
1745 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1746 FoldingSetNodeID ID;
1747 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
1751 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1752 return SDValue(E, 0);
1754 auto *N = newSDNode<MDNodeSDNode>(MD);
1755 CSEMap.InsertNode(N, IP);
1757 return SDValue(N, 0);
1760 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
1761 if (VT == V.getValueType())
1764 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
1767 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
1768 unsigned SrcAS, unsigned DestAS) {
1769 SDValue Ops[] = {Ptr};
1770 FoldingSetNodeID ID;
1771 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
1772 ID.AddInteger(SrcAS);
1773 ID.AddInteger(DestAS);
1776 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1777 return SDValue(E, 0);
1779 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
1781 createOperands(N, Ops);
1783 CSEMap.InsertNode(N, IP);
1785 return SDValue(N, 0);
1788 /// getShiftAmountOperand - Return the specified value casted to
1789 /// the target's desired shift amount type.
1790 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1791 EVT OpTy = Op.getValueType();
1792 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
1793 if (OpTy == ShTy || OpTy.isVector()) return Op;
1795 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
1798 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
1800 const TargetLowering &TLI = getTargetLoweringInfo();
1801 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1802 EVT VT = Node->getValueType(0);
1803 SDValue Tmp1 = Node->getOperand(0);
1804 SDValue Tmp2 = Node->getOperand(1);
1805 unsigned Align = Node->getConstantOperandVal(3);
1807 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
1808 Tmp2, MachinePointerInfo(V));
1809 SDValue VAList = VAListLoad;
1811 if (Align > TLI.getMinStackArgumentAlignment()) {
1812 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
1814 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1815 getConstant(Align - 1, dl, VAList.getValueType()));
1817 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList,
1818 getConstant(-(int64_t)Align, dl, VAList.getValueType()));
1821 // Increment the pointer, VAList, to the next vaarg
1822 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1823 getConstant(getDataLayout().getTypeAllocSize(
1824 VT.getTypeForEVT(*getContext())),
1825 dl, VAList.getValueType()));
1826 // Store the incremented VAList to the legalized pointer
1827 Tmp1 =
1828 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
1829 // Load the actual argument out of the pointer VAList
1830 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
1831 }
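// Worked example of the alignment arithmetic above (illustrative values
// only): with Align == 8 and VAList == 0x1003,
//   VAList + (Align - 1) == 0x100a, and 0x100a & -(int64_t)8 == 0x1008,
// i.e. the pointer is rounded up to the next 8-byte boundary. This is why
// Align is asserted to be a power of two.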
1833 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
1834 SDLoc dl(Node);
1835 const TargetLowering &TLI = getTargetLoweringInfo();
1836 // This defaults to loading a pointer from the input and storing it to the
1837 // output, returning the chain.
1838 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
1839 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
1840 SDValue Tmp1 =
1841 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
1842 Node->getOperand(2), MachinePointerInfo(VS));
1843 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
1844 MachinePointerInfo(VD));
1847 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1848 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1849 unsigned ByteSize = VT.getStoreSize();
1850 Type *Ty = VT.getTypeForEVT(*getContext());
1851 unsigned StackAlign =
1852 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
1854 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
1855 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
1858 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1859 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
1860 Type *Ty1 = VT1.getTypeForEVT(*getContext());
1861 Type *Ty2 = VT2.getTypeForEVT(*getContext());
1862 const DataLayout &DL = getDataLayout();
1863 unsigned Align =
1864 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
1866 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1867 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
1868 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
1869 }
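// Usage sketch (hypothetical caller, not from this file): legalization code
// that must spill a value as one type and reload it as another can request a
// slot sized and aligned for both, e.g.
//   SDValue Slot = DAG.CreateStackTemporary(MVT::i64, MVT::f64);
// and then address both accesses through the returned FrameIndex.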
1871 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
1872 ISD::CondCode Cond, const SDLoc &dl) {
1873 // These setcc operations always fold.
1877 case ISD::SETFALSE2: return getConstant(0, dl, VT);
1879 case ISD::SETTRUE2: {
1880 TargetLowering::BooleanContent Cnt =
1881 TLI->getBooleanContents(N1->getValueType(0));
1882 return getConstant(
1883 Cnt == TargetLowering::ZeroOrNegativeOneBooleanContent ? -1ULL : 1, dl,
1884 VT);
1885 }
1897 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1901 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
1902 const APInt &C2 = N2C->getAPIntValue();
1903 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
1904 const APInt &C1 = N1C->getAPIntValue();
1907 default: llvm_unreachable("Unknown integer setcc!");
1908 case ISD::SETEQ: return getConstant(C1 == C2, dl, VT);
1909 case ISD::SETNE: return getConstant(C1 != C2, dl, VT);
1910 case ISD::SETULT: return getConstant(C1.ult(C2), dl, VT);
1911 case ISD::SETUGT: return getConstant(C1.ugt(C2), dl, VT);
1912 case ISD::SETULE: return getConstant(C1.ule(C2), dl, VT);
1913 case ISD::SETUGE: return getConstant(C1.uge(C2), dl, VT);
1914 case ISD::SETLT: return getConstant(C1.slt(C2), dl, VT);
1915 case ISD::SETGT: return getConstant(C1.sgt(C2), dl, VT);
1916 case ISD::SETLE: return getConstant(C1.sle(C2), dl, VT);
1917 case ISD::SETGE: return getConstant(C1.sge(C2), dl, VT);
1921 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
1922 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
1923 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
1926 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
1927 return getUNDEF(VT);
1929 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, dl, VT);
1930 case ISD::SETNE: if (R==APFloat::cmpUnordered)
1931 return getUNDEF(VT);
1933 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
1934 R==APFloat::cmpLessThan, dl, VT);
1935 case ISD::SETLT: if (R==APFloat::cmpUnordered)
1936 return getUNDEF(VT);
1938 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, dl, VT);
1939 case ISD::SETGT: if (R==APFloat::cmpUnordered)
1940 return getUNDEF(VT);
1942 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, dl, VT);
1943 case ISD::SETLE: if (R==APFloat::cmpUnordered)
1944 return getUNDEF(VT);
1946 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
1947 R==APFloat::cmpEqual, dl, VT);
1948 case ISD::SETGE: if (R==APFloat::cmpUnordered)
1949 return getUNDEF(VT);
1951 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
1952 R==APFloat::cmpEqual, dl, VT);
1953 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, dl, VT);
1954 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, dl, VT);
1955 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
1956 R==APFloat::cmpEqual, dl, VT);
1957 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, dl, VT);
1958 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
1959 R==APFloat::cmpLessThan, dl, VT);
1960 case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
1961 R==APFloat::cmpUnordered, dl, VT);
1962 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, dl, VT);
1963 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, dl, VT);
1966 // Ensure that the constant occurs on the RHS.
1967 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
1968 MVT CompVT = N1.getValueType().getSimpleVT();
1969 if (!TLI->isCondCodeLegal(SwappedCond, CompVT))
1970 return SDValue();
1972 return getSetCC(dl, VT, N2, N1, SwappedCond);
1976 // Could not fold it.
1980 /// See if the specified operand can be simplified with the knowledge that only
1981 /// the bits specified by Mask are used.
1982 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &Mask) {
1983 switch (V.getOpcode()) {
1986 case ISD::Constant: {
1987 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode());
1988 assert(CV && "Const value should be ConstSDNode.");
1989 const APInt &CVal = CV->getAPIntValue();
1990 APInt NewVal = CVal & Mask;
1991 if (NewVal != CVal)
1992 return getConstant(NewVal, SDLoc(V), V.getValueType());
1997 // If the LHS or RHS don't contribute bits to the or, drop them.
1998 if (MaskedValueIsZero(V.getOperand(0), Mask))
1999 return V.getOperand(1);
2000 if (MaskedValueIsZero(V.getOperand(1), Mask))
2001 return V.getOperand(0);
2004 // Only look at single-use SRLs.
2005 if (!V.getNode()->hasOneUse())
2006 break;
2007 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2008 // See if we can recursively simplify the LHS.
2009 unsigned Amt = RHSC->getZExtValue();
2011 // Watch out for shift count overflow though.
2012 if (Amt >= Mask.getBitWidth())
2013 break;
2014 APInt NewMask = Mask << Amt;
2015 if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask))
2016 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2017 V.getOperand(1));
2018 }
2019 break;
2020 case ISD::AND: {
2021 // X & -1 -> X (ignoring bits which aren't demanded).
2022 ConstantSDNode *AndVal = isConstOrConstSplat(V.getOperand(1));
2023 if (AndVal && Mask.isSubsetOf(AndVal->getAPIntValue()))
2024 return V.getOperand(0);
2025 break;
2026 }
2027 case ISD::ANY_EXTEND: {
2028 SDValue Src = V.getOperand(0);
2029 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
2030 // Being conservative here - only peek through if we only demand bits in the
2031 // non-extended source (even though the extended bits are technically undef).
2032 if (Mask.getActiveBits() > SrcBitWidth)
2033 break;
2034 APInt SrcMask = Mask.trunc(SrcBitWidth);
2035 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcMask))
2036 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc);
2037 break;
2038 }
2039 }
2040 return SDValue();
2041 }
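// Illustrative example (assumed operands, not from this file): with
// Mask == 0xFF and V == (X | 0xFF00), the OR case above sees that the
// constant operand contributes no demanded bits (0xFF00 & 0xFF == 0) and
// returns X, letting the caller drop the OR entirely.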
2043 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2044 /// use this predicate to simplify operations downstream.
2045 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2046 unsigned BitWidth = Op.getScalarValueSizeInBits();
2047 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2048 }
2050 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2051 /// this predicate to simplify operations downstream. Mask is known to be zero
2052 /// for bits that V cannot have.
2053 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
2054 unsigned Depth) const {
2055 KnownBits Known;
2056 computeKnownBits(Op, Known, Depth);
2057 return Mask.isSubsetOf(Known.Zero);
2058 }
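// Usage sketch (hypothetical): a combine that wants to prove a value is
// 4-byte aligned can ask whether its two low bits are known zero:
//   APInt LowTwo = APInt::getLowBitsSet(BitWidth, 2);
//   if (DAG.MaskedValueIsZero(Ptr, LowTwo)) {
//     // Ptr is a multiple of 4 as far as the known bits can prove.
//   }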
2060 /// Helper function that checks to see if a node is a constant or a
2061 /// build vector of splat constants at least within the demanded elts.
2062 static ConstantSDNode *isConstOrDemandedConstSplat(SDValue N,
2063 const APInt &DemandedElts) {
2064 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
2065 return CN;
2066 if (N.getOpcode() != ISD::BUILD_VECTOR)
2067 return nullptr;
2068 EVT VT = N.getValueType();
2069 ConstantSDNode *Cst = nullptr;
2070 unsigned NumElts = VT.getVectorNumElements();
2071 assert(DemandedElts.getBitWidth() == NumElts && "Unexpected vector size");
2072 for (unsigned i = 0; i != NumElts; ++i) {
2073 if (!DemandedElts[i])
2074 continue;
2075 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(i));
2076 if (!C || (Cst && Cst->getAPIntValue() != C->getAPIntValue()) ||
2077 C->getValueType(0) != VT.getScalarType())
2078 return nullptr;
2079 Cst = C;
2080 }
2081 return Cst;
2082 }
2084 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
2085 /// is less than the element bit-width of the shift node, return it.
2086 static const APInt *getValidShiftAmountConstant(SDValue V) {
2087 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) {
2088 // Shifting more than the bitwidth is not valid.
2089 const APInt &ShAmt = SA->getAPIntValue();
2090 if (ShAmt.ult(V.getScalarValueSizeInBits()))
2091 return &ShAmt;
2092 }
2093 return nullptr;
2094 }
2096 /// Determine which bits of Op are known to be either zero or one and return
2097 /// them in Known. For vectors, the known bits are those that are shared by
2098 /// every vector element.
2099 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
2100 unsigned Depth) const {
2101 EVT VT = Op.getValueType();
2102 APInt DemandedElts = VT.isVector()
2103 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2104 : APInt(1, 1);
2105 computeKnownBits(Op, Known, DemandedElts, Depth);
2106 }
2108 /// Determine which bits of Op are known to be either zero or one and return
2109 /// them in Known. The DemandedElts argument allows us to only collect the known
2110 /// bits that are shared by the requested vector elements.
2111 void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
2112 const APInt &DemandedElts,
2113 unsigned Depth) const {
2114 unsigned BitWidth = Op.getScalarValueSizeInBits();
2116 Known = KnownBits(BitWidth); // Don't know anything.
2118 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2119 // We know all of the bits for a constant!
2120 Known.One = C->getAPIntValue();
2121 Known.Zero = ~Known.One;
2122 return;
2123 }
2124 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2125 // We know all of the bits for a constant fp!
2126 Known.One = C->getValueAPF().bitcastToAPInt();
2127 Known.Zero = ~Known.One;
2128 return;
2129 }
2131 if (Depth == 6)
2132 return; // Limit search depth.
2134 KnownBits Known2;
2135 unsigned NumElts = DemandedElts.getBitWidth();
2137 if (!DemandedElts)
2138 return; // No demanded elts, better to assume we don't know anything.
2140 unsigned Opcode = Op.getOpcode();
2141 switch (Opcode) {
2142 case ISD::BUILD_VECTOR:
2143 // Collect the known bits that are shared by every demanded vector element.
2144 assert(NumElts == Op.getValueType().getVectorNumElements() &&
2145 "Unexpected vector size");
2146 Known.Zero.setAllBits(); Known.One.setAllBits();
2147 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2148 if (!DemandedElts[i])
2151 SDValue SrcOp = Op.getOperand(i);
2152 computeKnownBits(SrcOp, Known2, Depth + 1);
2154 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
2155 if (SrcOp.getValueSizeInBits() != BitWidth) {
2156 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2157 "Expected BUILD_VECTOR implicit truncation");
2158 Known2 = Known2.trunc(BitWidth);
2161 // Known bits are the values that are shared by every demanded element.
2162 Known.One &= Known2.One;
2163 Known.Zero &= Known2.Zero;
2165 // If we don't know any bits, early out.
2166 if (Known.isUnknown())
2170 case ISD::VECTOR_SHUFFLE: {
2171 // Collect the known bits that are shared by every vector element referenced
2173 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2174 Known.Zero.setAllBits(); Known.One.setAllBits();
2175 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2176 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2177 for (unsigned i = 0; i != NumElts; ++i) {
2178 if (!DemandedElts[i])
2181 int M = SVN->getMaskElt(i);
2183 // For UNDEF elements, we don't know anything about the common state of
2184 // the shuffle result.
2186 DemandedLHS.clearAllBits();
2187 DemandedRHS.clearAllBits();
2191 if ((unsigned)M < NumElts)
2192 DemandedLHS.setBit((unsigned)M % NumElts);
2193 else
2194 DemandedRHS.setBit((unsigned)M % NumElts);
2196 // Known bits are the values that are shared by every demanded element.
2197 if (!!DemandedLHS) {
2198 SDValue LHS = Op.getOperand(0);
2199 computeKnownBits(LHS, Known2, DemandedLHS, Depth + 1);
2200 Known.One &= Known2.One;
2201 Known.Zero &= Known2.Zero;
2203 // If we don't know any bits, early out.
2204 if (Known.isUnknown())
2206 if (!!DemandedRHS) {
2207 SDValue RHS = Op.getOperand(1);
2208 computeKnownBits(RHS, Known2, DemandedRHS, Depth + 1);
2209 Known.One &= Known2.One;
2210 Known.Zero &= Known2.Zero;
2214 case ISD::CONCAT_VECTORS: {
2215 // Split DemandedElts and test each of the demanded subvectors.
2216 Known.Zero.setAllBits(); Known.One.setAllBits();
2217 EVT SubVectorVT = Op.getOperand(0).getValueType();
2218 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2219 unsigned NumSubVectors = Op.getNumOperands();
2220 for (unsigned i = 0; i != NumSubVectors; ++i) {
2221 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2222 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2223 if (!!DemandedSub) {
2224 SDValue Sub = Op.getOperand(i);
2225 computeKnownBits(Sub, Known2, DemandedSub, Depth + 1);
2226 Known.One &= Known2.One;
2227 Known.Zero &= Known2.Zero;
2229 // If we don't know any bits, early out.
2230 if (Known.isUnknown())
2235 case ISD::INSERT_SUBVECTOR: {
2236 // If we know the element index, demand any elements from the subvector and
2237 // the remainder from the src its inserted into, otherwise demand them all.
2238 SDValue Src = Op.getOperand(0);
2239 SDValue Sub = Op.getOperand(1);
2240 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2241 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2242 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
2243 Known.One.setAllBits();
2244 Known.Zero.setAllBits();
2245 uint64_t Idx = SubIdx->getZExtValue();
2246 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2247 if (!!DemandedSubElts) {
2248 computeKnownBits(Sub, Known, DemandedSubElts, Depth + 1);
2249 if (Known.isUnknown())
2250 break; // early-out.
2252 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
2253 APInt DemandedSrcElts = DemandedElts & ~SubMask;
2254 if (!!DemandedSrcElts) {
2255 computeKnownBits(Src, Known2, DemandedSrcElts, Depth + 1);
2256 Known.One &= Known2.One;
2257 Known.Zero &= Known2.Zero;
2260 computeKnownBits(Sub, Known, Depth + 1);
2261 if (Known.isUnknown())
2262 break; // early-out.
2263 computeKnownBits(Src, Known2, Depth + 1);
2264 Known.One &= Known2.One;
2265 Known.Zero &= Known2.Zero;
2269 case ISD::EXTRACT_SUBVECTOR: {
2270 // If we know the element index, just demand that subvector elements,
2271 // otherwise demand them all.
2272 SDValue Src = Op.getOperand(0);
2273 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2274 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2275 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2276 // Offset the demanded elts by the subvector index.
2277 uint64_t Idx = SubIdx->getZExtValue();
2278 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
2279 computeKnownBits(Src, Known, DemandedSrc, Depth + 1);
2281 computeKnownBits(Src, Known, Depth + 1);
2285 case ISD::BITCAST: {
2286 SDValue N0 = Op.getOperand(0);
2287 EVT SubVT = N0.getValueType();
2288 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2290 // Ignore bitcasts from unsupported types.
2291 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2294 // Fast handling of 'identity' bitcasts.
2295 if (BitWidth == SubBitWidth) {
2296 computeKnownBits(N0, Known, DemandedElts, Depth + 1);
2300 // Support big-endian targets when it becomes useful.
2301 bool IsLE = getDataLayout().isLittleEndian();
2305 // Bitcast 'small element' vector to 'large element' scalar/vector.
2306 if ((BitWidth % SubBitWidth) == 0) {
2307 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2309 // Collect known bits for the (larger) output by collecting the known
2310 // bits from each set of sub elements and shift these into place.
2311 // We need to separately call computeKnownBits for each set of
2312 // sub elements as the knownbits for each is likely to be different.
2313 unsigned SubScale = BitWidth / SubBitWidth;
2314 APInt SubDemandedElts(NumElts * SubScale, 0);
2315 for (unsigned i = 0; i != NumElts; ++i)
2316 if (DemandedElts[i])
2317 SubDemandedElts.setBit(i * SubScale);
2319 for (unsigned i = 0; i != SubScale; ++i) {
2320 computeKnownBits(N0, Known2, SubDemandedElts.shl(i),
2322 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * i);
2323 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * i);
2327 // Bitcast 'large element' scalar/vector to 'small element' vector.
2328 if ((SubBitWidth % BitWidth) == 0) {
2329 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2331 // Collect known bits for the (smaller) output by collecting the known
2332 // bits from the overlapping larger input elements and extracting the
2333 // sub sections we actually care about.
2334 unsigned SubScale = SubBitWidth / BitWidth;
2335 APInt SubDemandedElts(NumElts / SubScale, 0);
2336 for (unsigned i = 0; i != NumElts; ++i)
2337 if (DemandedElts[i])
2338 SubDemandedElts.setBit(i / SubScale);
2340 computeKnownBits(N0, Known2, SubDemandedElts, Depth + 1);
2342 Known.Zero.setAllBits(); Known.One.setAllBits();
2343 for (unsigned i = 0; i != NumElts; ++i)
2344 if (DemandedElts[i]) {
2345 unsigned Offset = (i % SubScale) * BitWidth;
2346 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2347 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
2348 // If we don't know any bits, early out.
2349 if (Known.isUnknown())
2356 // If either the LHS or the RHS are Zero, the result is zero.
2357 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1);
2358 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2360 // Output known-1 bits are only known if set in both the LHS & RHS.
2361 Known.One &= Known2.One;
2362 // Output known-0 are known to be clear if zero in either the LHS | RHS.
2363 Known.Zero |= Known2.Zero;
2366 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1);
2367 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2369 // Output known-0 bits are only known if clear in both the LHS & RHS.
2370 Known.Zero &= Known2.Zero;
2371 // Output known-1 are known to be set if set in either the LHS | RHS.
2372 Known.One |= Known2.One;
2375 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1);
2376 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2378 // Output known-0 bits are known if clear or set in both the LHS & RHS.
2379 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
2380 // Output known-1 are known to be set if set in only one of the LHS, RHS.
2381 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
2382 Known.Zero = KnownZeroOut;
2386 computeKnownBits(Op.getOperand(1), Known, DemandedElts, Depth + 1);
2387 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2389 // If low bits are zero in either operand, output low known-0 bits.
2390 // Also compute a conservative estimate for high known-0 bits.
2391 // More trickiness is possible, but this is sufficient for the
2392 // interesting case of alignment computation.
2393 unsigned TrailZ = Known.countMinTrailingZeros() +
2394 Known2.countMinTrailingZeros();
2395 unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
2396 Known2.countMinLeadingZeros(),
2397 BitWidth) - BitWidth;
2400 Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
2401 Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
2405 // For the purposes of computing leading zeros we can conservatively
2406 // treat a udiv as a logical right shift by the power of 2 known to
2407 // be less than the denominator.
2408 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2409 unsigned LeadZ = Known2.countMinLeadingZeros();
2411 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2412 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
2413 if (RHSMaxLeadingZeros != BitWidth)
2414 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
2416 Known.Zero.setHighBits(LeadZ);
2421 computeKnownBits(Op.getOperand(2), Known, DemandedElts, Depth+1);
2422 // If we don't know any bits, early out.
2423 if (Known.isUnknown())
2425 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth+1);
2427 // Only known if known in both the LHS and RHS.
2428 Known.One &= Known2.One;
2429 Known.Zero &= Known2.Zero;
2431 case ISD::SELECT_CC:
2432 computeKnownBits(Op.getOperand(3), Known, DemandedElts, Depth+1);
2433 // If we don't know any bits, early out.
2434 if (Known.isUnknown())
2436 computeKnownBits(Op.getOperand(2), Known2, DemandedElts, Depth+1);
2438 // Only known if known in both the LHS and RHS.
2439 Known.One &= Known2.One;
2440 Known.Zero &= Known2.Zero;
2444 if (Op.getResNo() != 1)
2446 // The boolean result conforms to getBooleanContents.
2447 // If we know the result of a setcc has the top bits zero, use this info.
2448 // We know that we have an integer-based boolean since these operations
2449 // are only available for integer.
2450 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2451 TargetLowering::ZeroOrOneBooleanContent &&
2452 BitWidth > 1)
2453 Known.Zero.setBitsFrom(1);
2454 break;
2455 case ISD::SETCC:
2456 // If we know the result of a setcc has the top bits zero, use this info.
2457 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2458 TargetLowering::ZeroOrOneBooleanContent &&
2459 BitWidth > 1)
2460 Known.Zero.setBitsFrom(1);
2461 break;
2462 case ISD::SHL:
2463 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2464 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2465 unsigned Shift = ShAmt->getZExtValue();
2466 Known.Zero <<= Shift;
2467 Known.One <<= Shift;
2468 // Low bits are known zero.
2469 Known.Zero.setLowBits(Shift);
2473 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2474 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2475 unsigned Shift = ShAmt->getZExtValue();
2476 Known.Zero.lshrInPlace(Shift);
2477 Known.One.lshrInPlace(Shift);
2478 // High bits are known zero.
2479 Known.Zero.setHighBits(Shift);
2480 } else if (auto *BV = dyn_cast<BuildVectorSDNode>(Op.getOperand(1))) {
2481 // If the shift amount is a vector of constants see if we can bound
2482 // the number of upper zero bits.
2483 unsigned ShiftAmountMin = BitWidth;
2484 for (unsigned i = 0; i != BV->getNumOperands(); ++i) {
2485 if (auto *C = dyn_cast<ConstantSDNode>(BV->getOperand(i))) {
2486 const APInt &ShAmt = C->getAPIntValue();
2487 if (ShAmt.ult(BitWidth)) {
2488 ShiftAmountMin = std::min<unsigned>(ShiftAmountMin,
2489 ShAmt.getZExtValue());
2493 // Don't know anything.
2498 Known.Zero.setHighBits(ShiftAmountMin);
2502 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2503 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2504 unsigned Shift = ShAmt->getZExtValue();
2505 // Sign extend known zero/one bit (else is unknown).
2506 Known.Zero.ashrInPlace(Shift);
2507 Known.One.ashrInPlace(Shift);
2510 case ISD::SIGN_EXTEND_INREG: {
2511 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2512 unsigned EBits = EVT.getScalarSizeInBits();
2514 // Sign extension. Compute the demanded bits in the result that are not
2515 // present in the input.
2516 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2518 APInt InSignMask = APInt::getSignMask(EBits);
2519 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2521 // If the sign extended bits are demanded, we know that the sign
2523 InSignMask = InSignMask.zext(BitWidth);
2524 if (NewBits.getBoolValue())
2525 InputDemandedBits |= InSignMask;
2527 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2528 Known.One &= InputDemandedBits;
2529 Known.Zero &= InputDemandedBits;
2531 // If the sign bit of the input is known set or clear, then we know the
2532 // top bits of the result.
2533 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear
2534 Known.Zero |= NewBits;
2535 Known.One &= ~NewBits;
2536 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set
2537 Known.One |= NewBits;
2538 Known.Zero &= ~NewBits;
2539 } else { // Input sign bit unknown
2540 Known.Zero &= ~NewBits;
2541 Known.One &= ~NewBits;
2546 case ISD::CTTZ_ZERO_UNDEF: {
2547 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2548 // If we have a known 1, its position is our upper bound.
2549 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
2550 unsigned LowBits = Log2_32(PossibleTZ) + 1;
2551 Known.Zero.setBitsFrom(LowBits);
2555 case ISD::CTLZ_ZERO_UNDEF: {
2556 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2557 // If we have a known 1, its position is our upper bound.
2558 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
2559 unsigned LowBits = Log2_32(PossibleLZ) + 1;
2560 Known.Zero.setBitsFrom(LowBits);
2564 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2565 // If we know some of the bits are zero, they can't be one.
2566 unsigned PossibleOnes = Known2.countMaxPopulation();
2567 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
2571 LoadSDNode *LD = cast<LoadSDNode>(Op);
2572 // If this is a ZEXTLoad and we are looking at the loaded value.
2573 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2574 EVT VT = LD->getMemoryVT();
2575 unsigned MemBits = VT.getScalarSizeInBits();
2576 Known.Zero.setBitsFrom(MemBits);
2577 } else if (const MDNode *Ranges = LD->getRanges()) {
2578 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
2579 computeKnownBitsFromRangeMetadata(*Ranges, Known);
2583 case ISD::ZERO_EXTEND_VECTOR_INREG: {
2584 EVT InVT = Op.getOperand(0).getValueType();
2585 APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements());
2586 computeKnownBits(Op.getOperand(0), Known, InDemandedElts, Depth + 1);
2587 Known = Known.zext(BitWidth);
2588 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
2591 case ISD::ZERO_EXTEND: {
2592 EVT InVT = Op.getOperand(0).getValueType();
2593 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2594 Known = Known.zext(BitWidth);
2595 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
2598 // TODO ISD::SIGN_EXTEND_VECTOR_INREG
2599 case ISD::SIGN_EXTEND: {
2600 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2601 // If the sign bit is known to be zero or one, then sext will extend
2602 // it to the top bits, else it will just zext.
2603 Known = Known.sext(BitWidth);
2606 case ISD::ANY_EXTEND: {
2607 computeKnownBits(Op.getOperand(0), Known, Depth+1);
2608 Known = Known.zext(BitWidth);
2611 case ISD::TRUNCATE: {
2612 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2613 Known = Known.trunc(BitWidth);
2616 case ISD::AssertZext: {
2617 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2618 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2619 computeKnownBits(Op.getOperand(0), Known, Depth+1);
2620 Known.Zero |= (~InMask);
2621 Known.One &= (~Known.Zero);
2625 // All bits are zero except the low bit.
2626 Known.Zero.setBitsFrom(1);
2630 if (Op.getResNo() == 1) {
2631 // If we know the result of a setcc has the top bits zero, use this info.
2632 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2633 TargetLowering::ZeroOrOneBooleanContent &&
2634 BitWidth > 1)
2635 Known.Zero.setBitsFrom(1);
2641 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) {
2642 // We know that the top bits of C-X are clear if X contains less bits
2643 // than C (i.e. no wrap-around can happen). For example, 20-X is
2644 // positive if we can prove that X is >= 0 and < 16.
2645 if (CLHS->getAPIntValue().isNonNegative()) {
2646 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2647 // NLZ can't be BitWidth with no sign bit
2648 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2649 computeKnownBits(Op.getOperand(1), Known2, DemandedElts,
2652 // If all of the MaskV bits are known to be zero, then we know the
2653 // output top bits are zero, because we now know that the output is
2655 if ((Known2.Zero & MaskV) == MaskV) {
2656 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2657 // Top bits known zero.
2658 Known.Zero.setHighBits(NLZ2);
2663 // If low bits are know to be zero in both operands, then we know they are
2664 // going to be 0 in the result. Both addition and complement operations
2665 // preserve the low zero bits.
2666 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2667 unsigned KnownZeroLow = Known2.countMinTrailingZeros();
2668 if (KnownZeroLow == 0)
2669 break;
2671 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2672 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
2673 Known.Zero.setLowBits(KnownZeroLow);
2679 if (Op.getResNo() == 1) {
2680 // If we know the result of a setcc has the top bits zero, use this info.
2681 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2682 TargetLowering::ZeroOrOneBooleanContent &&
2683 BitWidth > 1)
2684 Known.Zero.setBitsFrom(1);
2691 // Output known-0 bits are known if clear or set in both the low clear bits
2692 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
2693 // low 3 bits clear.
2694 // Output known-0 bits are also known if the top bits of each input are
2695 // known to be clear. For example, if one input has the top 10 bits clear
2696 // and the other has the top 8 bits clear, we know the top 7 bits of the
2697 // output must be clear.
2698 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2699 unsigned KnownZeroHigh = Known2.countMinLeadingZeros();
2700 unsigned KnownZeroLow = Known2.countMinTrailingZeros();
2702 computeKnownBits(Op.getOperand(1), Known2, DemandedElts,
2704 KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros());
2705 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
2707 if (Opcode == ISD::ADDE || Opcode == ISD::ADDCARRY) {
2708 // With ADDE and ADDCARRY, a carry bit may be added in, so we can only
2709 // use this information if we know (at least) that the low two bits are
2710 // clear. We then return to the caller that the low bit is unknown but
2711 // that other bits are known zero.
2712 if (KnownZeroLow >= 2)
2713 Known.Zero.setBits(1, KnownZeroLow);
2717 Known.Zero.setLowBits(KnownZeroLow);
2718 if (KnownZeroHigh > 1)
2719 Known.Zero.setHighBits(KnownZeroHigh - 1);
2723 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2724 const APInt &RA = Rem->getAPIntValue().abs();
2725 if (RA.isPowerOf2()) {
2726 APInt LowBits = RA - 1;
2727 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2729 // The low bits of the first operand are unchanged by the srem.
2730 Known.Zero = Known2.Zero & LowBits;
2731 Known.One = Known2.One & LowBits;
2733 // If the first operand is non-negative or has all low bits zero, then
2734 // the upper bits are all zero.
2735 if (Known2.Zero[BitWidth-1] || ((Known2.Zero & LowBits) == LowBits))
2736 Known.Zero |= ~LowBits;
2738 // If the first operand is negative and not all low bits are zero, then
2739 // the upper bits are all one.
2740 if (Known2.One[BitWidth-1] && ((Known2.One & LowBits) != 0))
2741 Known.One |= ~LowBits;
2742 assert((Known.Zero & Known.One) == 0&&"Bits known to be one AND zero?");
2747 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2748 const APInt &RA = Rem->getAPIntValue();
2749 if (RA.isPowerOf2()) {
2750 APInt LowBits = (RA - 1);
2751 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2753 // The upper bits are all zero, the lower ones are unchanged.
2754 Known.Zero = Known2.Zero | ~LowBits;
2755 Known.One = Known2.One & LowBits;
2760 // Since the result is less than or equal to either operand, any leading
2761 // zero bits in either operand must also exist in the result.
2762 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2763 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2765 uint32_t Leaders =
2766 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
2767 Known.resetAll();
2768 Known.Zero.setHighBits(Leaders);
2771 case ISD::EXTRACT_ELEMENT: {
2772 computeKnownBits(Op.getOperand(0), Known, Depth+1);
2773 const unsigned Index = Op.getConstantOperandVal(1);
2774 const unsigned BitWidth = Op.getValueSizeInBits();
2776 // Remove low part of known bits mask
2777 Known.Zero = Known.Zero.getHiBits(Known.Zero.getBitWidth() - Index * BitWidth);
2778 Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth);
2780 // Remove high part of known bit mask
2781 Known = Known.trunc(BitWidth);
2784 case ISD::EXTRACT_VECTOR_ELT: {
2785 SDValue InVec = Op.getOperand(0);
2786 SDValue EltNo = Op.getOperand(1);
2787 EVT VecVT = InVec.getValueType();
2788 const unsigned BitWidth = Op.getValueSizeInBits();
2789 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
2790 const unsigned NumSrcElts = VecVT.getVectorNumElements();
2791 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
2792 // anything about the extended bits.
2793 if (BitWidth > EltBitWidth)
2794 Known = Known.trunc(EltBitWidth);
2795 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
2796 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
2797 // If we know the element index, just demand that vector element.
2798 unsigned Idx = ConstEltNo->getZExtValue();
2799 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
2800 computeKnownBits(InVec, Known, DemandedElt, Depth + 1);
2802 // Unknown element index, so ignore DemandedElts and demand them all.
2803 computeKnownBits(InVec, Known, Depth + 1);
2805 if (BitWidth > EltBitWidth)
2806 Known = Known.zext(BitWidth);
2809 case ISD::INSERT_VECTOR_ELT: {
2810 SDValue InVec = Op.getOperand(0);
2811 SDValue InVal = Op.getOperand(1);
2812 SDValue EltNo = Op.getOperand(2);
2814 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
2815 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
2816 // If we know the element index, split the demand between the
2817 // source vector and the inserted element.
2818 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth);
2819 unsigned EltIdx = CEltNo->getZExtValue();
2821 // If we demand the inserted element then add its common known bits.
2822 if (DemandedElts[EltIdx]) {
2823 computeKnownBits(InVal, Known2, Depth + 1);
2824 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
2825 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
2828 // If we demand the source vector then add its common known bits, ensuring
2829 // that we don't demand the inserted element.
2830 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
2831 if (!!VectorElts) {
2832 computeKnownBits(InVec, Known2, VectorElts, Depth + 1);
2833 Known.One &= Known2.One;
2834 Known.Zero &= Known2.Zero;
2835 }
2836 } else {
2837 // Unknown element index, so ignore DemandedElts and demand them all.
2838 computeKnownBits(InVec, Known, Depth + 1);
2839 computeKnownBits(InVal, Known2, Depth + 1);
2840 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
2841 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
2845 case ISD::BITREVERSE: {
2846 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2847 Known.Zero = Known2.Zero.reverseBits();
2848 Known.One = Known2.One.reverseBits();
2852 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2853 Known.Zero = Known2.Zero.byteSwap();
2854 Known.One = Known2.One.byteSwap();
2858 computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
2860 // If the source's MSB is zero then we know the rest of the bits already.
2861 if (Known2.isNonNegative()) {
2862 Known.Zero = Known2.Zero;
2863 Known.One = Known2.One;
2867 // We only know that the absolute value's MSB will be zero iff there is
2868 // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
2869 Known2.One.clearSignBit();
2870 if (Known2.One.getBoolValue()) {
2871 Known.Zero = APInt::getSignMask(BitWidth);
2877 computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
2878 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2880 // UMIN - we know that the result will have the maximum of the
2881 // known zero leading bits of the inputs.
2882 unsigned LeadZero = Known.countMinLeadingZeros();
2883 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());
2885 Known.Zero &= Known2.Zero;
2886 Known.One &= Known2.One;
2887 Known.Zero.setHighBits(LeadZero);
2891 computeKnownBits(Op.getOperand(0), Known, DemandedElts,
2893 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2895 // UMAX - we know that the result will have the maximum of the
2896 // known one leading bits of the inputs.
2897 unsigned LeadOne = Known.countMinLeadingOnes();
2898 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());
2900 Known.Zero &= Known2.Zero;
2901 Known.One &= Known2.One;
2902 Known.One.setHighBits(LeadOne);
2907 computeKnownBits(Op.getOperand(0), Known, DemandedElts,
2909 // If we don't know any bits, early out.
2910 if (Known.isUnknown())
2912 computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth + 1);
2913 Known.Zero &= Known2.Zero;
2914 Known.One &= Known2.One;
2917 case ISD::FrameIndex:
2918 case ISD::TargetFrameIndex:
2919 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth);
2923 if (Opcode < ISD::BUILTIN_OP_END)
2926 case ISD::INTRINSIC_WO_CHAIN:
2927 case ISD::INTRINSIC_W_CHAIN:
2928 case ISD::INTRINSIC_VOID:
2929 // Allow the target to implement this method for its nodes.
2930 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
2934 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
2937 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
2938 SDValue N1) const {
2939 // X + 0 never overflow
2940 if (isNullConstant(N1))
2941 return OFK_Never;
2943 KnownBits N1Known;
2944 computeKnownBits(N1, N1Known);
2945 if (N1Known.Zero.getBoolValue()) {
2946 KnownBits N0Known;
2947 computeKnownBits(N0, N0Known);
2949 bool overflow;
2950 (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow);
2951 if (!overflow)
2952 return OFK_Never;
2953 }
2955 // mulhi + 1 never overflow
2956 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
2957 (~N1Known.Zero & 0x01) == ~N1Known.Zero)
2958 return OFK_Never;
2960 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
2961 KnownBits N0Known;
2962 computeKnownBits(N0, N0Known);
2964 if ((~N0Known.Zero & 0x01) == ~N0Known.Zero)
2965 return OFK_Never;
2966 }
2968 return OFK_Sometime;
2969 }
2971 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
2972 EVT OpVT = Val.getValueType();
2973 unsigned BitWidth = OpVT.getScalarSizeInBits();
2975 // Is the constant a known power of 2?
2976 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
2977 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
2979 // A left-shift of a constant one will have exactly one bit set because
2980 // shifting the bit off the end is undefined.
2981 if (Val.getOpcode() == ISD::SHL) {
2982 auto *C = isConstOrConstSplat(Val.getOperand(0));
2983 if (C && C->getAPIntValue() == 1)
2984 return true;
2985 }
2987 // Similarly, a logical right-shift of a constant sign-bit will have exactly
2988 // one bit set.
2989 if (Val.getOpcode() == ISD::SRL) {
2990 auto *C = isConstOrConstSplat(Val.getOperand(0));
2991 if (C && C->getAPIntValue().isSignMask())
2992 return true;
2993 }
2995 // Are all operands of a build vector constant powers of two?
2996 if (Val.getOpcode() == ISD::BUILD_VECTOR)
2997 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
2998 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
2999 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3000 return false;
3001 }))
3002 return true;
3004 // More could be done here, though the above checks are enough
3005 // to handle some common cases.
3007 // Fall back to computeKnownBits to catch other known cases.
3008 KnownBits Known;
3009 computeKnownBits(Val, Known);
3010 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3011 }
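// Illustrative examples (assumed operands): Val == (1 << N) is accepted by
// the SHL check above because the shifted constant is 1, and
// Val == BUILD_VECTOR <2, 8, 32, 128> is accepted by the build-vector check
// because every element is a power of two; neither needs the
// computeKnownBits fall-back.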
3013 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3014 EVT VT = Op.getValueType();
3015 APInt DemandedElts = VT.isVector()
3016 ? APInt::getAllOnesValue(VT.getVectorNumElements())
3017 : APInt(1, 1);
3018 return ComputeNumSignBits(Op, DemandedElts, Depth);
3019 }
3021 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3022 unsigned Depth) const {
3023 EVT VT = Op.getValueType();
3024 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3025 unsigned VTBits = VT.getScalarSizeInBits();
3026 unsigned NumElts = DemandedElts.getBitWidth();
3027 unsigned Tmp, Tmp2;
3028 unsigned FirstAnswer = 1;
3030 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3031 const APInt &Val = C->getAPIntValue();
3032 return Val.getNumSignBits();
3033 }
3035 if (Depth == 6)
3036 return 1; // Limit search depth.
3038 if (!DemandedElts)
3039 return 1; // No demanded elts, better to assume we don't know anything.
3041 switch (Op.getOpcode()) {
3043 case ISD::AssertSext:
3044 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3045 return VTBits-Tmp+1;
3046 case ISD::AssertZext:
3047 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3048 return VTBits-Tmp;
3050 case ISD::BUILD_VECTOR:
3051 Tmp = VTBits;
3052 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3053 if (!DemandedElts[i])
3054 continue;
3056 SDValue SrcOp = Op.getOperand(i);
3057 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1);
3059 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3060 if (SrcOp.getValueSizeInBits() != VTBits) {
3061 assert(SrcOp.getValueSizeInBits() > VTBits &&
3062 "Expected BUILD_VECTOR implicit truncation");
3063 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3064 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3066 Tmp = std::min(Tmp, Tmp2);
3067 }
3068 return Tmp;
3070 case ISD::VECTOR_SHUFFLE: {
3071 // Collect the minimum number of sign bits that are shared by every vector
3072 // element referenced by the shuffle.
3073 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3074 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3075 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3076 for (unsigned i = 0; i != NumElts; ++i) {
3077 int M = SVN->getMaskElt(i);
3078 if (!DemandedElts[i])
3080 // For UNDEF elements, we don't know anything about the common state of
3081 // the shuffle result.
3082 if (M < 0)
3083 return 1;
3084 if ((unsigned)M < NumElts)
3085 DemandedLHS.setBit((unsigned)M % NumElts);
3086 else
3087 DemandedRHS.setBit((unsigned)M % NumElts);
3089 Tmp = std::numeric_limits<unsigned>::max();
3090 if (!!DemandedLHS)
3091 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3092 if (!!DemandedRHS) {
3093 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3094 Tmp = std::min(Tmp, Tmp2);
3096 // If we don't know anything, early out and try computeKnownBits fall-back.
3097 if (Tmp == 1)
3098 break;
3099 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3100 return Tmp;
3101 }
3103 case ISD::BITCAST: {
3104 SDValue N0 = Op.getOperand(0);
3105 EVT SrcVT = N0.getValueType();
3106 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3108 // Ignore bitcasts from unsupported types..
3109 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3112 // Fast handling of 'identity' bitcasts.
3113 if (VTBits == SrcBits)
3114 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3116 // Bitcast 'large element' scalar/vector to 'small element' vector.
3117 // TODO: Handle cases other than 'sign splat' when we have a use case.
3118 // Requires handling of DemandedElts and Endianness.
3119 if ((SrcBits % VTBits) == 0) {
3120 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
3121 Tmp = ComputeNumSignBits(N0, Depth + 1);
3128 case ISD::SIGN_EXTEND:
3129 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3130 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3131 case ISD::SIGN_EXTEND_INREG:
3132 // Max of the input and what this extends.
3133 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3134 Tmp = VTBits-Tmp+1;
3135 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3136 return std::max(Tmp, Tmp2);
3137 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3138 SDValue Src = Op.getOperand(0);
3139 EVT SrcVT = Src.getValueType();
3140 APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements());
3141 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3142 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3146 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3147 // SRA X, C -> adds C sign bits.
3148 if (ConstantSDNode *C =
3149 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) {
3150 APInt ShiftVal = C->getAPIntValue();
3151 ShiftVal += Tmp;
3152 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
3153 }
3154 return Tmp;
3155 case ISD::SHL:
3156 if (ConstantSDNode *C =
3157 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) {
3158 // shl destroys sign bits.
3159 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3160 if (C->getAPIntValue().uge(VTBits) || // Bad shift.
3161 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out.
3162 return Tmp - C->getZExtValue();
3167 case ISD::XOR: // NOT is handled here.
3168 // Logical binary ops preserve the number of sign bits at the worst.
3169 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3171 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3172 FirstAnswer = std::min(Tmp, Tmp2);
3173 // We computed what we know about the sign bits as our first
3174 // answer. Now proceed to the generic code that uses
3175 // computeKnownBits, and pick whichever answer is better.
3181 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3182 if (Tmp == 1) return 1; // Early out.
3183 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3184 return std::min(Tmp, Tmp2);
3185 case ISD::SELECT_CC:
3186 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3187 if (Tmp == 1) return 1; // Early out.
3188 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3189 return std::min(Tmp, Tmp2);
3195 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3196 if (Tmp == 1)
3197 return 1; // Early out.
3198 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3199 return std::min(Tmp, Tmp2);
3206 if (Op.getResNo() != 1)
3207 break;
3208 // The boolean result conforms to getBooleanContents. Fall through.
3209 // If setcc returns 0/-1, all bits are sign bits.
3210 // We know that we have an integer-based boolean since these operations
3211 // are only available for integer.
3212 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3213 TargetLowering::ZeroOrNegativeOneBooleanContent)
3217 // If setcc returns 0/-1, all bits are sign bits.
3218 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3219 TargetLowering::ZeroOrNegativeOneBooleanContent)
3224 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3225 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3227 // Handle rotate right by N like a rotate left by 32-N.
3228 if (Op.getOpcode() == ISD::ROTR)
3229 RotAmt = (VTBits - RotAmt) % VTBits;
3231 // If we aren't rotating out all of the known-in sign bits, return the
3232 // number that are left. This handles rotl(sext(x), 1) for example.
3233 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3234 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3239 // Add can have at most one carry bit. Thus we know that the output
3240 // is, at worst, one more bit than the inputs.
3241 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3242 if (Tmp == 1) return 1; // Early out.
3244 // Special case decrementing a value (ADD X, -1):
3245 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3246 if (CRHS->isAllOnesValue()) {
3248 computeKnownBits(Op.getOperand(0), Known, Depth+1);
3250 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3252 if ((Known.Zero | 1).isAllOnesValue())
3253 return VTBits;
3255 // If we are subtracting one from a positive number, there is no carry
3256 // out of the result.
3257 if (Known.isNonNegative())
3258 return Tmp;
3259 }
3261 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3262 if (Tmp2 == 1) return 1;
3263 return std::min(Tmp, Tmp2)-1;
3266 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3267 if (Tmp2 == 1) return 1;
3270 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3271 if (CLHS->isNullValue()) {
3273 computeKnownBits(Op.getOperand(1), Known, Depth+1);
3274 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3276 if ((Known.Zero | 1).isAllOnesValue())
3277 return VTBits;
3279 // If the input is known to be positive (the sign bit is known clear),
3280 // the output of the NEG has the same number of sign bits as the input.
3281 if (Known.isNonNegative())
3282 return Tmp2;
3284 // Otherwise, we treat this like a SUB.
3285 }
3287 // Sub can have at most one carry bit. Thus we know that the output
3288 // is, at worst, one more bit than the inputs.
3289 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3290 if (Tmp == 1) return 1; // Early out.
3291 return std::min(Tmp, Tmp2)-1;
3292 case ISD::TRUNCATE: {
3293 // Check if the sign bits of source go down as far as the truncated value.
3294 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3295 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3296 if (NumSrcSignBits > (NumSrcBits - VTBits))
3297 return NumSrcSignBits - (NumSrcBits - VTBits);
3300 case ISD::EXTRACT_ELEMENT: {
3301 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3302 const int BitWidth = Op.getValueSizeInBits();
3303 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3305 // Get reverse index (starting from 1), Op1 value indexes elements from
3306 // little end. Sign starts at big end.
3307 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3309 // If the sign portion ends in our element the subtraction gives correct
3310 // result. Otherwise it gives either negative or > bitwidth result
3311 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3313 case ISD::INSERT_VECTOR_ELT: {
3314 SDValue InVec = Op.getOperand(0);
3315 SDValue InVal = Op.getOperand(1);
3316 SDValue EltNo = Op.getOperand(2);
3317 unsigned NumElts = InVec.getValueType().getVectorNumElements();
3319 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3320 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3321 // If we know the element index, split the demand between the
3322 // source vector and the inserted element.
3323 unsigned EltIdx = CEltNo->getZExtValue();
3325 // If we demand the inserted element then get its sign bits.
3326 Tmp = std::numeric_limits<unsigned>::max();
3327 if (DemandedElts[EltIdx]) {
3328 // TODO - handle implicit truncation of inserted elements.
3329 if (InVal.getScalarValueSizeInBits() != VTBits)
3330 break;
3331 Tmp = ComputeNumSignBits(InVal, Depth + 1);
3334 // If we demand the source vector then get its sign bits, and determine
3336 APInt VectorElts = DemandedElts;
3337 VectorElts.clearBit(EltIdx);
3338 if (!!VectorElts) {
3339 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
3340 Tmp = std::min(Tmp, Tmp2);
3341 }
3342 } else {
3343 // Unknown element index, so ignore DemandedElts and demand them all.
3344 Tmp = ComputeNumSignBits(InVec, Depth + 1);
3345 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3346 Tmp = std::min(Tmp, Tmp2);
3348 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3349 return Tmp;
3350 }
3351 case ISD::EXTRACT_VECTOR_ELT: {
3352 SDValue InVec = Op.getOperand(0);
3353 SDValue EltNo = Op.getOperand(1);
3354 EVT VecVT = InVec.getValueType();
3355 const unsigned BitWidth = Op.getValueSizeInBits();
3356 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3357 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3359 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
3360 // anything about sign bits. But if the sizes match we can derive knowledge
3361 // about sign bits from the vector operand.
3362 if (BitWidth != EltBitWidth)
3363 break;
3365 // If we know the element index, just demand that vector element, else for
3366 // an unknown element index, ignore DemandedElts and demand them all.
3367 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3368 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3369 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3370 DemandedSrcElts =
3371 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3373 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3375 case ISD::EXTRACT_SUBVECTOR: {
3376 // If we know the element index, just demand that subvector elements,
3377 // otherwise demand them all.
3378 SDValue Src = Op.getOperand(0);
3379 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3380 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3381 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3382 // Offset the demanded elts by the subvector index.
3383 uint64_t Idx = SubIdx->getZExtValue();
3384 APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
3385 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3387 return ComputeNumSignBits(Src, Depth + 1);
3389 case ISD::CONCAT_VECTORS:
3390 // Determine the minimum number of sign bits across all demanded
3391 // elts of the input vectors. Early out if the result is already 1.
3392 Tmp = std::numeric_limits<unsigned>::max();
3393 EVT SubVectorVT = Op.getOperand(0).getValueType();
3394 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3395 unsigned NumSubVectors = Op.getNumOperands();
3396 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3397 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3398 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3399 if (!DemandedSub)
3400 continue;
3401 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3402 Tmp = std::min(Tmp, Tmp2);
3404 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3408 // If we are looking at the loaded value of the SDNode.
3409 if (Op.getResNo() == 0) {
3410 // Handle LOADX separately here. EXTLOAD case will fallthrough.
3411 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3412 unsigned ExtType = LD->getExtensionType();
3413 switch (ExtType) {
3414 default: break;
3415 case ISD::SEXTLOAD: // '17' bits known
3416 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3417 return VTBits-Tmp+1;
3418 case ISD::ZEXTLOAD: // '16' bits known
3419 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3420 return VTBits-Tmp;
3421 }
3422 }
3423 }
3425 // Allow the target to implement this method for its nodes.
3426 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
3427 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3428 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
3429 Op.getOpcode() == ISD::INTRINSIC_VOID) {
3430 unsigned NumBits =
3431 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
3433 FirstAnswer = std::max(FirstAnswer, NumBits);
3434 }
3436 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3437 // use this information.
3438 KnownBits Known;
3439 computeKnownBits(Op, Known, DemandedElts, Depth);
3441 APInt Mask;
3442 if (Known.isNonNegative()) { // sign bit is 0
3443 Mask = Known.Zero;
3444 } else if (Known.isNegative()) { // sign bit is 1;
3445 Mask = Known.One;
3446 } else {
3447 // Nothing known.
3448 return FirstAnswer;
3449 }
3451 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3452 // the number of identical bits in the top of the input value.
3454 Mask <<= Mask.getBitWidth()-VTBits;
3455 // Return # leading zeros. We use 'min' here in case Val was zero before
3456 // shifting. We don't want to return '64' as for an i32 "0".
3457 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3458 }
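// Worked example (illustrative): for an i32 value produced by SIGN_EXTEND
// from i8, the SIGN_EXTEND case above returns
//   (32 - 8) + ComputeNumSignBits(i8 source) >= 24 + 1 == 25,
// so at least the top 25 bits are copies of the sign bit.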
3460 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3461 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3462 !isa<ConstantSDNode>(Op.getOperand(1)))
3463 return false;
3465 if (Op.getOpcode() == ISD::OR &&
3466 !MaskedValueIsZero(Op.getOperand(0),
3467 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3473 bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
3474 // If we're told that NaNs won't happen, assume they won't.
3475 if (getTarget().Options.NoNaNsFPMath)
3478 if (Op->getFlags().hasNoNaNs())
3481 // If the value is a constant, we can obviously see if it is a NaN or not.
3482 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3483 return !C->getValueAPF().isNaN();
3485 // TODO: Recognize more cases here.
3490 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3491 // If the value is a constant, we can obviously see if it is a zero or not.
3492 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3493 return !C->isZero();
3495 // TODO: Recognize more cases here.
3496 switch (Op.getOpcode()) {
3499 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3500 return !C->isNullValue();
3507 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3508 // Check the obvious case.
3509 if (A == B) return true;
3511 // For negative and positive zero.
3512 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3513 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3514 if (CA->isZero() && CB->isZero()) return true;
3516 // Otherwise they may not be equal.
3520 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
3521 assert(A.getValueType() == B.getValueType() &&
3522 "Values must have the same type");
3523 KnownBits AKnown, BKnown;
3524 computeKnownBits(A, AKnown);
3525 computeKnownBits(B, BKnown);
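// A and B share no set bits iff every bit position is known to be zero in at least one of the two values.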
3526 return (AKnown.Zero | BKnown.Zero).isAllOnesValue();
3529 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
3530 ArrayRef<SDValue> Ops,
3531 SelectionDAG &DAG) {
3532 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
3533 assert(llvm::all_of(Ops,
3535 return Ops[0].getValueType() == Op.getValueType();
3537 "Concatenation of vectors with inconsistent value types!");
3538 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
3539 VT.getVectorNumElements() &&
3540 "Incorrect element count in vector concatenation!");
3542 if (Ops.size() == 1)
3545 // Concat of UNDEFs is UNDEF.
3546 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
3547 return DAG.getUNDEF(VT);
3549 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
3550 // simplified to one big BUILD_VECTOR.
3551 // FIXME: Add support for SCALAR_TO_VECTOR as well.
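// e.g. concat_vectors(build_vector(a,b), build_vector(c,d)) -> build_vector(a,b,c,d).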
3552 EVT SVT = VT.getScalarType();
3553 SmallVector<SDValue, 16> Elts;
3554 for (SDValue Op : Ops) {
3555 EVT OpVT = Op.getValueType();
3557 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
3558 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
3559 Elts.append(Op->op_begin(), Op->op_end());
3564 // BUILD_VECTOR requires all inputs to be of the same type, find the
3565 // maximum type and extend them all.
3566 for (SDValue Op : Elts)
3567 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
3569 if (SVT.bitsGT(VT.getScalarType()))
3570 for (SDValue &Op : Elts)
3571 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
3572 ? DAG.getZExtOrTrunc(Op, DL, SVT)
3573 : DAG.getSExtOrTrunc(Op, DL, SVT);
3575 SDValue V = DAG.getBuildVector(VT, DL, Elts);
3576 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
3580 /// Gets or creates the specified node.
3581 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
3582 FoldingSetNodeID ID;
3583 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
3585 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
3586 return SDValue(E, 0);
3588 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
3590 CSEMap.InsertNode(N, IP);
3593 SDValue V = SDValue(N, 0);
3594 NewSDValueDbgMsg(V, "Creating new node: ", this);
3598 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
3599 SDValue Operand, const SDNodeFlags Flags) {
3600 // Constant fold unary operations with an integer constant operand. Even
3601 // opaque constant will be folded, because the folding of unary operations
3602 // doesn't create new constants with different values. Nevertheless, the
3603 // opaque flag is preserved during folding to prevent future folding with
3605 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
3606 const APInt &Val = C->getAPIntValue();
3609 case ISD::SIGN_EXTEND:
3610 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
3611 C->isTargetOpcode(), C->isOpaque());
3612 case ISD::ANY_EXTEND:
3613 case ISD::ZERO_EXTEND:
3615 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
3616 C->isTargetOpcode(), C->isOpaque());
3617 case ISD::UINT_TO_FP:
3618 case ISD::SINT_TO_FP: {
3619 APFloat apf(EVTToAPFloatSemantics(VT),
3620 APInt::getNullValue(VT.getSizeInBits()));
3621 (void)apf.convertFromAPInt(Val,
3622 Opcode==ISD::SINT_TO_FP,
3623 APFloat::rmNearestTiesToEven);
3624 return getConstantFP(apf, DL, VT);
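// Bitcasts of integer constants are folded by reinterpreting the bits as the FP type,
// e.g. the i32 constant 0x3F800000 becomes the f32 constant 1.0.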
3627 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
3628 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
3629 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
3630 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
3631 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
3632 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
3633 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
3634 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
3637 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
3639 case ISD::BITREVERSE:
3640 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
3643 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
3646 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
3649 case ISD::CTLZ_ZERO_UNDEF:
3650 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
3653 case ISD::CTTZ_ZERO_UNDEF:
3654 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
3656 case ISD::FP16_TO_FP: {
3658 APFloat FPV(APFloat::IEEEhalf(),
3659 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
3661 // This can return overflow, underflow, or inexact; we don't care.
3662 // FIXME need to be more flexible about rounding mode.
3663 (void)FPV.convert(EVTToAPFloatSemantics(VT),
3664 APFloat::rmNearestTiesToEven, &Ignored);
3665 return getConstantFP(FPV, DL, VT);
3670 // Constant fold unary operations with a floating point constant operand.
3671 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
3672 APFloat V = C->getValueAPF(); // make copy
3676 return getConstantFP(V, DL, VT);
3679 return getConstantFP(V, DL, VT);
3681 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
3682 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3683 return getConstantFP(V, DL, VT);
3687 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
3688 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3689 return getConstantFP(V, DL, VT);
3693 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
3694 if (fs == APFloat::opOK || fs == APFloat::opInexact)
3695 return getConstantFP(V, DL, VT);
3698 case ISD::FP_EXTEND: {
3700 // This can return overflow, underflow, or inexact; we don't care.
3701 // FIXME need to be more flexible about rounding mode.
3702 (void)V.convert(EVTToAPFloatSemantics(VT),
3703 APFloat::rmNearestTiesToEven, &ignored);
3704 return getConstantFP(V, DL, VT);
3706 case ISD::FP_TO_SINT:
3707 case ISD::FP_TO_UINT: {
3709 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
3710 // FIXME need to be more flexible about rounding mode.
3711 APFloat::opStatus s =
3712 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
3713 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
3715 return getConstant(IntVal, DL, VT);
3718 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
3719 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
3720 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
3721 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
3722 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
3723 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
3725 case ISD::FP_TO_FP16: {
3727 // This can return overflow, underflow, or inexact; we don't care.
3728 // FIXME need to be more flexible about rounding mode.
3729 (void)V.convert(APFloat::IEEEhalf(),
3730 APFloat::rmNearestTiesToEven, &Ignored);
3731 return getConstant(V.bitcastToAPInt(), DL, VT);
3736 // Constant fold unary operations with a vector integer or float operand.
3737 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
3738 if (BV->isConstant()) {
3741 // FIXME: Entirely reasonable to perform folding of other unary
3742 // operations here as the need arises.
3749 case ISD::FP_EXTEND:
3750 case ISD::FP_TO_SINT:
3751 case ISD::FP_TO_UINT:
3753 case ISD::ANY_EXTEND:
3754 case ISD::ZERO_EXTEND:
3755 case ISD::SIGN_EXTEND:
3756 case ISD::UINT_TO_FP:
3757 case ISD::SINT_TO_FP:
3759 case ISD::BITREVERSE:
3762 case ISD::CTLZ_ZERO_UNDEF:
3764 case ISD::CTTZ_ZERO_UNDEF:
3766 SDValue Ops = { Operand };
3767 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
3774 unsigned OpOpcode = Operand.getNode()->getOpcode();
3776 case ISD::TokenFactor:
3777 case ISD::MERGE_VALUES:
3778 case ISD::CONCAT_VECTORS:
3779 return Operand; // Factor, merge or concat of one node? No need.
3780 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
3781 case ISD::FP_EXTEND:
3782 assert(VT.isFloatingPoint() &&
3783 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
3784 if (Operand.getValueType() == VT) return Operand; // noop conversion.
3785 assert((!VT.isVector() ||
3786 VT.getVectorNumElements() ==
3787 Operand.getValueType().getVectorNumElements()) &&
3788 "Vector element count mismatch!");
3789 assert(Operand.getValueType().bitsLT(VT) &&
3790 "Invalid fpext node, dst < src!");
3791 if (Operand.isUndef())
3792 return getUNDEF(VT);
3794 case ISD::SIGN_EXTEND:
3795 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3796 "Invalid SIGN_EXTEND!");
3797 if (Operand.getValueType() == VT) return Operand; // noop extension
3798 assert((!VT.isVector() ||
3799 VT.getVectorNumElements() ==
3800 Operand.getValueType().getVectorNumElements()) &&
3801 "Vector element count mismatch!");
3802 assert(Operand.getValueType().bitsLT(VT) &&
3803 "Invalid sext node, dst < src!");
3804 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
3805 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3806 else if (OpOpcode == ISD::UNDEF)
3807 // sext(undef) = 0, because the top bits will all be the same.
3808 return getConstant(0, DL, VT);
3810 case ISD::ZERO_EXTEND:
3811 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3812 "Invalid ZERO_EXTEND!");
3813 if (Operand.getValueType() == VT) return Operand; // noop extension
3814 assert((!VT.isVector() ||
3815 VT.getVectorNumElements() ==
3816 Operand.getValueType().getVectorNumElements()) &&
3817 "Vector element count mismatch!");
3818 assert(Operand.getValueType().bitsLT(VT) &&
3819 "Invalid zext node, dst < src!");
3820 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
3821 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
3822 else if (OpOpcode == ISD::UNDEF)
3823 // zext(undef) = 0, because the top bits will be zero.
3824 return getConstant(0, DL, VT);
3826 case ISD::ANY_EXTEND:
3827 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3828 "Invalid ANY_EXTEND!");
3829 if (Operand.getValueType() == VT) return Operand; // noop extension
3830 assert((!VT.isVector() ||
3831 VT.getVectorNumElements() ==
3832 Operand.getValueType().getVectorNumElements()) &&
3833 "Vector element count mismatch!");
3834 assert(Operand.getValueType().bitsLT(VT) &&
3835 "Invalid anyext node, dst < src!");
3837 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3838 OpOpcode == ISD::ANY_EXTEND)
3839 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
3840 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3841 else if (OpOpcode == ISD::UNDEF)
3842 return getUNDEF(VT);
3844 // (ext (trunc x)) -> x
3845 if (OpOpcode == ISD::TRUNCATE) {
3846 SDValue OpOp = Operand.getOperand(0);
3847 if (OpOp.getValueType() == VT)
3852 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
3853 "Invalid TRUNCATE!");
3854 if (Operand.getValueType() == VT) return Operand; // noop truncate
3855 assert((!VT.isVector() ||
3856 VT.getVectorNumElements() ==
3857 Operand.getValueType().getVectorNumElements()) &&
3858 "Vector element count mismatch!");
3859 assert(Operand.getValueType().bitsGT(VT) &&
3860 "Invalid truncate node, src < dst!");
3861 if (OpOpcode == ISD::TRUNCATE)
3862 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3863 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
3864 OpOpcode == ISD::ANY_EXTEND) {
3865 // If the source is smaller than the dest, we still need an extend.
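// e.g. truncating (sext i8 X to i64) to i16 becomes (sext i8 X to i16).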
3866 if (Operand.getOperand(0).getValueType().getScalarType()
3867 .bitsLT(VT.getScalarType()))
3868 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
3869 if (Operand.getOperand(0).getValueType().bitsGT(VT))
3870 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
3871 return Operand.getOperand(0);
3873 if (OpOpcode == ISD::UNDEF)
3874 return getUNDEF(VT);
3877 assert(VT.isInteger() && VT == Operand.getValueType() &&
3879 if (OpOpcode == ISD::UNDEF)
3880 return getUNDEF(VT);
3883 assert(VT.isInteger() && VT == Operand.getValueType() &&
3885 assert((VT.getScalarSizeInBits() % 16 == 0) &&
3886 "BSWAP types must be a multiple of 16 bits!");
3887 if (OpOpcode == ISD::UNDEF)
3888 return getUNDEF(VT);
3890 case ISD::BITREVERSE:
3891 assert(VT.isInteger() && VT == Operand.getValueType() &&
3892 "Invalid BITREVERSE!");
3893 if (OpOpcode == ISD::UNDEF)
3894 return getUNDEF(VT);
3897 // Basic sanity checking.
3898 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
3899 "Cannot BITCAST between types of different sizes!");
3900 if (VT == Operand.getValueType()) return Operand; // noop conversion.
3901 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
3902 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
3903 if (OpOpcode == ISD::UNDEF)
3904 return getUNDEF(VT);
3906 case ISD::SCALAR_TO_VECTOR:
3907 assert(VT.isVector() && !Operand.getValueType().isVector() &&
3908 (VT.getVectorElementType() == Operand.getValueType() ||
3909 (VT.getVectorElementType().isInteger() &&
3910 Operand.getValueType().isInteger() &&
3911 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
3912 "Illegal SCALAR_TO_VECTOR node!");
3913 if (OpOpcode == ISD::UNDEF)
3914 return getUNDEF(VT);
3915 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
3916 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
3917 isa<ConstantSDNode>(Operand.getOperand(1)) &&
3918 Operand.getConstantOperandVal(1) == 0 &&
3919 Operand.getOperand(0).getValueType() == VT)
3920 return Operand.getOperand(0);
3923 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
3924 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
3925 // FIXME: FNEG has no fast-math-flags to propagate; use the FSUB's flags?
3926 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1),
3927 Operand.getOperand(0), Operand.getNode()->getFlags());
3928 if (OpOpcode == ISD::FNEG) // --X -> X
3929 return Operand.getOperand(0);
3932 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
3933 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
3938 SDVTList VTs = getVTList(VT);
3939 SDValue Ops[] = {Operand};
3940 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
3941 FoldingSetNodeID ID;
3942 AddNodeIDNode(ID, Opcode, VTs, Ops);
3944 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
3945 E->intersectFlagsWith(Flags);
3946 return SDValue(E, 0);
3949 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
3951 createOperands(N, Ops);
3952 CSEMap.InsertNode(N, IP);
3954 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
3955 createOperands(N, Ops);
3959 SDValue V = SDValue(N, 0);
3960 NewSDValueDbgMsg(V, "Creating new node: ", this);
3964 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
3967 case ISD::ADD: return std::make_pair(C1 + C2, true);
3968 case ISD::SUB: return std::make_pair(C1 - C2, true);
3969 case ISD::MUL: return std::make_pair(C1 * C2, true);
3970 case ISD::AND: return std::make_pair(C1 & C2, true);
3971 case ISD::OR: return std::make_pair(C1 | C2, true);
3972 case ISD::XOR: return std::make_pair(C1 ^ C2, true);
3973 case ISD::SHL: return std::make_pair(C1 << C2, true);
3974 case ISD::SRL: return std::make_pair(C1.lshr(C2), true);
3975 case ISD::SRA: return std::make_pair(C1.ashr(C2), true);
3976 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
3977 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
3978 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true);
3979 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true);
3980 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true);
3981 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? C1 : C2, true);
3983 if (!C2.getBoolValue())
3985 return std::make_pair(C1.udiv(C2), true);
3987 if (!C2.getBoolValue())
3989 return std::make_pair(C1.urem(C2), true);
3991 if (!C2.getBoolValue())
3993 return std::make_pair(C1.sdiv(C2), true);
3995 if (!C2.getBoolValue())
3997 return std::make_pair(C1.srem(C2), true);
3999 return std::make_pair(APInt(1, 0), false);
4002 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4003 EVT VT, const ConstantSDNode *Cst1,
4004 const ConstantSDNode *Cst2) {
4005 if (Cst1->isOpaque() || Cst2->isOpaque())
4008 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(),
4009 Cst2->getAPIntValue());
4012 return getConstant(Folded.first, DL, VT);
4015 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
4016 const GlobalAddressSDNode *GA,
4018 if (GA->getOpcode() != ISD::GlobalAddress)
4020 if (!TLI->isOffsetFoldingLegal(GA))
4022 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2);
4025 int64_t Offset = Cst2->getSExtValue();
4027 case ISD::ADD: break;
4028 case ISD::SUB: Offset = -uint64_t(Offset); break;
4029 default: return SDValue();
4031 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT,
4032 GA->getOffset() + uint64_t(Offset));
4035 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
4041 // If a divisor is zero/undef or any element of a divisor vector is
4042 // zero/undef, the whole op is undef.
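// e.g. both (udiv X, 0) and (udiv X, undef) are treated as undef here.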
4043 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
4044 SDValue Divisor = Ops[1];
4045 if (Divisor.isUndef() || isNullConstant(Divisor))
4048 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
4049 llvm::any_of(Divisor->op_values(),
4050 [](SDValue V) { return V.isUndef() ||
4051 isNullConstant(V); });
4052 // TODO: Handle signed overflow.
4054 // TODO: Handle oversized shifts.
4060 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4061 EVT VT, SDNode *Cst1,
4063 // If the opcode is a target-specific ISD node, there's nothing we can
4064 // do here and the operand rules may not line up with the below, so
4066 if (Opcode >= ISD::BUILTIN_OP_END)
4069 if (isUndef(Opcode, {SDValue(Cst1, 0), SDValue(Cst2, 0)}))
4070 return getUNDEF(VT);
4072 // Handle the case of two scalars.
4073 if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
4074 if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
4075 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2);
4076 assert((!Folded || !VT.isVector()) &&
4077 "Can't fold vectors ops with scalar operands");
4082 // fold (add Sym, c) -> Sym+c
4083 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
4084 return FoldSymbolOffset(Opcode, VT, GA, Cst2);
4085 if (TLI->isCommutativeBinOp(Opcode))
4086 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
4087 return FoldSymbolOffset(Opcode, VT, GA, Cst1);
4089 // For vectors extract each constant element into Inputs so we can constant
4090 // fold them individually.
4091 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
4092 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
4096 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
4098 EVT SVT = VT.getScalarType();
4100 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4101 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4102 if (LegalSVT.bitsLT(SVT))
4105 SmallVector<SDValue, 4> Outputs;
4106 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
4107 SDValue V1 = BV1->getOperand(I);
4108 SDValue V2 = BV2->getOperand(I);
4110 if (SVT.isInteger()) {
4111 if (V1->getValueType(0).bitsGT(SVT))
4112 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
4113 if (V2->getValueType(0).bitsGT(SVT))
4114 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
4117 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
4120 // Fold one vector element.
4121 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
4122 if (LegalSVT != SVT)
4123 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4125 // Scalar folding only succeeded if the result is a constant or UNDEF.
4126 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4127 ScalarResult.getOpcode() != ISD::ConstantFP)
4129 Outputs.push_back(ScalarResult);
4132 assert(VT.getVectorNumElements() == Outputs.size() &&
4133 "Vector size mismatch!");
4135 // We may have a vector type but a scalar result. Create a splat.
4136 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
4138 // Build a big vector out of the scalar elements we generated.
4139 return getBuildVector(VT, SDLoc(), Outputs);
4142 // TODO: Merge with FoldConstantArithmetic
4143 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
4144 const SDLoc &DL, EVT VT,
4145 ArrayRef<SDValue> Ops,
4146 const SDNodeFlags Flags) {
4147 // If the opcode is a target-specific ISD node, there's nothing we can
4148 // do here and the operand rules may not line up with the below, so
4150 if (Opcode >= ISD::BUILTIN_OP_END)
4153 if (isUndef(Opcode, Ops))
4154 return getUNDEF(VT);
4156 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
4160 unsigned NumElts = VT.getVectorNumElements();
4162 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
4163 return !Op.getValueType().isVector() ||
4164 Op.getValueType().getVectorNumElements() == NumElts;
4167 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
4168 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
4169 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
4170 (BV && BV->isConstant());
4173 // All operands must be vector types with the same number of elements as
4174 // the result type and must be either UNDEF or a build vector of constant
4175 // or UNDEF scalars.
4176 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
4177 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
4180 // If we are comparing vectors, then the result needs to be an i1 boolean
4181 // that is then sign-extended back to the legal result type.
4182 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
4184 // Find legal integer scalar type for constant promotion and
4185 // ensure that its scalar size is at least as large as source.
4186 EVT LegalSVT = VT.getScalarType();
4187 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4188 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4189 if (LegalSVT.bitsLT(VT.getScalarType()))
4193 // Constant fold each scalar lane separately.
4194 SmallVector<SDValue, 4> ScalarResults;
4195 for (unsigned i = 0; i != NumElts; i++) {
4196 SmallVector<SDValue, 4> ScalarOps;
4197 for (SDValue Op : Ops) {
4198 EVT InSVT = Op.getValueType().getScalarType();
4199 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
4201 // We've checked that this is UNDEF or a constant of some kind.
4203 ScalarOps.push_back(getUNDEF(InSVT));
4205 ScalarOps.push_back(Op);
4209 SDValue ScalarOp = InBV->getOperand(i);
4210 EVT ScalarVT = ScalarOp.getValueType();
4212 // Build vector (integer) scalar operands may need implicit
4213 // truncation - do this before constant folding.
4214 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
4215 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
4217 ScalarOps.push_back(ScalarOp);
4220 // Constant fold the scalar operands.
4221 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
4223 // Legalize the (integer) scalar constant if necessary.
4224 if (LegalSVT != SVT)
4225 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4227 // Scalar folding only succeeded if the result is a constant or UNDEF.
4228 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4229 ScalarResult.getOpcode() != ISD::ConstantFP)
4231 ScalarResults.push_back(ScalarResult);
4234 SDValue V = getBuildVector(VT, DL, ScalarResults);
4235 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
4239 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4240 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
4241 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4242 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
4243 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4244 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
4246 // Canonicalize constant to RHS if commutative.
4247 if (TLI->isCommutativeBinOp(Opcode)) {
4249 std::swap(N1C, N2C);
4251 } else if (N1CFP && !N2CFP) {
4252 std::swap(N1CFP, N2CFP);
4259 case ISD::TokenFactor:
4260 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
4261 N2.getValueType() == MVT::Other && "Invalid token factor!");
4262 // Fold trivial token factors.
4263 if (N1.getOpcode() == ISD::EntryToken) return N2;
4264 if (N2.getOpcode() == ISD::EntryToken) return N1;
4265 if (N1 == N2) return N1;
4267 case ISD::CONCAT_VECTORS: {
4268 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
4269 SDValue Ops[] = {N1, N2};
4270 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
4275 assert(VT.isInteger() && "This operator does not apply to FP types!");
4276 assert(N1.getValueType() == N2.getValueType() &&
4277 N1.getValueType() == VT && "Binary operator types must match!");
4278 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
4279 // worth handling here.
4280 if (N2C && N2C->isNullValue())
4282 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
4289 assert(VT.isInteger() && "This operator does not apply to FP types!");
4290 assert(N1.getValueType() == N2.getValueType() &&
4291 N1.getValueType() == VT && "Binary operator types must match!");
4292 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
4293 // it's worth handling here.
4294 if (N2C && N2C->isNullValue())
4308 assert(VT.isInteger() && "This operator does not apply to FP types!");
4309 assert(N1.getValueType() == N2.getValueType() &&
4310 N1.getValueType() == VT && "Binary operator types must match!");
4317 if (getTarget().Options.UnsafeFPMath) {
4318 if (Opcode == ISD::FADD) {
4320 if (N2CFP && N2CFP->getValueAPF().isZero())
4322 } else if (Opcode == ISD::FSUB) {
4324 if (N2CFP && N2CFP->getValueAPF().isZero())
4326 } else if (Opcode == ISD::FMUL) {
4328 if (N2CFP && N2CFP->isZero())
4331 if (N2CFP && N2CFP->isExactlyValue(1.0))
4335 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
4336 assert(N1.getValueType() == N2.getValueType() &&
4337 N1.getValueType() == VT && "Binary operator types must match!");
4339 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
4340 assert(N1.getValueType() == VT &&
4341 N1.getValueType().isFloatingPoint() &&
4342 N2.getValueType().isFloatingPoint() &&
4343 "Invalid FCOPYSIGN!");
4350 assert(VT == N1.getValueType() &&
4351 "Shift operators return type must be the same as their first arg");
4352 assert(VT.isInteger() && N2.getValueType().isInteger() &&
4353 "Shifts only work on integers");
4354 assert((!VT.isVector() || VT == N2.getValueType()) &&
4355 "Vector shift amounts must be in the same as their first arg");
4356 // Verify that the shift amount VT is bit enough to hold valid shift
4357 // amounts. This catches things like trying to shift an i1024 value by an
4358 // i8, which is easy to fall into in generic code that uses
4359 // TLI.getShiftAmountTy().
4360 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
4361 "Invalid use of small shift amount with oversized value!");
4363 // Always fold shifts of i1 values so the code generator doesn't need to
4364 // handle them. Since we know the size of the shift has to be less than the
4365 // size of the value, the shift/rotate count is guaranteed to be zero.
4368 if (N2C && N2C->isNullValue())
4371 case ISD::FP_ROUND_INREG: {
4372 EVT EVT = cast<VTSDNode>(N2)->getVT();
4373 assert(VT == N1.getValueType() && "Not an inreg round!");
4374 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
4375 "Cannot FP_ROUND_INREG integer types");
4376 assert(EVT.isVector() == VT.isVector() &&
4377 "FP_ROUND_INREG type should be vector iff the operand "
4379 assert((!EVT.isVector() ||
4380 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
4381 "Vector element counts must match in FP_ROUND_INREG");
4382 assert(EVT.bitsLE(VT) && "Not rounding down!");
4384 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
4388 assert(VT.isFloatingPoint() &&
4389 N1.getValueType().isFloatingPoint() &&
4390 VT.bitsLE(N1.getValueType()) &&
4391 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
4392 "Invalid FP_ROUND!");
4393 if (N1.getValueType() == VT) return N1; // noop conversion.
4395 case ISD::AssertSext:
4396 case ISD::AssertZext: {
4397 EVT EVT = cast<VTSDNode>(N2)->getVT();
4398 assert(VT == N1.getValueType() && "Not an inreg extend!");
4399 assert(VT.isInteger() && EVT.isInteger() &&
4400 "Cannot *_EXTEND_INREG FP types");
4401 assert(!EVT.isVector() &&
4402 "AssertSExt/AssertZExt type should be the vector element type "
4403 "rather than the vector type!");
4404 assert(EVT.bitsLE(VT) && "Not extending!");
4405 if (VT == EVT) return N1; // noop assertion.
4408 case ISD::SIGN_EXTEND_INREG: {
4409 EVT EVT = cast<VTSDNode>(N2)->getVT();
4410 assert(VT == N1.getValueType() && "Not an inreg extend!");
4411 assert(VT.isInteger() && EVT.isInteger() &&
4412 "Cannot *_EXTEND_INREG FP types");
4413 assert(EVT.isVector() == VT.isVector() &&
4414 "SIGN_EXTEND_INREG type should be vector iff the operand "
4416 assert((!EVT.isVector() ||
4417 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
4418 "Vector element counts must match in SIGN_EXTEND_INREG");
4419 assert(EVT.bitsLE(VT) && "Not extending!");
4420 if (EVT == VT) return N1; // Not actually extending
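// Sign-extend a constant in-register: shift the low FromBits bits up to the top, then
// arithmetic-shift back down so the upper bits replicate bit FromBits-1.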
4422 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
4423 unsigned FromBits = EVT.getScalarSizeInBits();
4424 Val <<= Val.getBitWidth() - FromBits;
4425 Val.ashrInPlace(Val.getBitWidth() - FromBits);
4426 return getConstant(Val, DL, ConstantVT);
4430 const APInt &Val = N1C->getAPIntValue();
4431 return SignExtendInReg(Val, VT);
4433 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
4434 SmallVector<SDValue, 8> Ops;
4435 llvm::EVT OpVT = N1.getOperand(0).getValueType();
4436 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
4437 SDValue Op = N1.getOperand(i);
4439 Ops.push_back(getUNDEF(OpVT));
4442 ConstantSDNode *C = cast<ConstantSDNode>(Op);
4443 APInt Val = C->getAPIntValue();
4444 Ops.push_back(SignExtendInReg(Val, OpVT));
4446 return getBuildVector(VT, DL, Ops);
4450 case ISD::EXTRACT_VECTOR_ELT:
4451 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
4453 return getUNDEF(VT);
4455 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
4456 if (N2C && N2C->getZExtValue() >= N1.getValueType().getVectorNumElements())
4457 return getUNDEF(VT);
4459 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
4460 // expanding copies of large vectors from registers.
4462 N1.getOpcode() == ISD::CONCAT_VECTORS &&
4463 N1.getNumOperands() > 0) {
4465 N1.getOperand(0).getValueType().getVectorNumElements();
4466 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
4467 N1.getOperand(N2C->getZExtValue() / Factor),
4468 getConstant(N2C->getZExtValue() % Factor, DL,
4469 N2.getValueType()));
4472 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
4473 // expanding large vector constants.
4474 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
4475 SDValue Elt = N1.getOperand(N2C->getZExtValue());
4477 if (VT != Elt.getValueType())
4478 // If the vector element type is not legal, the BUILD_VECTOR operands
4479 // are promoted and implicitly truncated, and the result implicitly
4480 // extended. Make that explicit here.
4481 Elt = getAnyExtOrTrunc(Elt, DL, VT);
4486 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
4487 // operations are lowered to scalars.
4488 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
4489 // If the indices are the same, return the inserted element else
4490 // if the indices are known different, extract the element from
4491 // the original vector.
4492 SDValue N1Op2 = N1.getOperand(2);
4493 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
4495 if (N1Op2C && N2C) {
4496 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
4497 if (VT == N1.getOperand(1).getValueType())
4498 return N1.getOperand(1);
4500 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
4503 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
4507 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
4508 // when vector types are scalarized and v1iX is legal.
4509 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx)
4510 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
4511 N1.getValueType().getVectorNumElements() == 1) {
4512 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
4516 case ISD::EXTRACT_ELEMENT:
4517 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
4518 assert(!N1.getValueType().isVector() && !VT.isVector() &&
4519 (N1.getValueType().isInteger() == VT.isInteger()) &&
4520 N1.getValueType() != VT &&
4521 "Wrong types for EXTRACT_ELEMENT!");
4523 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
4524 // 64-bit integers into 32-bit parts. Instead of building the extract of
4525 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
4526 if (N1.getOpcode() == ISD::BUILD_PAIR)
4527 return N1.getOperand(N2C->getZExtValue());
4529 // EXTRACT_ELEMENT of a constant int is also very common.
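// e.g. extracting element 1 (the high half) of the i64 constant 0x1234567800000000 as i32 yields 0x12345678.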
4531 unsigned ElementSize = VT.getSizeInBits();
4532 unsigned Shift = ElementSize * N2C->getZExtValue();
4533 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
4534 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
4537 case ISD::EXTRACT_SUBVECTOR:
4538 if (VT.isSimple() && N1.getValueType().isSimple()) {
4539 assert(VT.isVector() && N1.getValueType().isVector() &&
4540 "Extract subvector VTs must be a vectors!");
4541 assert(VT.getVectorElementType() ==
4542 N1.getValueType().getVectorElementType() &&
4543 "Extract subvector VTs must have the same element type!");
4544 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
4545 "Extract subvector must be from larger vector to smaller vector!");
4548 assert((VT.getVectorNumElements() + N2C->getZExtValue()
4549 <= N1.getValueType().getVectorNumElements())
4550 && "Extract subvector overflow!");
4553 // Trivial extraction.
4554 if (VT.getSimpleVT() == N1.getSimpleValueType())
4557 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
4559 return getUNDEF(VT);
4561 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
4562 // the concat have the same type as the extract.
4563 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
4564 N1.getNumOperands() > 0 &&
4565 VT == N1.getOperand(0).getValueType()) {
4566 unsigned Factor = VT.getVectorNumElements();
4567 return N1.getOperand(N2C->getZExtValue() / Factor);
4570 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
4571 // during shuffle legalization.
4572 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
4573 VT == N1.getOperand(1).getValueType())
4574 return N1.getOperand(1);
4579 // Perform trivial constant folding.
4581 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
4584 // Constant fold FP operations.
4585 bool HasFPExceptions = TLI->hasFloatingPointExceptions();
4588 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
4589 APFloat::opStatus s;
4592 s = V1.add(V2, APFloat::rmNearestTiesToEven);
4593 if (!HasFPExceptions || s != APFloat::opInvalidOp)
4594 return getConstantFP(V1, DL, VT);
4597 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
4598 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
4599 return getConstantFP(V1, DL, VT);
4602 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
4603 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
4604 return getConstantFP(V1, DL, VT);
4607 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
4608 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
4609 s!=APFloat::opDivByZero)) {
4610 return getConstantFP(V1, DL, VT);
4615 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
4616 s!=APFloat::opDivByZero)) {
4617 return getConstantFP(V1, DL, VT);
4620 case ISD::FCOPYSIGN:
4622 return getConstantFP(V1, DL, VT);
4627 if (Opcode == ISD::FP_ROUND) {
4628 APFloat V = N1CFP->getValueAPF(); // make copy
4630 // This can return overflow, underflow, or inexact; we don't care.
4631 // FIXME need to be more flexible about rounding mode.
4632 (void)V.convert(EVTToAPFloatSemantics(VT),
4633 APFloat::rmNearestTiesToEven, &ignored);
4634 return getConstantFP(V, DL, VT);
4638 // Canonicalize an UNDEF to the RHS, even over a constant.
4640 if (TLI->isCommutativeBinOp(Opcode)) {
4644 case ISD::FP_ROUND_INREG:
4645 case ISD::SIGN_EXTEND_INREG:
4651 return N1; // fold op(undef, arg2) -> undef
4659 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
4660 // For vectors, we can't easily build an all zero vector, just return the LHS.
4667 // Fold a bunch of operators when the RHS is undef.
4672 // Handle undef ^ undef -> 0 special case. This is a common idiom (misuse).
4674 return getConstant(0, DL, VT);
4684 return N2; // fold op(arg1, undef) -> undef
4690 if (getTarget().Options.UnsafeFPMath)
4698 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
4699 // For vectors, we can't easily build an all zero vector, just return the LHS.
4704 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
4705 // For vectors, we can't easily build an all one vector, just return the LHS.
4713 // Memoize this node if possible.
4715 SDVTList VTs = getVTList(VT);
4716 SDValue Ops[] = {N1, N2};
4717 if (VT != MVT::Glue) {
4718 FoldingSetNodeID ID;
4719 AddNodeIDNode(ID, Opcode, VTs, Ops);
4721 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4722 E->intersectFlagsWith(Flags);
4723 return SDValue(E, 0);
4726 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4728 createOperands(N, Ops);
4729 CSEMap.InsertNode(N, IP);
4731 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4732 createOperands(N, Ops);
4736 SDValue V = SDValue(N, 0);
4737 NewSDValueDbgMsg(V, "Creating new node: ", this);
4741 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4742 SDValue N1, SDValue N2, SDValue N3) {
4743 // Perform various simplifications.
4746 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4747 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
4748 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
4749 if (N1CFP && N2CFP && N3CFP) {
4750 APFloat V1 = N1CFP->getValueAPF();
4751 const APFloat &V2 = N2CFP->getValueAPF();
4752 const APFloat &V3 = N3CFP->getValueAPF();
4753 APFloat::opStatus s =
4754 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
4755 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp)
4756 return getConstantFP(V1, DL, VT);
4760 case ISD::CONCAT_VECTORS: {
4761 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
4762 SDValue Ops[] = {N1, N2, N3};
4763 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
4768 // Use FoldSetCC to simplify SETCC's.
4769 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
4771 // Vector constant folding.
4772 SDValue Ops[] = {N1, N2, N3};
4773 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
4774 NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
4780 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
4781 if (N1C->getZExtValue())
4782 return N2; // select true, X, Y -> X
4783 return N3; // select false, X, Y -> Y
4786 if (N2 == N3) return N2; // select C, X, X -> X
4788 case ISD::VECTOR_SHUFFLE:
4789 llvm_unreachable("should use getVectorShuffle constructor!");
4790 case ISD::INSERT_VECTOR_ELT: {
4791 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
4792 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
4793 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
4794 return getUNDEF(VT);
4797 case ISD::INSERT_SUBVECTOR: {
4799 if (VT.isSimple() && N1.getValueType().isSimple()
4800 && N2.getValueType().isSimple()) {
4801 assert(VT.isVector() && N1.getValueType().isVector() &&
4802 N2.getValueType().isVector() &&
4803 "Insert subvector VTs must be a vectors");
4804 assert(VT == N1.getValueType() &&
4805 "Dest and insert subvector source types must match!");
4806 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
4807 "Insert subvector must be from smaller vector to larger vector!");
4808 if (isa<ConstantSDNode>(Index)) {
4809 assert((N2.getValueType().getVectorNumElements() +
4810 cast<ConstantSDNode>(Index)->getZExtValue()
4811 <= VT.getVectorNumElements())
4812 && "Insert subvector overflow!");
4815 // Trivial insertion.
4816 if (VT.getSimpleVT() == N2.getSimpleValueType())
4822 // Fold bit_convert nodes from a type to themselves.
4823 if (N1.getValueType() == VT)
4828 // Memoize node if it doesn't produce a flag.
4830 SDVTList VTs = getVTList(VT);
4831 SDValue Ops[] = {N1, N2, N3};
4832 if (VT != MVT::Glue) {
4833 FoldingSetNodeID ID;
4834 AddNodeIDNode(ID, Opcode, VTs, Ops);
4836 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4837 return SDValue(E, 0);
4839 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4840 createOperands(N, Ops);
4841 CSEMap.InsertNode(N, IP);
4843 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4844 createOperands(N, Ops);
4848 SDValue V = SDValue(N, 0);
4849 NewSDValueDbgMsg(V, "Creating new node: ", this);
4853 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4854 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
4855 SDValue Ops[] = { N1, N2, N3, N4 };
4856 return getNode(Opcode, DL, VT, Ops);
4859 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4860 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
4862 SDValue Ops[] = { N1, N2, N3, N4, N5 };
4863 return getNode(Opcode, DL, VT, Ops);
4866 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
4867 /// the incoming stack arguments to be loaded from the stack.
4868 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
4869 SmallVector<SDValue, 8> ArgChains;
4871 // Include the original chain at the beginning of the list. When this is
4872 // used by target LowerCall hooks, this helps legalize find the
4873 // CALLSEQ_BEGIN node.
4874 ArgChains.push_back(Chain);
4876 // Add a chain value for each stack argument.
4877 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
4878 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
4879 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
4880 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
4881 if (FI->getIndex() < 0)
4882 ArgChains.push_back(SDValue(L, 1));
4884 // Build a tokenfactor for all the chains.
4885 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
4888 /// getMemsetValue - Vectorized representation of the memset value operand.
4890 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
4892 assert(!Value.isUndef());
4894 unsigned NumBits = VT.getScalarSizeInBits();
4895 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
4896 assert(C->getAPIntValue().getBitWidth() == 8);
4897 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
4899 return DAG.getConstant(Val, dl, VT);
4900 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
4904 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
4905 EVT IntVT = VT.getScalarType();
4906 if (!IntVT.isInteger())
4907 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
4909 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
4911 // Use a multiplication with 0x010101... to extend the input to the required length.
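// e.g. splatting the i8 value 0xAB to i32: 0xAB * 0x01010101 = 0xABABABAB.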
4913 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
4914 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
4915 DAG.getConstant(Magic, dl, IntVT));
4918 if (VT != Value.getValueType() && !VT.isInteger())
4919 Value = DAG.getBitcast(VT.getScalarType(), Value);
4920 if (VT != Value.getValueType())
4921 Value = DAG.getSplatBuildVector(VT, dl, Value);
4926 /// getMemsetStringVal - Similar to getMemsetValue, except this is only used
4927 /// when a memcpy is turned into a memset and the source is a constant string.
4929 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
4930 const TargetLowering &TLI,
4931 const ConstantDataArraySlice &Slice) {
4932 // Handle vector with all elements zero.
4933 if (Slice.Array == nullptr) {
4935 return DAG.getConstant(0, dl, VT);
4936 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
4937 return DAG.getConstantFP(0.0, dl, VT);
4938 else if (VT.isVector()) {
4939 unsigned NumElts = VT.getVectorNumElements();
4940 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
4941 return DAG.getNode(ISD::BITCAST, dl, VT,
4942 DAG.getConstant(0, dl,
4943 EVT::getVectorVT(*DAG.getContext(),
4946 llvm_unreachable("Expected type!");
4949 assert(!VT.isVector() && "Can't handle vector type here!");
4950 unsigned NumVTBits = VT.getSizeInBits();
4951 unsigned NumVTBytes = NumVTBits / 8;
4952 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
4954 APInt Val(NumVTBits, 0);
4955 if (DAG.getDataLayout().isLittleEndian()) {
4956 for (unsigned i = 0; i != NumBytes; ++i)
4957 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
4959 for (unsigned i = 0; i != NumBytes; ++i)
4960 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
4963 // If the "cost" of materializing the integer immediate is less than the cost
4964 // of a load, then it is cost effective to turn the load into the immediate.
4965 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
4966 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
4967 return DAG.getConstant(Val, dl, VT);
4968 return SDValue(nullptr, 0);
4971 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset,
4973 EVT VT = Base.getValueType();
4974 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
4977 /// Returns true if memcpy source is constant data.
4978 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
4979 uint64_t SrcDelta = 0;
4980 GlobalAddressSDNode *G = nullptr;
4981 if (Src.getOpcode() == ISD::GlobalAddress)
4982 G = cast<GlobalAddressSDNode>(Src);
4983 else if (Src.getOpcode() == ISD::ADD &&
4984 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
4985 Src.getOperand(1).getOpcode() == ISD::Constant) {
4986 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
4987 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
4992 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
4993 SrcDelta + G->getOffset());
4996 /// Determines the optimal series of memory ops to replace the memset / memcpy.
4997 /// Return true if the number of memory ops is below the threshold (Limit).
4998 /// It returns, by reference, the types of the sequence of memory ops used to
4999 /// perform the memset / memcpy.
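/// For example, an 8-byte-aligned, 16-byte memset on a target with legal i64 stores is
/// typically lowered with the type sequence {i64, i64}.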
5000 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
5001 unsigned Limit, uint64_t Size,
5002 unsigned DstAlign, unsigned SrcAlign,
5007 unsigned DstAS, unsigned SrcAS,
5009 const TargetLowering &TLI) {
5010 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
5011 "Expecting memcpy / memset source to meet alignment requirement!");
5012 // If 'SrcAlign' is zero, that means the memory operation does not need to
5013 // load the value, i.e. memset or memcpy from constant string. Otherwise,
5014 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
5015 // is the specified alignment of the memory operation. If it is zero, that
5016 // means it's possible to change the alignment of the destination.
5017 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
5018 // not need to be loaded.
5019 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
5020 IsMemset, ZeroMemset, MemcpyStrSrc,
5021 DAG.getMachineFunction());
5023 if (VT == MVT::Other) {
5024 // Use the largest integer type whose alignment constraints are satisfied.
5025 // We only need to check DstAlign here as SrcAlign is always greater or
5026 // equal to DstAlign (or zero).
5028 while (DstAlign && DstAlign < VT.getSizeInBits() / 8 &&
5029 !TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign))
5030 VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
5031 assert(VT.isInteger());
5033 // Find the largest legal integer type.
5035 while (!TLI.isTypeLegal(LVT))
5036 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
5037 assert(LVT.isInteger());
5039 // If the type we've chosen is larger than the largest legal integer type
5040 // then use that instead.
5045 unsigned NumMemOps = 0;
5047 unsigned VTSize = VT.getSizeInBits() / 8;
5048 while (VTSize > Size) {
5049 // For now, only use non-vector loads / stores for the left-over pieces.
5054 if (VT.isVector() || VT.isFloatingPoint()) {
5055 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
5056 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
5057 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
5059 else if (NewVT == MVT::i64 &&
5060 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
5061 TLI.isSafeMemOpType(MVT::f64)) {
5062 // i64 is usually not legal on 32-bit targets, but f64 may be.
5070 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
5071 if (NewVT == MVT::i8)
5073 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
5075 NewVTSize = NewVT.getSizeInBits() / 8;
5077 // If the new VT cannot cover all of the remaining bits, then consider
5078 // issuing a (or a pair of) unaligned and overlapping load / store.
5079 // FIXME: Only does this for 64-bit or more since we don't have proper
5080 // cost model for unaligned load / store.
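// e.g. the last 3 bytes of a 7-byte copy can be covered by a second 4-byte op at offset 3
// that overlaps the first 4-byte op by one byte.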
5082 if (NumMemOps && AllowOverlap &&
5083 VTSize >= 8 && NewVTSize < Size &&
5084 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast)
5092 if (++NumMemOps > Limit)
5095 MemOps.push_back(VT);
5102 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
5103 // On Darwin, -Os means optimize for size without hurting performance, so
5104 // only really optimize for size when -Oz (MinSize) is used.
5105 if (MF.getTarget().getTargetTriple().isOSDarwin())
5106 return MF.getFunction().optForMinSize();
5107 return MF.getFunction().optForSize();
5110 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5111 SDValue Chain, SDValue Dst, SDValue Src,
5112 uint64_t Size, unsigned Align,
5113 bool isVol, bool AlwaysInline,
5114 MachinePointerInfo DstPtrInfo,
5115 MachinePointerInfo SrcPtrInfo) {
5116 // Turn a memcpy of undef to nop.
5120 // Expand memcpy to a series of load and store ops if the size operand falls
5121 // below a certain threshold.
5122 // TODO: In the AlwaysInline case, if the size is big then generate a loop
5123 // rather than a potentially huge number of loads and stores.
5124 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5125 const DataLayout &DL = DAG.getDataLayout();
5126 LLVMContext &C = *DAG.getContext();
5127 std::vector<EVT> MemOps;
5128 bool DstAlignCanChange = false;
5129 MachineFunction &MF = DAG.getMachineFunction();
5130 MachineFrameInfo &MFI = MF.getFrameInfo();
5131 bool OptSize = shouldLowerMemFuncForSize(MF);
5132 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5133 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5134 DstAlignCanChange = true;
5135 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
5136 if (Align > SrcAlign)
5138 ConstantDataArraySlice Slice;
5139 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
5140 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
5141 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
5143 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
5144 (DstAlignCanChange ? 0 : Align),
5145 (isZeroConstant ? 0 : SrcAlign),
5146 false, false, CopyFromConstant, true,
5147 DstPtrInfo.getAddrSpace(),
5148 SrcPtrInfo.getAddrSpace(),
5152 if (DstAlignCanChange) {
5153 Type *Ty = MemOps[0].getTypeForEVT(C);
5154 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
5156 // Don't promote to an alignment that would require dynamic stack
5158 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
5159 if (!TRI->needsStackRealignment(MF))
5160 while (NewAlign > Align &&
5161 DL.exceedsNaturalStackAlignment(NewAlign))
5164 if (NewAlign > Align) {
5165 // Give the stack frame object a larger alignment if needed.
5166 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5167 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5172 MachineMemOperand::Flags MMOFlags =
5173 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5174 SmallVector<SDValue, 8> OutChains;
5175 unsigned NumMemOps = MemOps.size();
5176 uint64_t SrcOff = 0, DstOff = 0;
5177 for (unsigned i = 0; i != NumMemOps; ++i) {
5179 unsigned VTSize = VT.getSizeInBits() / 8;
5180 SDValue Value, Store;
5182 if (VTSize > Size) {
5183 // Issuing an unaligned load / store pair that overlaps with the previous
5184 // pair. Adjust the offset accordingly.
5185 assert(i == NumMemOps-1 && i != 0);
5186 SrcOff -= VTSize - Size;
5187 DstOff -= VTSize - Size;
5190 if (CopyFromConstant &&
5191 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
5192 // It's unlikely a store of a vector immediate can be done in a single
5193 // instruction. It would require a load from a constantpool first.
5194 // We only handle zero vectors here.
5195 // FIXME: Handle other cases where store of vector immediate is done in
5196 // a single instruction.
5197 ConstantDataArraySlice SubSlice;
5198 if (SrcOff < Slice.Length) {
5200 SubSlice.move(SrcOff);
5202 // This is an out-of-bounds access and hence UB. Pretend we read zero.
5203 SubSlice.Array = nullptr;
5204 SubSlice.Offset = 0;
5205 SubSlice.Length = VTSize;
5207 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
5208 if (Value.getNode())
5209 Store = DAG.getStore(Chain, dl, Value,
5210 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5211 DstPtrInfo.getWithOffset(DstOff), Align,
5215 if (!Store.getNode()) {
5216 // The type might not be legal for the target. This should only happen
5217 // if the type is smaller than a legal type, as on PPC, so the right
5218 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
5219 // to Load/Store if NVT==VT.
5220 // FIXME does the case above also need this?
5221 EVT NVT = TLI.getTypeToTransformTo(C, VT);
5222 assert(NVT.bitsGE(VT));
5224 bool isDereferenceable =
5225 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5226 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5227 if (isDereferenceable)
5228 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5230 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
5231 DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5232 SrcPtrInfo.getWithOffset(SrcOff), VT,
5233 MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
5234 OutChains.push_back(Value.getValue(1));
5235 Store = DAG.getTruncStore(
5236 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5237 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
5239 OutChains.push_back(Store);
5245 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
5248 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5249 SDValue Chain, SDValue Dst, SDValue Src,
5250 uint64_t Size, unsigned Align,
5251 bool isVol, bool AlwaysInline,
5252 MachinePointerInfo DstPtrInfo,
5253 MachinePointerInfo SrcPtrInfo) {
5254 // Turn a memmove of undef into a nop.
5255 if (Src.isUndef())
5256 return Chain;
5258 // Expand memmove to a series of load and store ops if the size operand falls
5259 // below a certain threshold.
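// Illustrative note (a sketch, not from the original source; P is a
// hypothetical pointer): unlike the memcpy expansion above, every load is
// emitted and chained before any store is emitted, so an overlapping call
// such as memmove(P + 1, P, 8) still reads the original source bytes before
// they are overwritten.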
5260 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5261 const DataLayout &DL = DAG.getDataLayout();
5262 LLVMContext &C = *DAG.getContext();
5263 std::vector<EVT> MemOps;
5264 bool DstAlignCanChange = false;
5265 MachineFunction &MF = DAG.getMachineFunction();
5266 MachineFrameInfo &MFI = MF.getFrameInfo();
5267 bool OptSize = shouldLowerMemFuncForSize(MF);
5268 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5269 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5270 DstAlignCanChange = true;
5271 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
5272 if (Align > SrcAlign)
5273 SrcAlign = Align;
5274 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
5276 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
5277 (DstAlignCanChange ? 0 : Align), SrcAlign,
5278 false, false, false, false,
5279 DstPtrInfo.getAddrSpace(),
5280 SrcPtrInfo.getAddrSpace(),
5284 if (DstAlignCanChange) {
5285 Type *Ty = MemOps[0].getTypeForEVT(C);
5286 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
5287 if (NewAlign > Align) {
5288 // Give the stack frame object a larger alignment if needed.
5289 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5290 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5295 MachineMemOperand::Flags MMOFlags =
5296 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5297 uint64_t SrcOff = 0, DstOff = 0;
5298 SmallVector<SDValue, 8> LoadValues;
5299 SmallVector<SDValue, 8> LoadChains;
5300 SmallVector<SDValue, 8> OutChains;
5301 unsigned NumMemOps = MemOps.size();
5302 for (unsigned i = 0; i < NumMemOps; i++) {
5304 unsigned VTSize = VT.getSizeInBits() / 8;
5307 bool isDereferenceable =
5308 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5309 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5310 if (isDereferenceable)
5311 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5314 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5315 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
5316 LoadValues.push_back(Value);
5317 LoadChains.push_back(Value.getValue(1));
5320 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
5322 for (unsigned i = 0; i < NumMemOps; i++) {
5324 unsigned VTSize = VT.getSizeInBits() / 8;
5327 Store = DAG.getStore(Chain, dl, LoadValues[i],
5328 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5329 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
5330 OutChains.push_back(Store);
5334 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
5337 /// \brief Lower a call to the 'memset' intrinsic into a series of store
5338 /// operations.
5340 /// \param DAG Selection DAG where lowered code is placed.
5341 /// \param dl Link to corresponding IR location.
5342 /// \param Chain Control flow dependency.
5343 /// \param Dst Pointer to destination memory location.
5344 /// \param Src Value of byte to write into the memory.
5345 /// \param Size Number of bytes to write.
5346 /// \param Align Alignment of the destination in bytes.
5347 /// \param isVol True if destination is volatile.
5348 /// \param DstPtrInfo IR information on the memory pointer.
5349 /// \returns New head in the control flow, if lowering was successful, empty
5350 /// SDValue otherwise.
5352 /// The function tries to replace the 'llvm.memset' intrinsic with several
5353 /// store operations and value calculation code. This is usually profitable
5354 /// for small memory sizes.
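/// Illustrative example (a sketch under the assumption that the target allows
/// the chosen store widths): a 16-byte memset of the constant byte 0xAB is
/// typically lowered to two i64 stores of 0xABABABABABABABAB at offsets 0 and
/// 8, built from the splatted value returned by getMemsetValue.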
5355 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
5356 SDValue Chain, SDValue Dst, SDValue Src,
5357 uint64_t Size, unsigned Align, bool isVol,
5358 MachinePointerInfo DstPtrInfo) {
5359 // Turn a memset of undef into a nop.
5360 if (Src.isUndef())
5361 return Chain;
5363 // Expand memset to a series of load/store ops if the size operand
5364 // falls below a certain threshold.
5365 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5366 std::vector<EVT> MemOps;
5367 bool DstAlignCanChange = false;
5368 MachineFunction &MF = DAG.getMachineFunction();
5369 MachineFrameInfo &MFI = MF.getFrameInfo();
5370 bool OptSize = shouldLowerMemFuncForSize(MF);
5371 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5372 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5373 DstAlignCanChange = true;
5374 bool IsZeroVal =
5375 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
5376 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
5377 Size, (DstAlignCanChange ? 0 : Align), 0,
5378 true, IsZeroVal, false, true,
5379 DstPtrInfo.getAddrSpace(), ~0u,
5383 if (DstAlignCanChange) {
5384 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
5385 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
5386 if (NewAlign > Align) {
5387 // Give the stack frame object a larger alignment if needed.
5388 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5389 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5394 SmallVector<SDValue, 8> OutChains;
5395 uint64_t DstOff = 0;
5396 unsigned NumMemOps = MemOps.size();
5398 // Find the largest store and generate the bit pattern for it.
5399 EVT LargestVT = MemOps[0];
5400 for (unsigned i = 1; i < NumMemOps; i++)
5401 if (MemOps[i].bitsGT(LargestVT))
5402 LargestVT = MemOps[i];
5403 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
5405 for (unsigned i = 0; i < NumMemOps; i++) {
5407 unsigned VTSize = VT.getSizeInBits() / 8;
5408 if (VTSize > Size) {
5409 // We are issuing an unaligned store that overlaps the previous store;
5410 // adjust the offset accordingly.
5411 assert(i == NumMemOps-1 && i != 0);
5412 DstOff -= VTSize - Size;
5415 // If this store is smaller than the largest store, see whether we can get
5416 // the smaller value for free with a truncate.
5417 SDValue Value = MemSetValue;
5418 if (VT.bitsLT(LargestVT)) {
5419 if (!LargestVT.isVector() && !VT.isVector() &&
5420 TLI.isTruncateFree(LargestVT, VT))
5421 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
5423 Value = getMemsetValue(Src, VT, DAG, dl);
5425 assert(Value.getValueType() == VT && "Value with wrong type.");
5426 SDValue Store = DAG.getStore(
5427 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5428 DstPtrInfo.getWithOffset(DstOff), Align,
5429 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
5430 OutChains.push_back(Store);
5431 DstOff += VT.getSizeInBits() / 8;
5435 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
5438 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
5440 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
5441 // pointer operands can be losslessly bitcasted to pointers of address space 0.
5442 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
5443 report_fatal_error("cannot lower memory intrinsic in address space " +
5448 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
5449 SDValue Src, SDValue Size, unsigned Align,
5450 bool isVol, bool AlwaysInline, bool isTailCall,
5451 MachinePointerInfo DstPtrInfo,
5452 MachinePointerInfo SrcPtrInfo) {
5453 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
5455 // Check to see if we should lower the memcpy to loads and stores first.
5456 // For cases within the target-specified limits, this is the best choice.
5457 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
5459 // Memcpy with size zero? Just return the original chain.
5460 if (ConstantSize->isNullValue())
5461 return Chain;
5463 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
5464 ConstantSize->getZExtValue(),Align,
5465 isVol, false, DstPtrInfo, SrcPtrInfo);
5466 if (Result.getNode())
5470 // Then check to see if we should lower the memcpy with target-specific
5471 // code. If the target chooses to do this, this is the next best.
5473 SDValue Result = TSI->EmitTargetCodeForMemcpy(
5474 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
5475 DstPtrInfo, SrcPtrInfo);
5476 if (Result.getNode())
5480 // If we really need inline code and the target declined to provide it,
5481 // use a (potentially long) sequence of loads and stores.
5483 assert(ConstantSize && "AlwaysInline requires a constant size!");
5484 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
5485 ConstantSize->getZExtValue(), Align, isVol,
5486 true, DstPtrInfo, SrcPtrInfo);
5489 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
5490 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
5492 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
5493 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
5494 // respect volatile, so they may do things like read or write memory
5495 // beyond the given memory regions. But fixing this isn't easy, and most
5496 // people don't care.
5498 // Emit a library call.
5499 TargetLowering::ArgListTy Args;
5500 TargetLowering::ArgListEntry Entry;
5501 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
5502 Entry.Node = Dst; Args.push_back(Entry);
5503 Entry.Node = Src; Args.push_back(Entry);
5504 Entry.Node = Size; Args.push_back(Entry);
5505 // FIXME: pass in SDLoc
5506 TargetLowering::CallLoweringInfo CLI(*this);
5509 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
5510 Dst.getValueType().getTypeForEVT(*getContext()),
5511 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
5512 TLI->getPointerTy(getDataLayout())),
5515 .setTailCall(isTailCall);
5517 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
5518 return CallResult.second;
5521 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
5522 SDValue Src, SDValue Size, unsigned Align,
5523 bool isVol, bool isTailCall,
5524 MachinePointerInfo DstPtrInfo,
5525 MachinePointerInfo SrcPtrInfo) {
5526 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
5528 // Check to see if we should lower the memmove to loads and stores first.
5529 // For cases within the target-specified limits, this is the best choice.
5530 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
5532 // Memmove with size zero? Just return the original chain.
5533 if (ConstantSize->isNullValue())
5534 return Chain;
5536 SDValue Result =
5537 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
5538 ConstantSize->getZExtValue(), Align, isVol,
5539 false, DstPtrInfo, SrcPtrInfo);
5540 if (Result.getNode())
5544 // Then check to see if we should lower the memmove with target-specific
5545 // code. If the target chooses to do this, this is the next best.
5547 SDValue Result = TSI->EmitTargetCodeForMemmove(
5548 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
5549 if (Result.getNode())
5553 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
5554 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
5556 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
5557 // not be safe. See memcpy above for more details.
5559 // Emit a library call.
5560 TargetLowering::ArgListTy Args;
5561 TargetLowering::ArgListEntry Entry;
5562 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
5563 Entry.Node = Dst; Args.push_back(Entry);
5564 Entry.Node = Src; Args.push_back(Entry);
5565 Entry.Node = Size; Args.push_back(Entry);
5566 // FIXME: pass in SDLoc
5567 TargetLowering::CallLoweringInfo CLI(*this);
5570 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
5571 Dst.getValueType().getTypeForEVT(*getContext()),
5572 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
5573 TLI->getPointerTy(getDataLayout())),
5576 .setTailCall(isTailCall);
5578 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
5579 return CallResult.second;
5582 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
5583 SDValue Src, SDValue Size, unsigned Align,
5584 bool isVol, bool isTailCall,
5585 MachinePointerInfo DstPtrInfo) {
5586 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
5588 // Check to see if we should lower the memset to stores first.
5589 // For cases within the target-specified limits, this is the best choice.
5590 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
5592 // Memset with size zero? Just return the original chain.
5593 if (ConstantSize->isNullValue())
5594 return Chain;
5596 SDValue Result =
5597 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
5598 Align, isVol, DstPtrInfo);
5600 if (Result.getNode())
5604 // Then check to see if we should lower the memset with target-specific
5605 // code. If the target chooses to do this, this is the next best.
5607 SDValue Result = TSI->EmitTargetCodeForMemset(
5608 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
5609 if (Result.getNode())
5613 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
5615 // Emit a library call.
5616 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
5617 TargetLowering::ArgListTy Args;
5618 TargetLowering::ArgListEntry Entry;
5619 Entry.Node = Dst; Entry.Ty = IntPtrTy;
5620 Args.push_back(Entry);
5622 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
5623 Args.push_back(Entry);
5625 Entry.Ty = IntPtrTy;
5626 Args.push_back(Entry);
5628 // FIXME: pass in SDLoc
5629 TargetLowering::CallLoweringInfo CLI(*this);
5632 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
5633 Dst.getValueType().getTypeForEVT(*getContext()),
5634 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
5635 TLI->getPointerTy(getDataLayout())),
5638 .setTailCall(isTailCall);
5640 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
5641 return CallResult.second;
5644 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5645 SDVTList VTList, ArrayRef<SDValue> Ops,
5646 MachineMemOperand *MMO) {
5647 FoldingSetNodeID ID;
5648 ID.AddInteger(MemVT.getRawBits());
5649 AddNodeIDNode(ID, Opcode, VTList, Ops);
5650 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5652 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5653 cast<AtomicSDNode>(E)->refineAlignment(MMO);
5654 return SDValue(E, 0);
5657 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5658 VTList, MemVT, MMO);
5659 createOperands(N, Ops);
5661 CSEMap.InsertNode(N, IP);
5663 return SDValue(N, 0);
5666 SDValue SelectionDAG::getAtomicCmpSwap(
5667 unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain,
5668 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
5669 unsigned Alignment, AtomicOrdering SuccessOrdering,
5670 AtomicOrdering FailureOrdering, SyncScope::ID SSID) {
5671 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
5672 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
5673 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
5675 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5676 Alignment = getEVTAlignment(MemVT);
5678 MachineFunction &MF = getMachineFunction();
5680 // FIXME: Volatile isn't really correct; we should keep track of atomic
5681 // orderings in the memoperand.
5682 auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad |
5683 MachineMemOperand::MOStore;
5684 MachineMemOperand *MMO =
5685 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
5686 AAMDNodes(), nullptr, SSID, SuccessOrdering,
5689 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO);
5692 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
5693 EVT MemVT, SDVTList VTs, SDValue Chain,
5694 SDValue Ptr, SDValue Cmp, SDValue Swp,
5695 MachineMemOperand *MMO) {
5696 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
5697 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
5698 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
5700 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
5701 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
5704 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5705 SDValue Chain, SDValue Ptr, SDValue Val,
5706 const Value *PtrVal, unsigned Alignment,
5707 AtomicOrdering Ordering,
5708 SyncScope::ID SSID) {
5709 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5710 Alignment = getEVTAlignment(MemVT);
5712 MachineFunction &MF = getMachineFunction();
5713 // An atomic store does not load. An atomic load does not store.
5714 // (An atomicrmw obviously both loads and stores.)
5715 // For now, atomics are considered to be volatile always, and they are
5716 // chained as such.
5717 // FIXME: Volatile isn't really correct; we should keep track of atomic
5718 // orderings in the memoperand.
5719 auto Flags = MachineMemOperand::MOVolatile;
5720 if (Opcode != ISD::ATOMIC_STORE)
5721 Flags |= MachineMemOperand::MOLoad;
5722 if (Opcode != ISD::ATOMIC_LOAD)
5723 Flags |= MachineMemOperand::MOStore;
5725 MachineMemOperand *MMO =
5726 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
5727 MemVT.getStoreSize(), Alignment, AAMDNodes(),
5728 nullptr, SSID, Ordering);
5730 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
5733 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5734 SDValue Chain, SDValue Ptr, SDValue Val,
5735 MachineMemOperand *MMO) {
5736 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
5737 Opcode == ISD::ATOMIC_LOAD_SUB ||
5738 Opcode == ISD::ATOMIC_LOAD_AND ||
5739 Opcode == ISD::ATOMIC_LOAD_OR ||
5740 Opcode == ISD::ATOMIC_LOAD_XOR ||
5741 Opcode == ISD::ATOMIC_LOAD_NAND ||
5742 Opcode == ISD::ATOMIC_LOAD_MIN ||
5743 Opcode == ISD::ATOMIC_LOAD_MAX ||
5744 Opcode == ISD::ATOMIC_LOAD_UMIN ||
5745 Opcode == ISD::ATOMIC_LOAD_UMAX ||
5746 Opcode == ISD::ATOMIC_SWAP ||
5747 Opcode == ISD::ATOMIC_STORE) &&
5748 "Invalid Atomic Op");
5750 EVT VT = Val.getValueType();
5752 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
5753 getVTList(VT, MVT::Other);
5754 SDValue Ops[] = {Chain, Ptr, Val};
5755 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
5758 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
5759 EVT VT, SDValue Chain, SDValue Ptr,
5760 MachineMemOperand *MMO) {
5761 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
5763 SDVTList VTs = getVTList(VT, MVT::Other);
5764 SDValue Ops[] = {Chain, Ptr};
5765 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
5768 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
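/// Illustrative usage (a sketch; Lo, Hi, and DL are hypothetical values and
/// debug location, not defined in this file):
///   SDValue Parts[] = {Lo, Hi};
///   SDValue Merged = DAG.getMergeValues(Parts, DL);
/// Result i of the MERGE_VALUES node corresponds to operand i, so the pieces
/// can later be recovered with Merged.getValue(0) and Merged.getValue(1).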
5769 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
5770 if (Ops.size() == 1)
5773 SmallVector<EVT, 4> VTs;
5774 VTs.reserve(Ops.size());
5775 for (unsigned i = 0; i < Ops.size(); ++i)
5776 VTs.push_back(Ops[i].getValueType());
5777 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
5780 SDValue SelectionDAG::getMemIntrinsicNode(
5781 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
5782 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
5783 MachineMemOperand::Flags Flags, unsigned Size) {
5784 if (Align == 0) // Ensure that codegen never sees alignment 0
5785 Align = getEVTAlignment(MemVT);
5787 if (!Size)
5788 Size = MemVT.getStoreSize();
5790 MachineFunction &MF = getMachineFunction();
5791 MachineMemOperand *MMO =
5792 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);
5794 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
5797 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
5799 ArrayRef<SDValue> Ops, EVT MemVT,
5800 MachineMemOperand *MMO) {
5801 assert((Opcode == ISD::INTRINSIC_VOID ||
5802 Opcode == ISD::INTRINSIC_W_CHAIN ||
5803 Opcode == ISD::PREFETCH ||
5804 Opcode == ISD::LIFETIME_START ||
5805 Opcode == ISD::LIFETIME_END ||
5806 ((int)Opcode <= std::numeric_limits<int>::max() &&
5807 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
5808 "Opcode is not a memory-accessing opcode!");
5810 // Memoize the node unless it returns a flag.
5811 MemIntrinsicSDNode *N;
5812 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5813 FoldingSetNodeID ID;
5814 AddNodeIDNode(ID, Opcode, VTList, Ops);
5815 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
5816 Opcode, dl.getIROrder(), VTList, MemVT, MMO));
5817 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5819 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5820 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
5821 return SDValue(E, 0);
5824 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5825 VTList, MemVT, MMO);
5826 createOperands(N, Ops);
5828 CSEMap.InsertNode(N, IP);
5830 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
5831 VTList, MemVT, MMO);
5832 createOperands(N, Ops);
5835 return SDValue(N, 0);
5838 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
5839 /// MachinePointerInfo record from it. This is particularly useful because the
5840 /// code generator has many cases where it doesn't bother passing in a
5841 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
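/// For example (sketch), a pointer of the form (ADD FrameIndex<1>, Constant<8>)
/// is mapped to MachinePointerInfo::getFixedStack(MF, /*FI=*/1, /*Offset=*/8).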
5842 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
5843 SelectionDAG &DAG, SDValue Ptr,
5844 int64_t Offset = 0) {
5845 // If this is FI+Offset, we can model it.
5846 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
5847 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
5848 FI->getIndex(), Offset);
5850 // If this is (FI+Offset1)+Offset2, we can model it.
5851 if (Ptr.getOpcode() != ISD::ADD ||
5852 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
5853 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
5856 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5857 return MachinePointerInfo::getFixedStack(
5858 DAG.getMachineFunction(), FI,
5859 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
5862 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
5863 /// MachinePointerInfo record from it. This is particularly useful because the
5864 /// code generator has many cases where it doesn't bother passing in a
5865 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
5866 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
5867 SelectionDAG &DAG, SDValue Ptr,
5869 // If the 'Offset' value isn't a constant, we can't handle this.
5870 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
5871 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
5872 if (OffsetOp.isUndef())
5873 return InferPointerInfo(Info, DAG, Ptr);
5877 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
5878 EVT VT, const SDLoc &dl, SDValue Chain,
5879 SDValue Ptr, SDValue Offset,
5880 MachinePointerInfo PtrInfo, EVT MemVT,
5882 MachineMemOperand::Flags MMOFlags,
5883 const AAMDNodes &AAInfo, const MDNode *Ranges) {
5884 assert(Chain.getValueType() == MVT::Other &&
5885 "Invalid chain type");
5886 if (Alignment == 0) // Ensure that codegen never sees alignment 0
5887 Alignment = getEVTAlignment(MemVT);
5889 MMOFlags |= MachineMemOperand::MOLoad;
5890 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
5891 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
5892 // the code.
5893 if (PtrInfo.V.isNull())
5894 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
5896 MachineFunction &MF = getMachineFunction();
5897 MachineMemOperand *MMO = MF.getMachineMemOperand(
5898 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
5899 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
5902 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
5903 EVT VT, const SDLoc &dl, SDValue Chain,
5904 SDValue Ptr, SDValue Offset, EVT MemVT,
5905 MachineMemOperand *MMO) {
5906 if (VT == MemVT) {
5907 ExtType = ISD::NON_EXTLOAD;
5908 } else if (ExtType == ISD::NON_EXTLOAD) {
5909 assert(VT == MemVT && "Non-extending load from different memory type!");
5912 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
5913 "Should only be an extending load, not truncating!");
5914 assert(VT.isInteger() == MemVT.isInteger() &&
5915 "Cannot convert from FP to Int or Int -> FP!");
5916 assert(VT.isVector() == MemVT.isVector() &&
5917 "Cannot use an ext load to convert to or from a vector!");
5918 assert((!VT.isVector() ||
5919 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
5920 "Cannot use an ext load to change the number of vector elements!");
5923 bool Indexed = AM != ISD::UNINDEXED;
5924 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
5926 SDVTList VTs = Indexed ?
5927 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
5928 SDValue Ops[] = { Chain, Ptr, Offset };
5929 FoldingSetNodeID ID;
5930 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
5931 ID.AddInteger(MemVT.getRawBits());
5932 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
5933 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
5934 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
5936 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
5937 cast<LoadSDNode>(E)->refineAlignment(MMO);
5938 return SDValue(E, 0);
5940 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
5941 ExtType, MemVT, MMO);
5942 createOperands(N, Ops);
5944 CSEMap.InsertNode(N, IP);
5947 NewSDValueDbgMsg(V, "Creating new node: ", this);
5951 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5952 SDValue Ptr, MachinePointerInfo PtrInfo,
5954 MachineMemOperand::Flags MMOFlags,
5955 const AAMDNodes &AAInfo, const MDNode *Ranges) {
5956 SDValue Undef = getUNDEF(Ptr.getValueType());
5957 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5958 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
5961 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
5962 SDValue Ptr, MachineMemOperand *MMO) {
5963 SDValue Undef = getUNDEF(Ptr.getValueType());
5964 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
5968 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5969 EVT VT, SDValue Chain, SDValue Ptr,
5970 MachinePointerInfo PtrInfo, EVT MemVT,
5972 MachineMemOperand::Flags MMOFlags,
5973 const AAMDNodes &AAInfo) {
5974 SDValue Undef = getUNDEF(Ptr.getValueType());
5975 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
5976 MemVT, Alignment, MMOFlags, AAInfo);
5979 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
5980 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
5981 MachineMemOperand *MMO) {
5982 SDValue Undef = getUNDEF(Ptr.getValueType());
5983 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
5987 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
5988 SDValue Base, SDValue Offset,
5989 ISD::MemIndexedMode AM) {
5990 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
5991 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
5992 // Don't propagate the invariant or dereferenceable flags.
5993 auto MMOFlags =
5994 LD->getMemOperand()->getFlags() &
5995 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
5996 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
5997 LD->getChain(), Base, Offset, LD->getPointerInfo(),
5998 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
6002 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6003 SDValue Ptr, MachinePointerInfo PtrInfo,
6005 MachineMemOperand::Flags MMOFlags,
6006 const AAMDNodes &AAInfo) {
6007 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
6008 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6009 Alignment = getEVTAlignment(Val.getValueType());
6011 MMOFlags |= MachineMemOperand::MOStore;
6012 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
6014 if (PtrInfo.V.isNull())
6015 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
6017 MachineFunction &MF = getMachineFunction();
6018 MachineMemOperand *MMO = MF.getMachineMemOperand(
6019 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
6020 return getStore(Chain, dl, Val, Ptr, MMO);
6023 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6024 SDValue Ptr, MachineMemOperand *MMO) {
6025 assert(Chain.getValueType() == MVT::Other &&
6026 "Invalid chain type");
6027 EVT VT = Val.getValueType();
6028 SDVTList VTs = getVTList(MVT::Other);
6029 SDValue Undef = getUNDEF(Ptr.getValueType());
6030 SDValue Ops[] = { Chain, Val, Ptr, Undef };
6031 FoldingSetNodeID ID;
6032 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6033 ID.AddInteger(VT.getRawBits());
6034 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
6035 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
6036 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6038 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6039 cast<StoreSDNode>(E)->refineAlignment(MMO);
6040 return SDValue(E, 0);
6042 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6043 ISD::UNINDEXED, false, VT, MMO);
6044 createOperands(N, Ops);
6046 CSEMap.InsertNode(N, IP);
6049 NewSDValueDbgMsg(V, "Creating new node: ", this);
6053 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6054 SDValue Ptr, MachinePointerInfo PtrInfo,
6055 EVT SVT, unsigned Alignment,
6056 MachineMemOperand::Flags MMOFlags,
6057 const AAMDNodes &AAInfo) {
6058 assert(Chain.getValueType() == MVT::Other &&
6059 "Invalid chain type");
6060 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6061 Alignment = getEVTAlignment(SVT);
6063 MMOFlags |= MachineMemOperand::MOStore;
6064 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
6066 if (PtrInfo.V.isNull())
6067 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
6069 MachineFunction &MF = getMachineFunction();
6070 MachineMemOperand *MMO = MF.getMachineMemOperand(
6071 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
6072 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
6075 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6076 SDValue Ptr, EVT SVT,
6077 MachineMemOperand *MMO) {
6078 EVT VT = Val.getValueType();
6080 assert(Chain.getValueType() == MVT::Other &&
6081 "Invalid chain type");
6082 if (VT == SVT)
6083 return getStore(Chain, dl, Val, Ptr, MMO);
6085 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
6086 "Should only be a truncating store, not extending!");
6087 assert(VT.isInteger() == SVT.isInteger() &&
6088 "Can't do FP-INT conversion!");
6089 assert(VT.isVector() == SVT.isVector() &&
6090 "Cannot use trunc store to convert to or from a vector!");
6091 assert((!VT.isVector() ||
6092 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
6093 "Cannot use trunc store to change the number of vector elements!");
6095 SDVTList VTs = getVTList(MVT::Other);
6096 SDValue Undef = getUNDEF(Ptr.getValueType());
6097 SDValue Ops[] = { Chain, Val, Ptr, Undef };
6098 FoldingSetNodeID ID;
6099 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6100 ID.AddInteger(SVT.getRawBits());
6101 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
6102 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
6103 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6105 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6106 cast<StoreSDNode>(E)->refineAlignment(MMO);
6107 return SDValue(E, 0);
6109 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6110 ISD::UNINDEXED, true, SVT, MMO);
6111 createOperands(N, Ops);
6113 CSEMap.InsertNode(N, IP);
6116 NewSDValueDbgMsg(V, "Creating new node: ", this);
6120 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
6121 SDValue Base, SDValue Offset,
6122 ISD::MemIndexedMode AM) {
6123 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
6124 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
6125 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
6126 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
6127 FoldingSetNodeID ID;
6128 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6129 ID.AddInteger(ST->getMemoryVT().getRawBits());
6130 ID.AddInteger(ST->getRawSubclassData());
6131 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
6133 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
6134 return SDValue(E, 0);
6136 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
6137 ST->isTruncatingStore(), ST->getMemoryVT(),
6138 ST->getMemOperand());
6139 createOperands(N, Ops);
6141 CSEMap.InsertNode(N, IP);
6144 NewSDValueDbgMsg(V, "Creating new node: ", this);
6148 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6149 SDValue Ptr, SDValue Mask, SDValue Src0,
6150 EVT MemVT, MachineMemOperand *MMO,
6151 ISD::LoadExtType ExtTy, bool isExpanding) {
6152 SDVTList VTs = getVTList(VT, MVT::Other);
6153 SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
6154 FoldingSetNodeID ID;
6155 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
6156 ID.AddInteger(VT.getRawBits());
6157 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
6158 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
6159 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6161 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6162 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
6163 return SDValue(E, 0);
6165 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6166 ExtTy, isExpanding, MemVT, MMO);
6167 createOperands(N, Ops);
6169 CSEMap.InsertNode(N, IP);
6172 NewSDValueDbgMsg(V, "Creating new node: ", this);
6176 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
6177 SDValue Val, SDValue Ptr, SDValue Mask,
6178 EVT MemVT, MachineMemOperand *MMO,
6179 bool IsTruncating, bool IsCompressing) {
6180 assert(Chain.getValueType() == MVT::Other &&
6181 "Invalid chain type");
6182 EVT VT = Val.getValueType();
6183 SDVTList VTs = getVTList(MVT::Other);
6184 SDValue Ops[] = { Chain, Ptr, Mask, Val };
6185 FoldingSetNodeID ID;
6186 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
6187 ID.AddInteger(VT.getRawBits());
6188 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
6189 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO));
6190 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6192 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6193 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
6194 return SDValue(E, 0);
6196 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6197 IsTruncating, IsCompressing, MemVT, MMO);
6198 createOperands(N, Ops);
6200 CSEMap.InsertNode(N, IP);
6203 NewSDValueDbgMsg(V, "Creating new node: ", this);
6207 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
6208 ArrayRef<SDValue> Ops,
6209 MachineMemOperand *MMO) {
6210 assert(Ops.size() == 5 && "Incompatible number of operands");
6212 FoldingSetNodeID ID;
6213 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
6214 ID.AddInteger(VT.getRawBits());
6215 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
6216 dl.getIROrder(), VTs, VT, MMO));
6217 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6219 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6220 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
6221 return SDValue(E, 0);
6224 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
6226 createOperands(N, Ops);
6228 assert(N->getValue().getValueType() == N->getValueType(0) &&
6229 "Incompatible type of the PassThru value in MaskedGatherSDNode");
6230 assert(N->getMask().getValueType().getVectorNumElements() ==
6231 N->getValueType(0).getVectorNumElements() &&
6232 "Vector width mismatch between mask and data");
6233 assert(N->getIndex().getValueType().getVectorNumElements() ==
6234 N->getValueType(0).getVectorNumElements() &&
6235 "Vector width mismatch between index and data");
6237 CSEMap.InsertNode(N, IP);
6240 NewSDValueDbgMsg(V, "Creating new node: ", this);
6244 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
6245 ArrayRef<SDValue> Ops,
6246 MachineMemOperand *MMO) {
6247 assert(Ops.size() == 5 && "Incompatible number of operands");
6249 FoldingSetNodeID ID;
6250 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
6251 ID.AddInteger(VT.getRawBits());
6252 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
6253 dl.getIROrder(), VTs, VT, MMO));
6254 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6256 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6257 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
6258 return SDValue(E, 0);
6260 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
6262 createOperands(N, Ops);
6264 assert(N->getMask().getValueType().getVectorNumElements() ==
6265 N->getValue().getValueType().getVectorNumElements() &&
6266 "Vector width mismatch between mask and data");
6267 assert(N->getIndex().getValueType().getVectorNumElements() ==
6268 N->getValue().getValueType().getVectorNumElements() &&
6269 "Vector width mismatch between index and data");
6271 CSEMap.InsertNode(N, IP);
6274 NewSDValueDbgMsg(V, "Creating new node: ", this);
6278 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
6279 SDValue Ptr, SDValue SV, unsigned Align) {
6280 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
6281 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
6284 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6285 ArrayRef<SDUse> Ops) {
6286 switch (Ops.size()) {
6287 case 0: return getNode(Opcode, DL, VT);
6288 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
6289 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
6290 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
6294 // Copy from an SDUse array into an SDValue array for use with
6295 // the regular getNode logic.
6296 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
6297 return getNode(Opcode, DL, VT, NewOps);
6300 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6301 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
6302 unsigned NumOps = Ops.size();
6304 case 0: return getNode(Opcode, DL, VT);
6305 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
6306 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
6307 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
6313 case ISD::CONCAT_VECTORS:
6314 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
6315 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
6318 case ISD::SELECT_CC:
6319 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
6320 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
6321 "LHS and RHS of condition must have same type!");
6322 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
6323 "True and False arms of SelectCC must have same type!");
6324 assert(Ops[2].getValueType() == VT &&
6325 "select_cc node must be of same type as true and false value!");
6328 assert(NumOps == 5 && "BR_CC takes 5 operands!");
6329 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
6330 "LHS/RHS of comparison should match types!");
6336 SDVTList VTs = getVTList(VT);
6338 if (VT != MVT::Glue) {
6339 FoldingSetNodeID ID;
6340 AddNodeIDNode(ID, Opcode, VTs, Ops);
6343 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
6344 return SDValue(E, 0);
6346 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6347 createOperands(N, Ops);
6349 CSEMap.InsertNode(N, IP);
6351 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6352 createOperands(N, Ops);
6357 NewSDValueDbgMsg(V, "Creating new node: ", this);
6361 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
6362 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
6363 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
6366 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6367 ArrayRef<SDValue> Ops) {
6368 if (VTList.NumVTs == 1)
6369 return getNode(Opcode, DL, VTList.VTs[0], Ops);
6373 // FIXME: figure out how to safely handle things like
6374 // int foo(int x) { return 1 << (x & 255); }
6375 // int bar() { return foo(256); }
6376 case ISD::SRA_PARTS:
6377 case ISD::SRL_PARTS:
6378 case ISD::SHL_PARTS:
6379 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
6380 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
6381 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
6382 else if (N3.getOpcode() == ISD::AND)
6383 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
6384 // If the AND is only masking out bits that cannot affect the shift
6385 // amount, eliminate the AND.
6386 unsigned NumBits = VT.getScalarSizeInBits()*2;
6387 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
6388 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
6394 // Memoize the node unless it returns a flag.
6396 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
6397 FoldingSetNodeID ID;
6398 AddNodeIDNode(ID, Opcode, VTList, Ops);
6400 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
6401 return SDValue(E, 0);
6403 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
6404 createOperands(N, Ops);
6405 CSEMap.InsertNode(N, IP);
6407 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
6408 createOperands(N, Ops);
6412 NewSDValueDbgMsg(V, "Creating new node: ", this);
6416 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
6418 return getNode(Opcode, DL, VTList, None);
6421 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6423 SDValue Ops[] = { N1 };
6424 return getNode(Opcode, DL, VTList, Ops);
6427 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6428 SDValue N1, SDValue N2) {
6429 SDValue Ops[] = { N1, N2 };
6430 return getNode(Opcode, DL, VTList, Ops);
6433 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6434 SDValue N1, SDValue N2, SDValue N3) {
6435 SDValue Ops[] = { N1, N2, N3 };
6436 return getNode(Opcode, DL, VTList, Ops);
6439 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6440 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
6441 SDValue Ops[] = { N1, N2, N3, N4 };
6442 return getNode(Opcode, DL, VTList, Ops);
6445 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
6446 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
6448 SDValue Ops[] = { N1, N2, N3, N4, N5 };
6449 return getNode(Opcode, DL, VTList, Ops);
6452 SDVTList SelectionDAG::getVTList(EVT VT) {
6453 return makeVTList(SDNode::getValueTypeList(VT), 1);
6456 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
6457 FoldingSetNodeID ID;
6459 ID.AddInteger(VT1.getRawBits());
6460 ID.AddInteger(VT2.getRawBits());
6463 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
6465 EVT *Array = Allocator.Allocate<EVT>(2);
6468 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
6469 VTListMap.InsertNode(Result, IP);
6471 return Result->getSDVTList();
6474 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
6475 FoldingSetNodeID ID;
6477 ID.AddInteger(VT1.getRawBits());
6478 ID.AddInteger(VT2.getRawBits());
6479 ID.AddInteger(VT3.getRawBits());
6482 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
6484 EVT *Array = Allocator.Allocate<EVT>(3);
6488 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
6489 VTListMap.InsertNode(Result, IP);
6491 return Result->getSDVTList();
6494 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
6495 FoldingSetNodeID ID;
6497 ID.AddInteger(VT1.getRawBits());
6498 ID.AddInteger(VT2.getRawBits());
6499 ID.AddInteger(VT3.getRawBits());
6500 ID.AddInteger(VT4.getRawBits());
6503 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
6505 EVT *Array = Allocator.Allocate<EVT>(4);
6510 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
6511 VTListMap.InsertNode(Result, IP);
6513 return Result->getSDVTList();
6516 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
6517 unsigned NumVTs = VTs.size();
6518 FoldingSetNodeID ID;
6519 ID.AddInteger(NumVTs);
6520 for (unsigned index = 0; index < NumVTs; index++) {
6521 ID.AddInteger(VTs[index].getRawBits());
6525 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
6527 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
6528 std::copy(VTs.begin(), VTs.end(), Array);
6529 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
6530 VTListMap.InsertNode(Result, IP);
6532 return Result->getSDVTList();
6536 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
6537 /// specified operands. If the resultant node already exists in the DAG,
6538 /// this does not modify the specified node; instead, it returns the node that
6539 /// already exists. If the resultant node does not exist in the DAG, the
6540 /// input node is returned. As a degenerate case, if you specify the same
6541 /// input operands as the node already has, the input node is returned.
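/// Illustrative usage (a sketch; N, NewLHS, and NewRHS are hypothetical):
///   SDNode *Result = DAG.UpdateNodeOperands(N, NewLHS, NewRHS);
///   if (Result != N) {
///     // An identical node already existed in the DAG; N was not modified.
///   }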
6542 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
6543 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
6545 // Check to see if there is no change.
6546 if (Op == N->getOperand(0)) return N;
6548 // See if the modified node already exists.
6549 void *InsertPos = nullptr;
6550 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
6553 // Nope it doesn't. Remove the node from its current place in the maps.
6555 if (!RemoveNodeFromCSEMaps(N))
6556 InsertPos = nullptr;
6558 // Now we update the operands.
6559 N->OperandList[0].set(Op);
6561 // If this gets put into a CSE map, add it.
6562 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
6566 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
6567 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
6569 // Check to see if there is no change.
6570 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
6571 return N; // No operands changed, just return the input node.
6573 // See if the modified node already exists.
6574 void *InsertPos = nullptr;
6575 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
6578 // Nope it doesn't. Remove the node from its current place in the maps.
6580 if (!RemoveNodeFromCSEMaps(N))
6581 InsertPos = nullptr;
6583 // Now we update the operands.
6584 if (N->OperandList[0] != Op1)
6585 N->OperandList[0].set(Op1);
6586 if (N->OperandList[1] != Op2)
6587 N->OperandList[1].set(Op2);
6589 // If this gets put into a CSE map, add it.
6590 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
6594 SDNode *SelectionDAG::
6595 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
6596 SDValue Ops[] = { Op1, Op2, Op3 };
6597 return UpdateNodeOperands(N, Ops);
6600 SDNode *SelectionDAG::
6601 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
6602 SDValue Op3, SDValue Op4) {
6603 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
6604 return UpdateNodeOperands(N, Ops);
6607 SDNode *SelectionDAG::
6608 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
6609 SDValue Op3, SDValue Op4, SDValue Op5) {
6610 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
6611 return UpdateNodeOperands(N, Ops);
6614 SDNode *SelectionDAG::
6615 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
6616 unsigned NumOps = Ops.size();
6617 assert(N->getNumOperands() == NumOps &&
6618 "Update with wrong number of operands");
6620 // If no operands changed just return the input node.
6621 if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
6624 // See if the modified node already exists.
6625 void *InsertPos = nullptr;
6626 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
6629 // Nope it doesn't. Remove the node from its current place in the maps.
6631 if (!RemoveNodeFromCSEMaps(N))
6632 InsertPos = nullptr;
6634 // Now we update the operands.
6635 for (unsigned i = 0; i != NumOps; ++i)
6636 if (N->OperandList[i] != Ops[i])
6637 N->OperandList[i].set(Ops[i]);
6639 // If this gets put into a CSE map, add it.
6640 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
6644 /// DropOperands - Release the operands and set this node to have
6646 void SDNode::DropOperands() {
6647 // Unlike the code in MorphNodeTo that does this, we don't need to
6648 // watch for dead nodes here.
6649 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
6655 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
6658 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6660 SDVTList VTs = getVTList(VT);
6661 return SelectNodeTo(N, MachineOpc, VTs, None);
6664 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6665 EVT VT, SDValue Op1) {
6666 SDVTList VTs = getVTList(VT);
6667 SDValue Ops[] = { Op1 };
6668 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6671 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6672 EVT VT, SDValue Op1,
6674 SDVTList VTs = getVTList(VT);
6675 SDValue Ops[] = { Op1, Op2 };
6676 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6679 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6680 EVT VT, SDValue Op1,
6681 SDValue Op2, SDValue Op3) {
6682 SDVTList VTs = getVTList(VT);
6683 SDValue Ops[] = { Op1, Op2, Op3 };
6684 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6687 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6688 EVT VT, ArrayRef<SDValue> Ops) {
6689 SDVTList VTs = getVTList(VT);
6690 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6693 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6694 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
6695 SDVTList VTs = getVTList(VT1, VT2);
6696 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6699 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6701 SDVTList VTs = getVTList(VT1, VT2);
6702 return SelectNodeTo(N, MachineOpc, VTs, None);
6705 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6706 EVT VT1, EVT VT2, EVT VT3,
6707 ArrayRef<SDValue> Ops) {
6708 SDVTList VTs = getVTList(VT1, VT2, VT3);
6709 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6712 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6714 SDValue Op1, SDValue Op2) {
6715 SDVTList VTs = getVTList(VT1, VT2);
6716 SDValue Ops[] = { Op1, Op2 };
6717 return SelectNodeTo(N, MachineOpc, VTs, Ops);
6720 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
6721 SDVTList VTs,ArrayRef<SDValue> Ops) {
6722 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
6723 // Reset the NodeID to -1.
6724 New->setNodeId(-1);
6725 if (New != N) {
6726 ReplaceAllUsesWith(N, New);
6732 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
6733 /// the line number information on the merged node, since it is not possible
6734 /// to preserve the information that the operation is associated with multiple
6735 /// lines. This makes the debugger work better at -O0, where there is a higher
6736 /// probability of other instructions being associated with that line.
6738 /// For IROrder, we keep the smaller of the two
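/// For example (sketch), if two IR instructions from different source lines
/// CSE to the same node at -O0, the merged node keeps no DebugLoc rather than
/// arbitrarily attributing the operation to one of the lines.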
6739 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
6740 DebugLoc NLoc = N->getDebugLoc();
6741 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
6742 N->setDebugLoc(DebugLoc());
6744 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
6745 N->setIROrder(Order);
6749 /// MorphNodeTo - This *mutates* the specified node to have the specified
6750 /// return type, opcode, and operands.
6752 /// Note that MorphNodeTo returns the resultant node. If there is already a
6753 /// node of the specified opcode and operands, it returns that node instead of
6754 /// the current one. Note that the SDLoc need not be the same.
6756 /// Using MorphNodeTo is faster than creating a new node and swapping it in
6757 /// with ReplaceAllUsesWith both because it often avoids allocating a new
6758 /// node, and because it doesn't require CSE recalculation for any of
6759 /// the node's users.
6761 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
6762 /// As a consequence, it isn't appropriate to use from within the DAG combiner
6763 /// or the legalizer, which maintain worklists that would need to be updated
6764 /// when deleting things.
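/// Illustrative usage (a sketch; instruction selection normally reaches this
/// through SelectNodeTo, with TargetOpc standing in for a hypothetical target
/// machine opcode):
///   SDNode *New = CurDAG->SelectNodeTo(N, TargetOpc, MVT::i32, Op0, Op1);
/// N is morphed in place when possible; if an identical node already exists,
/// SelectNodeTo returns it and replaces all uses of N.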
6765 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
6766 SDVTList VTs, ArrayRef<SDValue> Ops) {
6767 // If an identical node already exists, use it.
6769 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
6770 FoldingSetNodeID ID;
6771 AddNodeIDNode(ID, Opc, VTs, Ops);
6772 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
6773 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
6776 if (!RemoveNodeFromCSEMaps(N))
6779 // Start the morphing.
6781 N->ValueList = VTs.VTs;
6782 N->NumValues = VTs.NumVTs;
6784 // Clear the operands list, updating used nodes to remove this from their
6785 // use list. Keep track of any operands that become dead as a result.
6786 SmallPtrSet<SDNode*, 16> DeadNodeSet;
6787 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
6789 SDNode *Used = Use.getNode();
6791 if (Used->use_empty())
6792 DeadNodeSet.insert(Used);
6795 // For MachineNode, initialize the memory references information.
6796 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
6797 MN->setMemRefs(nullptr, nullptr);
6799 // Swap for an appropriately sized array from the recycler.
6801 createOperands(N, Ops);
6803 // Delete any nodes that are still dead after adding the uses for the
6804 // new operands.
6805 if (!DeadNodeSet.empty()) {
6806 SmallVector<SDNode *, 16> DeadNodes;
6807 for (SDNode *N : DeadNodeSet)
6809 DeadNodes.push_back(N);
6810 RemoveDeadNodes(DeadNodes);
6814 CSEMap.InsertNode(N, IP); // Memoize the new node.
6818 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
6819 unsigned OrigOpc = Node->getOpcode();
6821 bool IsUnary = false;
6822 bool IsTernary = false;
6825 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
6826 case ISD::STRICT_FADD: NewOpc = ISD::FADD; break;
6827 case ISD::STRICT_FSUB: NewOpc = ISD::FSUB; break;
6828 case ISD::STRICT_FMUL: NewOpc = ISD::FMUL; break;
6829 case ISD::STRICT_FDIV: NewOpc = ISD::FDIV; break;
6830 case ISD::STRICT_FREM: NewOpc = ISD::FREM; break;
6831 case ISD::STRICT_FMA: NewOpc = ISD::FMA; IsTernary = true; break;
6832 case ISD::STRICT_FSQRT: NewOpc = ISD::FSQRT; IsUnary = true; break;
6833 case ISD::STRICT_FPOW: NewOpc = ISD::FPOW; break;
6834 case ISD::STRICT_FPOWI: NewOpc = ISD::FPOWI; break;
6835 case ISD::STRICT_FSIN: NewOpc = ISD::FSIN; IsUnary = true; break;
6836 case ISD::STRICT_FCOS: NewOpc = ISD::FCOS; IsUnary = true; break;
6837 case ISD::STRICT_FEXP: NewOpc = ISD::FEXP; IsUnary = true; break;
6838 case ISD::STRICT_FEXP2: NewOpc = ISD::FEXP2; IsUnary = true; break;
6839 case ISD::STRICT_FLOG: NewOpc = ISD::FLOG; IsUnary = true; break;
6840 case ISD::STRICT_FLOG10: NewOpc = ISD::FLOG10; IsUnary = true; break;
6841 case ISD::STRICT_FLOG2: NewOpc = ISD::FLOG2; IsUnary = true; break;
6842 case ISD::STRICT_FRINT: NewOpc = ISD::FRINT; IsUnary = true; break;
6843 case ISD::STRICT_FNEARBYINT:
6844 NewOpc = ISD::FNEARBYINT;
6849 // We're taking this node out of the chain, so we need to re-link things.
6850 SDValue InputChain = Node->getOperand(0);
6851 SDValue OutputChain = SDValue(Node, 1);
6852 ReplaceAllUsesOfValueWith(OutputChain, InputChain);
6854 SDVTList VTs = getVTList(Node->getOperand(1).getValueType());
6855 SDNode *Res = nullptr;
6857 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1) });
6859 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
6860 Node->getOperand(2),
6861 Node->getOperand(3)});
6863 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
6864 Node->getOperand(2) });
6866 // MorphNodeTo can operate in two ways: if an existing node with the
6867 // specified operands exists, it can just return it. Otherwise, it
6868 // updates the node in place to have the requested operands.
6870 // If we updated the node in place, reset the node ID. To the isel,
6871 // this should be just like a newly allocated machine node.
6874 ReplaceAllUsesWith(Node, Res);
6875 RemoveDeadNode(Node);
6881 /// getMachineNode - These are used for target selectors to create a new node
6882 /// with specified return type(s), MachineInstr opcode, and operands.
6884 /// Note that getMachineNode returns the resultant node. If there is already a
6885 /// node of the specified opcode and operands, it returns that node instead of
6886 /// creating a new one.
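///
/// Usage sketch (illustrative only; "MyTarget::ADDrr" is a hypothetical
/// target-specific opcode, and CurDAG, DL, LHS and RHS are assumed to be in
/// scope inside a target's instruction selector):
/// \code
///   MachineSDNode *Add =
///       CurDAG->getMachineNode(MyTarget::ADDrr, DL, MVT::i32, LHS, RHS);
///   SDValue Sum(Add, 0); // first (and only) result of the machine node
/// \endcode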
6887 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6889 SDVTList VTs = getVTList(VT);
6890 return getMachineNode(Opcode, dl, VTs, None);
6893 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6894 EVT VT, SDValue Op1) {
6895 SDVTList VTs = getVTList(VT);
6896 SDValue Ops[] = { Op1 };
6897 return getMachineNode(Opcode, dl, VTs, Ops);
6900 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6901 EVT VT, SDValue Op1, SDValue Op2) {
6902 SDVTList VTs = getVTList(VT);
6903 SDValue Ops[] = { Op1, Op2 };
6904 return getMachineNode(Opcode, dl, VTs, Ops);
6907 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6908 EVT VT, SDValue Op1, SDValue Op2,
6910 SDVTList VTs = getVTList(VT);
6911 SDValue Ops[] = { Op1, Op2, Op3 };
6912 return getMachineNode(Opcode, dl, VTs, Ops);
6915 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6916 EVT VT, ArrayRef<SDValue> Ops) {
6917 SDVTList VTs = getVTList(VT);
6918 return getMachineNode(Opcode, dl, VTs, Ops);
6921 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6922 EVT VT1, EVT VT2, SDValue Op1,
6924 SDVTList VTs = getVTList(VT1, VT2);
6925 SDValue Ops[] = { Op1, Op2 };
6926 return getMachineNode(Opcode, dl, VTs, Ops);
6929 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6930 EVT VT1, EVT VT2, SDValue Op1,
6931 SDValue Op2, SDValue Op3) {
6932 SDVTList VTs = getVTList(VT1, VT2);
6933 SDValue Ops[] = { Op1, Op2, Op3 };
6934 return getMachineNode(Opcode, dl, VTs, Ops);
6937 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6939 ArrayRef<SDValue> Ops) {
6940 SDVTList VTs = getVTList(VT1, VT2);
6941 return getMachineNode(Opcode, dl, VTs, Ops);
6944 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6945 EVT VT1, EVT VT2, EVT VT3,
6946 SDValue Op1, SDValue Op2) {
6947 SDVTList VTs = getVTList(VT1, VT2, VT3);
6948 SDValue Ops[] = { Op1, Op2 };
6949 return getMachineNode(Opcode, dl, VTs, Ops);
6952 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6953 EVT VT1, EVT VT2, EVT VT3,
6954 SDValue Op1, SDValue Op2,
6956 SDVTList VTs = getVTList(VT1, VT2, VT3);
6957 SDValue Ops[] = { Op1, Op2, Op3 };
6958 return getMachineNode(Opcode, dl, VTs, Ops);
6961 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6962 EVT VT1, EVT VT2, EVT VT3,
6963 ArrayRef<SDValue> Ops) {
6964 SDVTList VTs = getVTList(VT1, VT2, VT3);
6965 return getMachineNode(Opcode, dl, VTs, Ops);
6968 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
6969 ArrayRef<EVT> ResultTys,
6970 ArrayRef<SDValue> Ops) {
6971 SDVTList VTs = getVTList(ResultTys);
6972 return getMachineNode(Opcode, dl, VTs, Ops);
6975 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
6977 ArrayRef<SDValue> Ops) {
6978 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
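// Note: nodes whose last result type is MVT::Glue are never CSE'd. A glue
// result ties the node to the specific node it is glued to, so a
// glue-producing machine node must stay unique rather than being shared
// through the CSE map; that is what the DoCSE flag above guards.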
6983 FoldingSetNodeID ID;
6984 AddNodeIDNode(ID, ~Opcode, VTs, Ops);
6986 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
6987 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
6991 // Allocate a new MachineSDNode.
6992 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6993 createOperands(N, Ops);
6996 CSEMap.InsertNode(N, IP);
7002 /// getTargetExtractSubreg - A convenience function for creating
7003 /// TargetOpcode::EXTRACT_SUBREG nodes.
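///
/// A minimal sketch (sub-register indices are target-defined; the
/// "MyTarget::sub_lo32" name below is purely illustrative, and CurDAG, DL and
/// Val64 are assumed to be in scope):
/// \code
///   // Extract the low 32 bits of a 64-bit value as an i32.
///   SDValue Lo = CurDAG->getTargetExtractSubreg(MyTarget::sub_lo32, DL,
///                                               MVT::i32, Val64);
/// \endcode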
7004 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
7006 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
7007 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
7008 VT, Operand, SRIdxVal);
7009 return SDValue(Subreg, 0);
7012 /// getTargetInsertSubreg - A convenience function for creating
7013 /// TargetOpcode::INSERT_SUBREG nodes.
7014 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
7015 SDValue Operand, SDValue Subreg) {
7016 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
7017 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
7018 VT, Operand, Subreg, SRIdxVal);
7019 return SDValue(Result, 0);
7022 /// getNodeIfExists - Get the specified node if it's already available, or
7023 /// else return null.
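///
/// For example, a combine can probe for an existing node without creating a
/// new one (a sketch assuming DAG, LHS and RHS are in scope and the default
/// node flags are acceptable):
/// \code
///   SDVTList VTs = DAG.getVTList(MVT::i32);
///   if (SDNode *Existing = DAG.getNodeIfExists(ISD::ADD, VTs, {LHS, RHS}))
///     return SDValue(Existing, 0); // reuse it instead of building a new ADD
/// \endcode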
7024 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
7025 ArrayRef<SDValue> Ops,
7026 const SDNodeFlags Flags) {
7027 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
7028 FoldingSetNodeID ID;
7029 AddNodeIDNode(ID, Opcode, VTList, Ops);
7031 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
7032 E->intersectFlagsWith(Flags);
7039 /// getDbgValue - Creates an SDDbgValue node.
7042 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
7043 SDNode *N, unsigned R, bool IsIndirect,
7044 const DebugLoc &DL, unsigned O) {
7045 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7046 "Expected inlined-at fields to agree");
7047 return new (DbgInfo->getAlloc())
7048 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O);
7052 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
7055 const DebugLoc &DL, unsigned O) {
7056 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7057 "Expected inlined-at fields to agree");
7058 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O);
7062 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
7063 DIExpression *Expr, unsigned FI,
7066 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7067 "Expected inlined-at fields to agree");
7068 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, FI, DL, O);
7071 void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
7072 unsigned OffsetInBits, unsigned SizeInBits,
7073 bool InvalidateDbg) {
7074 SDNode *FromNode = From.getNode();
7075 SDNode *ToNode = To.getNode();
7076 assert(FromNode && ToNode && "Can't modify dbg values");
7079 // TODO: assert(From != To && "Redundant dbg value transfer");
7080 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
7081 if (From == To || FromNode == ToNode)
7084 if (!FromNode->getHasDebugValue())
7087 SmallVector<SDDbgValue *, 2> ClonedDVs;
7088 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
7089 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated())
7092 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
7094 // Just transfer the dbg value attached to From.
7095 if (Dbg->getResNo() != From.getResNo())
7098 DIVariable *Var = Dbg->getVariable();
7099 auto *Expr = Dbg->getExpression();
7100 // If a fragment is requested, update the expression.
7102 // When splitting a larger (e.g., sign-extended) value whose
7103 // lower bits are described with an SDDbgValue, do not attempt
7104 // to transfer the SDDbgValue to the upper bits.
7105 if (auto FI = Expr->getFragmentInfo())
7106 if (OffsetInBits + SizeInBits > FI->SizeInBits)
7108 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
7114 // Clone the SDDbgValue and move it to To.
7116 getDbgValue(Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(),
7117 Dbg->getDebugLoc(), Dbg->getOrder());
7118 ClonedDVs.push_back(Clone);
7121 Dbg->setIsInvalidated();
7124 for (SDDbgValue *Dbg : ClonedDVs)
7125 AddDbgValue(Dbg, ToNode, false);
7128 void SelectionDAG::salvageDebugInfo(SDNode &N) {
7129 if (!N.getHasDebugValue())
7132 SmallVector<SDDbgValue *, 2> ClonedDVs;
7133 for (auto DV : GetDbgValues(&N)) {
7134 if (DV->isInvalidated())
7136 switch (N.getOpcode()) {
7140 SDValue N0 = N.getOperand(0);
7141 SDValue N1 = N.getOperand(1);
7142 if (!isConstantIntBuildVectorOrConstantInt(N0) &&
7143 isConstantIntBuildVectorOrConstantInt(N1)) {
7144 uint64_t Offset = N.getConstantOperandVal(1);
7145 // Rewrite an ADD constant node into a DIExpression. Since we are
7146 // performing arithmetic to compute the variable's *value* in the
7147 // DIExpression, we need to mark the expression with a
7148 // DW_OP_stack_value.
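// For example, if the debug value currently points at N = (add X, 8), it is
// re-pointed at X with an expression along the lines of
// (DW_OP_plus_uconst 8, DW_OP_stack_value), i.e. "the variable's value is
// X + 8". (The exact operator spelling is whatever DIExpression::prepend
// produces.)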
7149 auto *DIExpr = DV->getExpression();
7150 DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref, Offset,
7151 DIExpression::NoDeref,
7152 DIExpression::WithStackValue);
7154 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
7155 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
7156 ClonedDVs.push_back(Clone);
7157 DV->setIsInvalidated();
7158 DEBUG(dbgs() << "SALVAGE: Rewriting"; N0.getNode()->dumprFull(this);
7159 dbgs() << " into " << *DIExpr << '\n');
7164 for (SDDbgValue *Dbg : ClonedDVs)
7165 AddDbgValue(Dbg, Dbg->getSDNode(), false);
7170 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
7171 /// pointed to by a use iterator is deleted, increment the use iterator
7172 /// so that it doesn't dangle.
7174 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
7175 SDNode::use_iterator &UI;
7176 SDNode::use_iterator &UE;
7178 void NodeDeleted(SDNode *N, SDNode *E) override {
7179 // Increment the iterator as needed.
7180 while (UI != UE && N == *UI)
7185 RAUWUpdateListener(SelectionDAG &d,
7186 SDNode::use_iterator &ui,
7187 SDNode::use_iterator &ue)
7188 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
7191 } // end anonymous namespace
7193 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
7194 /// This can cause recursive merging of nodes in the DAG.
7196 /// This version assumes From has a single result value.
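///
/// A typical use in a combine (a sketch; DAG and N are assumed to be in
/// scope, and N is the node being folded away):
/// \code
///   // Fold (add X, 0): redirect every user of N to X and let N go dead.
///   SDValue X = N->getOperand(0);
///   DAG.ReplaceAllUsesWith(SDValue(N, 0), X);
/// \endcode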
7198 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
7199 SDNode *From = FromN.getNode();
7200 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
7201 "Cannot replace with this method!");
7202 assert(From != To.getNode() && "Cannot replace uses of with self");
7204 // Preserve Debug Values
7205 transferDbgValues(FromN, To);
7207 // Iterate over all the existing uses of From. New uses will be added
7208 // to the beginning of the use list, which we avoid visiting.
7209 // This specifically avoids visiting uses of From that arise while the
7210 // replacement is happening, because any such uses would be the result
7211 // of CSE: If an existing node looks like From after one of its operands
7212 // is replaced by To, we don't want to replace all of its users with To
7213 // as well. See PR3018 for more info.
7214 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
7215 RAUWUpdateListener Listener(*this, UI, UE);
7219 // This node is about to morph, remove its old self from the CSE maps.
7220 RemoveNodeFromCSEMaps(User);
7222 // A user can appear in a use list multiple times, and when this
7223 // happens the uses are usually next to each other in the list.
7224 // To help reduce the number of CSE recomputations, process all
7225 // the uses of this user that we can find this way.
7227 SDUse &Use = UI.getUse();
7230 } while (UI != UE && *UI == User);
7232 // Now that we have modified User, add it back to the CSE maps. If it
7233 // already exists there, recursively merge the results together.
7234 AddModifiedNodeToCSEMaps(User);
7237 // If we just RAUW'd the root, take note.
7238 if (FromN == getRoot())
7242 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
7243 /// This can cause recursive merging of nodes in the DAG.
7245 /// This version assumes that for each value of From, there is a
7246 /// corresponding value in To in the same position with the same type.
7248 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
7250 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
7251 assert((!From->hasAnyUseOfValue(i) ||
7252 From->getValueType(i) == To->getValueType(i)) &&
7253 "Cannot use this version of ReplaceAllUsesWith!");
7256 // Handle the trivial case.
7260 // Preserve Debug Info. Only do this if there's a use.
7261 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
7262 if (From->hasAnyUseOfValue(i)) {
7263 assert((i < To->getNumValues()) && "Invalid To location");
7264 transferDbgValues(SDValue(From, i), SDValue(To, i));
7267 // Iterate over just the existing users of From. See the comments in
7268 // the ReplaceAllUsesWith above.
7269 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
7270 RAUWUpdateListener Listener(*this, UI, UE);
7274 // This node is about to morph, remove its old self from the CSE maps.
7275 RemoveNodeFromCSEMaps(User);
7277 // A user can appear in a use list multiple times, and when this
7278 // happens the uses are usually next to each other in the list.
7279 // To help reduce the number of CSE recomputations, process all
7280 // the uses of this user that we can find this way.
7282 SDUse &Use = UI.getUse();
7285 } while (UI != UE && *UI == User);
7287 // Now that we have modified User, add it back to the CSE maps. If it
7288 // already exists there, recursively merge the results together.
7289 AddModifiedNodeToCSEMaps(User);
7292 // If we just RAUW'd the root, take note.
7293 if (From == getRoot().getNode())
7294 setRoot(SDValue(To, getRoot().getResNo()));
7297 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
7298 /// This can cause recursive merging of nodes in the DAG.
7300 /// This version can replace From regardless of how many result values it
7301 /// has. To must match the number and types of values produced by From.
7302 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
7303 if (From->getNumValues() == 1) // Handle the simple case efficiently.
7304 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
7306 // Preserve Debug Info.
7307 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
7308 transferDbgValues(SDValue(From, i), *To);
7310 // Iterate over just the existing users of From. See the comments in
7311 // the ReplaceAllUsesWith above.
7312 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
7313 RAUWUpdateListener Listener(*this, UI, UE);
7317 // This node is about to morph, remove its old self from the CSE maps.
7318 RemoveNodeFromCSEMaps(User);
7320 // A user can appear in a use list multiple times, and when this
7321 // happens the uses are usually next to each other in the list.
7322 // To help reduce the number of CSE recomputations, process all
7323 // the uses of this user that we can find this way.
7325 SDUse &Use = UI.getUse();
7326 const SDValue &ToOp = To[Use.getResNo()];
7329 } while (UI != UE && *UI == User);
7331 // Now that we have modified User, add it back to the CSE maps. If it
7332 // already exists there, recursively merge the results together.
7333 AddModifiedNodeToCSEMaps(User);
7336 // If we just RAUW'd the root, take note.
7337 if (From == getRoot().getNode())
7338 setRoot(SDValue(To[getRoot().getResNo()]));
7341 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
7342 /// uses of other values produced by From.getNode() alone. This can cause
7343 /// recursive merging of nodes in the DAG.
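///
/// For example, only the chain result of a load can be redirected while its
/// value result keeps its users (a sketch; DAG and Ld are assumed in scope):
/// \code
///   // Splice the load out of the chain: users of the load's chain result
///   // now depend on the load's own input chain instead.
///   DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Ld->getChain());
/// \endcode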
7344 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
7345 // Handle the really simple, really trivial case efficiently.
7346 if (From == To) return;
7348 // Handle the simple, trivial case efficiently.
7349 if (From.getNode()->getNumValues() == 1) {
7350 ReplaceAllUsesWith(From, To);
7354 // Preserve Debug Info.
7355 transferDbgValues(From, To);
7357 // Iterate over just the existing users of From. See the comments in
7358 // the ReplaceAllUsesWith above.
7359 SDNode::use_iterator UI = From.getNode()->use_begin(),
7360 UE = From.getNode()->use_end();
7361 RAUWUpdateListener Listener(*this, UI, UE);
7364 bool UserRemovedFromCSEMaps = false;
7366 // A user can appear in a use list multiple times, and when this
7367 // happens the uses are usually next to each other in the list.
7368 // To help reduce the number of CSE recomputations, process all
7369 // the uses of this user that we can find this way.
7371 SDUse &Use = UI.getUse();
7373 // Skip uses of different values from the same node.
7374 if (Use.getResNo() != From.getResNo()) {
7379 // If this node hasn't been modified yet, it's still in the CSE maps,
7380 // so remove its old self from the CSE maps.
7381 if (!UserRemovedFromCSEMaps) {
7382 RemoveNodeFromCSEMaps(User);
7383 UserRemovedFromCSEMaps = true;
7388 } while (UI != UE && *UI == User);
7390 // We are iterating over all uses of the From node, so if a use
7391 // doesn't use the specific value, no changes are made.
7392 if (!UserRemovedFromCSEMaps)
7395 // Now that we have modified User, add it back to the CSE maps. If it
7396 // already exists there, recursively merge the results together.
7397 AddModifiedNodeToCSEMaps(User);
7400 // If we just RAUW'd the root, take note.
7401 if (From == getRoot())
7407 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
7408 /// to record information about a use.
7415 /// operator< - Sort Memos by User.
7416 bool operator<(const UseMemo &L, const UseMemo &R) {
7417 return (intptr_t)L.User < (intptr_t)R.User;
7420 } // end anonymous namespace
7422 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
7423 /// uses of other values produced by From.getNode() alone. The same value
7424 /// may appear in both the From and To list; this can cause recursive
7425 /// merging of nodes in the DAG.
7426 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
7429 // Handle the simple, trivial case efficiently.
7431 return ReplaceAllUsesOfValueWith(*From, *To);
7433 transferDbgValues(*From, *To);
7435 // Record all the existing uses up front. This makes it easier to handle
7436 // new uses that are introduced during the replacement process.
7438 SmallVector<UseMemo, 4> Uses;
7439 for (unsigned i = 0; i != Num; ++i) {
7440 unsigned FromResNo = From[i].getResNo();
7441 SDNode *FromNode = From[i].getNode();
7442 for (SDNode::use_iterator UI = FromNode->use_begin(),
7443 E = FromNode->use_end(); UI != E; ++UI) {
7444 SDUse &Use = UI.getUse();
7445 if (Use.getResNo() == FromResNo) {
7446 UseMemo Memo = { *UI, i, &Use };
7447 Uses.push_back(Memo);
7452 // Sort the uses, so that all the uses from a given User are together.
7453 std::sort(Uses.begin(), Uses.end());
7455 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
7456 UseIndex != UseIndexEnd; ) {
7457 // We know that this user uses some value of From. If it is the right
7458 // value, update it.
7459 SDNode *User = Uses[UseIndex].User;
7461 // This node is about to morph, remove its old self from the CSE maps.
7462 RemoveNodeFromCSEMaps(User);
7464 // The Uses array is sorted, so all the uses for a given User
7465 // are next to each other in the list.
7466 // To help reduce the number of CSE recomputations, process all
7467 // the uses of this user that we can find this way.
7469 unsigned i = Uses[UseIndex].Index;
7470 SDUse &Use = *Uses[UseIndex].Use;
7474 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
7476 // Now that we have modified User, add it back to the CSE maps. If it
7477 // already exists there, recursively merge the results together.
7478 AddModifiedNodeToCSEMaps(User);
7482 /// AssignTopologicalOrder - Assign a unique node id to each node in the DAG
7483 /// based on its topological order. Returns the number of ids assigned, i.e.
7484 /// the maximum node id plus one.
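///
/// A sketch of how the result is typically consumed (DAG assumed in scope):
/// \code
///   DAG.AssignTopologicalOrder();
///   // Every operand of a node now has a strictly smaller NodeId than the
///   // node itself, so iterating allnodes() visits definitions before uses.
/// \endcode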
7485 unsigned SelectionDAG::AssignTopologicalOrder() {
7486 unsigned DAGSize = 0;
7488 // SortedPos tracks the progress of the algorithm. Nodes before it are
7489 // sorted, nodes after it are unsorted. When the algorithm completes
7490 // it is at the end of the list.
7491 allnodes_iterator SortedPos = allnodes_begin();
7493 // Visit all the nodes. Move nodes with no operands to the front of
7494 // the list immediately. Annotate nodes that do have operands with their
7495 // operand count. Before we do this, the Node Id fields of the nodes
7496 // may contain arbitrary values. After, the Node Id fields for nodes
7497 // before SortedPos will contain the topological sort index, and the
7498 // Node Id fields for nodes at SortedPos and after will contain the
7499 // count of outstanding operands.
7500 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
7502 checkForCycles(N, this);
7503 unsigned Degree = N->getNumOperands();
7505 // A node with no operands: move it into sorted position immediately.
7506 N->setNodeId(DAGSize++);
7507 allnodes_iterator Q(N);
7509 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
7510 assert(SortedPos != AllNodes.end() && "Overran node list");
7513 // Temporarily use the Node Id as scratch space for the degree count.
7514 N->setNodeId(Degree);
7518 // Visit all the nodes. As we iterate, move nodes into sorted order,
7519 // such that by the time the end is reached all nodes will be sorted.
7520 for (SDNode &Node : allnodes()) {
7522 checkForCycles(N, this);
7523 // N is in sorted position, so each of its uses now has one fewer
7524 // operand that still needs to be sorted.
7525 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
7528 unsigned Degree = P->getNodeId();
7529 assert(Degree != 0 && "Invalid node degree");
7532 // All of P's operands are sorted, so P may be sorted now.
7533 P->setNodeId(DAGSize++);
7534 if (P->getIterator() != SortedPos)
7535 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
7536 assert(SortedPos != AllNodes.end() && "Overran node list");
7539 // Update P's outstanding operand count.
7540 P->setNodeId(Degree);
7543 if (Node.getIterator() == SortedPos) {
7545 allnodes_iterator I(N);
7547 dbgs() << "Overran sorted position:\n";
7548 S->dumprFull(this); dbgs() << "\n";
7549 dbgs() << "Checking if this is due to cycles\n";
7550 checkForCycles(this, true);
7552 llvm_unreachable(nullptr);
7556 assert(SortedPos == AllNodes.end() &&
7557 "Topological sort incomplete!");
7558 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
7559 "First node in topological sort is not the entry token!");
7560 assert(AllNodes.front().getNodeId() == 0 &&
7561 "First node in topological sort has non-zero id!");
7562 assert(AllNodes.front().getNumOperands() == 0 &&
7563 "First node in topological sort has operands!");
7564 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
7565 "Last node in topologic sort has unexpected id!");
7566 assert(AllNodes.back().use_empty() &&
7567 "Last node in topologic sort has users!");
7568 assert(DAGSize == allnodes_size() && "Node count mismatch!");
7572 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null, that means the
7573 /// value is produced by SD.
7574 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
7576 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
7577 SD->setHasDebugValue(true);
7579 DbgInfo->add(DB, SD, isParameter);
7582 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
7584 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
7585 // The new memory operation must have the same position as the old load in
7586 // terms of memory dependency. Create a TokenFactor for the old load and new
7587 // memory operation and update uses of the old load's output chain to use
7588 // that TokenFactor.
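//
// Illustratively:
//   before:  users ----> (OldLoad, 1)
//   after:   users ----> TokenFactor((OldLoad, 1), (NewMemOp, 1))
// so anything that was ordered after the old load is now also ordered after
// the new memory operation.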
7589 SDValue OldChain = SDValue(OldLoad, 1);
7590 SDValue NewChain = SDValue(NewMemOp.getNode(), 1);
7591 if (!OldLoad->hasAnyUseOfValue(1))
7594 SDValue TokenFactor =
7595 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain);
7596 ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
7597 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain);
7601 //===----------------------------------------------------------------------===//
7603 //===----------------------------------------------------------------------===//
7605 bool llvm::isNullConstant(SDValue V) {
7606 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
7607 return Const != nullptr && Const->isNullValue();
7610 bool llvm::isNullFPConstant(SDValue V) {
7611 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
7612 return Const != nullptr && Const->isZero() && !Const->isNegative();
7615 bool llvm::isAllOnesConstant(SDValue V) {
7616 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
7617 return Const != nullptr && Const->isAllOnesValue();
7620 bool llvm::isOneConstant(SDValue V) {
7621 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
7622 return Const != nullptr && Const->isOne();
7625 bool llvm::isBitwiseNot(SDValue V) {
7626 return V.getOpcode() == ISD::XOR && isAllOnesConstant(V.getOperand(1));
7629 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) {
7630 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
7633 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
7634 BitVector UndefElements;
7635 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
7637 // BuildVectors can truncate their operands. Ignore that case here.
7638 // FIXME: We blindly ignore splats which include undef, which is overly
7639 // restrictive.
7640 if (CN && UndefElements.none() &&
7641 CN->getValueType(0) == N.getValueType().getScalarType())
7648 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) {
7649 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
7652 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
7653 BitVector UndefElements;
7654 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
7656 if (CN && UndefElements.none())
7663 HandleSDNode::~HandleSDNode() {
7667 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
7669 const GlobalValue *GA, EVT VT,
7670 int64_t o, unsigned char TF)
7671 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
7675 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
7676 EVT VT, unsigned SrcAS,
7678 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
7679 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
7681 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
7682 SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
7683 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
7684 MemSDNodeBits.IsVolatile = MMO->isVolatile();
7685 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
7686 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
7687 MemSDNodeBits.IsInvariant = MMO->isInvariant();
7689 // We check here that the size of the memory operand fits within the size of
7690 // the MMO. This is because the MMO might indicate only a possible address
7691 // range instead of specifying the affected memory addresses precisely.
7692 assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
7695 /// Profile - Gather unique data for the node.
7697 void SDNode::Profile(FoldingSetNodeID &ID) const {
7698 AddNodeIDNode(ID, this);
7704 std::vector<EVT> VTs;
7707 VTs.reserve(MVT::LAST_VALUETYPE);
7708 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
7709 VTs.push_back(MVT((MVT::SimpleValueType)i));
7713 } // end anonymous namespace
7715 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
7716 static ManagedStatic<EVTArray> SimpleVTArray;
7717 static ManagedStatic<sys::SmartMutex<true>> VTMutex;
7719 /// getValueTypeList - Return a pointer to the specified value type.
7721 const EVT *SDNode::getValueTypeList(EVT VT) {
7722 if (VT.isExtended()) {
7723 sys::SmartScopedLock<true> Lock(*VTMutex);
7724 return &(*EVTs->insert(VT).first);
7726 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
7727 "Value type out of range!");
7728 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
7732 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
7733 /// indicated value. This method ignores uses of other values defined by this
7734 /// operation.
7735 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
7736 assert(Value < getNumValues() && "Bad value!");
7738 // TODO: Only iterate over uses of a given value of the node
7739 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
7740 if (UI.getUse().getResNo() == Value) {
7747 // Found exactly the right number of uses?
7751 /// hasAnyUseOfValue - Return true if there are any uses of the indicated
7752 /// value. This method ignores uses of other values defined by this operation.
7753 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
7754 assert(Value < getNumValues() && "Bad value!");
7756 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
7757 if (UI.getUse().getResNo() == Value)
7763 /// isOnlyUserOf - Return true if this node is the only use of N.
7764 bool SDNode::isOnlyUserOf(const SDNode *N) const {
7766 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
7777 /// Return true if the only users of N are contained in Nodes.
7778 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
7780 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
7782 if (llvm::any_of(Nodes,
7783 [&User](const SDNode *Node) { return User == Node; }))
7792 /// isOperandOf - Return true if this value is an operand of N.
7793 bool SDValue::isOperandOf(const SDNode *N) const {
7794 for (const SDValue &Op : N->op_values())
7800 bool SDNode::isOperandOf(const SDNode *N) const {
7801 for (const SDValue &Op : N->op_values())
7802 if (this == Op.getNode())
7807 /// reachesChainWithoutSideEffects - Return true if this operand (which must
7808 /// be a chain) reaches the specified operand without crossing any
7809 /// side-effecting instructions on any chain path. In practice, this looks
7810 /// through token factors and non-volatile loads. In order to remain efficient,
7811 /// this only looks a couple of nodes in; it does not do an exhaustive search.
7813 /// Note that we only need to examine chains when we're searching for
7814 /// side-effects; SelectionDAG requires that all side-effects are represented
7815 /// by chains, even if another operand would force a specific ordering. This
7816 /// constraint is necessary to allow transformations like splitting loads.
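///
/// For example, a combine that wants to reorder two memory operations can ask
/// whether one chain reaches the other without intervening side effects (a
/// sketch; ThisChain and OtherChain are assumed to be chain SDValues already
/// in scope):
/// \code
///   bool Safe = ThisChain.reachesChainWithoutSideEffects(OtherChain);
///   // Safe == true means nothing side-effecting lies between the two
///   // on any chain path that was examined.
/// \endcode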
7817 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
7818 unsigned Depth) const {
7819 if (*this == Dest) return true;
7821 // Don't search too deeply; we just want to be able to see through
7822 // TokenFactors, etc.
7823 if (Depth == 0) return false;
7825 // If this is a token factor, all inputs to the TF happen in parallel.
7826 if (getOpcode() == ISD::TokenFactor) {
7827 // First, try a shallow search.
7828 if (is_contained((*this)->ops(), Dest)) {
7829 // We found the chain we want as an operand of this TokenFactor.
7830 // Essentially, we reach the chain without side-effects if we could
7831 // serialize the TokenFactor into a simple chain of operations with
7832 // Dest as the last operation. This is automatically true if the
7833 // chain has one use: there are no other ordering constraints.
7834 // If the chain has more than one use, we give up: some other
7835 // use of Dest might force a side-effect between Dest and the current
7836 // node.
7837 if (Dest.hasOneUse())
7840 // Next, try a deep search: check whether every operand of the TokenFactor
7841 // reaches Dest.
7842 return llvm::all_of((*this)->ops(), [=](SDValue Op) {
7843 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
7847 // Loads don't have side effects; look through them.
7848 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
7849 if (!Ld->isVolatile())
7850 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
7855 bool SDNode::hasPredecessor(const SDNode *N) const {
7856 SmallPtrSet<const SDNode *, 32> Visited;
7857 SmallVector<const SDNode *, 16> Worklist;
7858 Worklist.push_back(this);
7859 return hasPredecessorHelper(N, Visited, Worklist);
7862 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
7863 this->Flags.intersectWith(Flags);
7866 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
7867 assert(N->getNumValues() == 1 &&
7868 "Can't unroll a vector with multiple results!");
7870 EVT VT = N->getValueType(0);
7871 unsigned NE = VT.getVectorNumElements();
7872 EVT EltVT = VT.getVectorElementType();
7875 SmallVector<SDValue, 8> Scalars;
7876 SmallVector<SDValue, 4> Operands(N->getNumOperands());
7878 // If ResNE is 0, fully unroll the vector op.
7881 else if (NE > ResNE)
7885 for (i = 0; i != NE; ++i) {
7886 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
7887 SDValue Operand = N->getOperand(j);
7888 EVT OperandVT = Operand.getValueType();
7889 if (OperandVT.isVector()) {
7890 // A vector operand; extract a single element.
7891 EVT OperandEltVT = OperandVT.getVectorElementType();
7893 getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
7894 getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
7896 // A scalar operand; just use it as is.
7897 Operands[j] = Operand;
7901 switch (N->getOpcode()) {
7903 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
7908 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
7915 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
7916 getShiftAmountOperand(Operands[0].getValueType(),
7919 case ISD::SIGN_EXTEND_INREG:
7920 case ISD::FP_ROUND_INREG: {
7921 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
7922 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
7924 getValueType(ExtVT)));
7929 for (; i < ResNE; ++i)
7930 Scalars.push_back(getUNDEF(EltVT));
7932 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
7933 return getBuildVector(VecVT, dl, Scalars);
7936 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
7940 if (LD->isVolatile() || Base->isVolatile())
7942 if (LD->isIndexed() || Base->isIndexed())
7944 if (LD->getChain() != Base->getChain())
7946 EVT VT = LD->getValueType(0);
7947 if (VT.getSizeInBits() / 8 != Bytes)
7950 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
7951 auto LocDecomp = BaseIndexOffset::match(LD, *this);
7954 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
7955 return (Dist * Bytes == Offset);
7959 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
7960 /// it cannot be inferred.
7961 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
7962 // If this is a GlobalAddress plus a constant offset, return the alignment.
7963 const GlobalValue *GV;
7964 int64_t GVOffset = 0;
7965 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
7966 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
7967 KnownBits Known(PtrWidth);
7968 llvm::computeKnownBits(GV, Known, getDataLayout());
7969 unsigned AlignBits = Known.countMinTrailingZeros();
7970 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
7972 return MinAlign(Align, GVOffset);
7975 // If this is a direct reference to a stack slot, use information about the
7976 // stack slot's alignment.
7977 int FrameIdx = 1 << 31;
7978 int64_t FrameOffset = 0;
7979 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
7980 FrameIdx = FI->getIndex();
7981 } else if (isBaseWithConstantOffset(Ptr) &&
7982 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
7984 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7985 FrameOffset = Ptr.getConstantOperandVal(1);
7988 if (FrameIdx != (1 << 31)) {
7989 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
7990 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
7998 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
7999 /// which is split (or expanded) into two not necessarily identical pieces.
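///
/// For example (a sketch; DAG is assumed to be in scope):
/// \code
///   EVT LoVT, HiVT;
///   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(EVT(MVT::v8i32));
///   // LoVT == MVT::v4i32 and HiVT == MVT::v4i32
/// \endcode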
8000 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
8001 // Currently all types are split in half.
8004 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
8006 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
8008 return std::make_pair(LoVT, HiVT);
8011 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
8012 /// low/high part.
8013 std::pair<SDValue, SDValue>
8014 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
8016 assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
8017 N.getValueType().getVectorNumElements() &&
8018 "More vector elements requested than available!");
8020 Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
8021 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
8022 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
8023 getConstant(LoVT.getVectorNumElements(), DL,
8024 TLI->getVectorIdxTy(getDataLayout())));
8025 return std::make_pair(Lo, Hi);
8028 void SelectionDAG::ExtractVectorElements(SDValue Op,
8029 SmallVectorImpl<SDValue> &Args,
8030 unsigned Start, unsigned Count) {
8031 EVT VT = Op.getValueType();
8033 Count = VT.getVectorNumElements();
8035 EVT EltVT = VT.getVectorElementType();
8036 EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
8038 for (unsigned i = Start, e = Start + Count; i != e; ++i) {
8039 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
8040 Op, getConstant(i, SL, IdxTy)));
8044 // getAddressSpace - Return the address space this GlobalAddress belongs to.
8045 unsigned GlobalAddressSDNode::getAddressSpace() const {
8046 return getGlobal()->getType()->getAddressSpace();
8049 Type *ConstantPoolSDNode::getType() const {
8050 if (isMachineConstantPoolEntry())
8051 return Val.MachineCPVal->getType();
8052 return Val.ConstVal->getType();
8055 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
8056 unsigned &SplatBitSize,
8058 unsigned MinSplatBits,
8059 bool IsBigEndian) const {
8060 EVT VT = getValueType(0);
8061 assert(VT.isVector() && "Expected a vector type");
8062 unsigned VecWidth = VT.getSizeInBits();
8063 if (MinSplatBits > VecWidth)
8066 // FIXME: The widths are based on this node's type, but build vectors can
8067 // truncate their operands.
8068 SplatValue = APInt(VecWidth, 0);
8069 SplatUndef = APInt(VecWidth, 0);
8071 // Get the bits. Bits with undefined values (when the corresponding element
8072 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
8073 // in SplatValue. If any of the values are not constant, give up and return
8074 // false.
8075 unsigned int NumOps = getNumOperands();
8076 assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
8077 unsigned EltWidth = VT.getScalarSizeInBits();
8079 for (unsigned j = 0; j < NumOps; ++j) {
8080 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
8081 SDValue OpVal = getOperand(i);
8082 unsigned BitPos = j * EltWidth;
8084 if (OpVal.isUndef())
8085 SplatUndef.setBits(BitPos, BitPos + EltWidth);
8086 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
8087 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
8088 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
8089 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
8094 // The build_vector is all constants or undefs. Find the smallest element
8095 // size that splats the vector.
8096 HasAnyUndefs = (SplatUndef != 0);
8098 // FIXME: This does not work for vectors with elements less than 8 bits.
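// Worked example: a <4 x i32> build_vector whose elements are all 0x01010101
// starts with VecWidth == 128; each halving still matches (128 -> 64 -> 32 ->
// 16 -> 8), so the loop terminates with SplatBitSize == 8.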
8099 while (VecWidth > 8) {
8100 unsigned HalfSize = VecWidth / 2;
8101 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
8102 APInt LowValue = SplatValue.trunc(HalfSize);
8103 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
8104 APInt LowUndef = SplatUndef.trunc(HalfSize);
8106 // If the two halves do not match (ignoring undef bits), stop here.
8107 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
8108 MinSplatBits > HalfSize)
8111 SplatValue = HighValue | LowValue;
8112 SplatUndef = HighUndef & LowUndef;
8114 VecWidth = HalfSize;
8117 SplatBitSize = VecWidth;
8121 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
8122 if (UndefElements) {
8123 UndefElements->clear();
8124 UndefElements->resize(getNumOperands());
8127 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
8128 SDValue Op = getOperand(i);
8131 (*UndefElements)[i] = true;
8132 } else if (!Splatted) {
8134 } else if (Splatted != Op) {
8140 assert(getOperand(0).isUndef() &&
8141 "Can only have a splat without a constant for all undefs.");
8142 return getOperand(0);
8149 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
8150 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
8154 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
8155 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
8159 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
8160 uint32_t BitWidth) const {
8161 if (ConstantFPSDNode *CN =
8162 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
8164 APSInt IntVal(BitWidth);
8165 const APFloat &APF = CN->getValueAPF();
8166 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
8171 return IntVal.exactLogBase2();
8176 bool BuildVectorSDNode::isConstant() const {
8177 for (const SDValue &Op : op_values()) {
8178 unsigned Opc = Op.getOpcode();
8179 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
8185 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
8186 // Find the first non-undef value in the shuffle mask.
8188 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
8191 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
8193 // Make sure all remaining elements are either undef or the same as the first
8194 // non-undef value.
8195 for (int Idx = Mask[i]; i != e; ++i)
8196 if (Mask[i] >= 0 && Mask[i] != Idx)
8201 // \brief Returns the SDNode if it is a constant integer BuildVector
8202 // or constant integer.
8203 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
8204 if (isa<ConstantSDNode>(N))
8206 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
8208 // Treat a GlobalAddress supporting constant offset folding as a
8209 // constant integer.
8210 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
8211 if (GA->getOpcode() == ISD::GlobalAddress &&
8212 TLI->isOffsetFoldingLegal(GA))
8217 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
8218 if (isa<ConstantFPSDNode>(N))
8221 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
8228 static void checkForCyclesHelper(const SDNode *N,
8229 SmallPtrSetImpl<const SDNode*> &Visited,
8230 SmallPtrSetImpl<const SDNode*> &Checked,
8231 const llvm::SelectionDAG *DAG) {
8232 // If this node has already been checked, don't check it again.
8233 if (Checked.count(N))
8236 // If a node has already been visited on this depth-first walk, reject it as
8237 // a cycle.
8238 if (!Visited.insert(N).second) {
8239 errs() << "Detected cycle in SelectionDAG\n";
8240 dbgs() << "Offending node:\n";
8241 N->dumprFull(DAG); dbgs() << "\n";
8245 for (const SDValue &Op : N->op_values())
8246 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);
8253 void llvm::checkForCycles(const llvm::SDNode *N,
8254 const llvm::SelectionDAG *DAG,
8258 #ifdef EXPENSIVE_CHECKS
8260 #endif // EXPENSIVE_CHECKS
8262 assert(N && "Checking nonexistent SDNode");
8263 SmallPtrSet<const SDNode*, 32> visited;
8264 SmallPtrSet<const SDNode*, 32> checked;
8265 checkForCyclesHelper(N, visited, checked, DAG);
8270 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
8271 checkForCycles(DAG->getRoot().getNode(), DAG, force);