//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;
/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the index to the requested
      // element and recurse inside the element with the end of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
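
// Illustrative example (not from the original source; %T and the index array
// are hypothetical): for %T = type { i32, [2 x i64], float }, the leaves are
// numbered i32 -> 0, the two i64 elements -> 1 and 2, and float -> 3. An
// extractvalue-style index path of {2} (the float member) therefore
// linearizes to 3:
//
//   unsigned Indices[] = {2};
//   unsigned Linear = ComputeLinearIndex(T, Indices, Indices + 1, 0);
//   // Linear == 3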
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
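
// Illustrative example (hypothetical types, not from the original source):
// for %T = type { i32, [2 x float] } on a typical 64-bit target,
//
//   SmallVector<EVT, 4> VTs;
//   SmallVector<uint64_t, 4> Offs;
//   ComputeValueVTs(TLI, DL, T, VTs, &Offs);
//
// would leave VTs == {i32, f32, f32} and Offs == {0, 4, 8}, assuming the
// natural struct layout with no extra padding.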
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}
/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}
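
// For illustration (hypothetical usage, not in the original file): asm with
// the constraint string "=r,m" has an 'm' input, so the check above returns
// true; "=r,r" uses only registers and returns false. A caller might write:
//
//   InlineAsm::ConstraintInfoVector CV = IA->ParseConstraints();
//   bool UsesMem = hasInlineAsmMemConstraint(CV, TLI);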
/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}
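
// For illustration: when NaNs can be ignored (e.g. under fast-math), both
// getFCmpCodeWithoutNaN(ISD::SETOLT) and getFCmpCodeWithoutNaN(ISD::SETULT)
// collapse to the plain ISD::SETLT, letting the backend emit one ordinary
// compare; codes with no ordered/unordered sibling fall through the default
// case unchanged.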
/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}
static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}
/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V1; if V1 is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (auto CS = ImmutableCallSite(I)) {
      const Value *ReturnedOp = CS.getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in,
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
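
// Illustrative walk (hypothetical IR, not from the original source): given
//
//   %agg = insertvalue { i8*, i32 } undef, i8* %p, 0
//   %val = extractvalue { i8*, i32 } %agg, 0
//
// a query starting at %val with an empty ValLoc first appends index 0 while
// looking through the extractvalue, then matches and strips that index again
// at the insertvalue, finally returning %p with ValLoc empty once more.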
/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}
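
// Illustrative case (hypothetical IR): in
//
//   %ret = tail call i32 @callee()
//   %t = trunc i32 %ret to i8
//   ret i8 %t
//
// tracing the ret operand looks through the trunc (when the target allows
// truncation for tail calls), recording that only 8 bits are required, and
// ends back at the call itself. Provided no zeroext/signext attribute has
// cleared AllowDifferingSizes, the slot only discards data and the tail call
// stays legal.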
/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}
/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate type.
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}
/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or an empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}
/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}
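
// Worked example of the iteration scheme above (hypothetical type): for
// { i8, { i16, i32 } }, firstRealType sets Path = [0] (the i8); the first
// nextRealType call advances to Path = [1, 0] (the i16), the second to
// Path = [1, 1] (the i32), and a third returns false, ending the traversal.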
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(&*BBI))
        return false;
    }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
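
// For illustration (hypothetical IR): the call below is in tail call
// position, since nothing but the return follows it in its block:
//
//   %r = call i32 @f()
//   ret i32 %r
//
// Inserting, say, a store between the call and the ret would make the scan
// above reject the transformation.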
bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getAttributes(),
                          AttributeSet::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeSet::ReturnIndex);

  // Noalias is completely benign as far as calling convention goes, it
  // shouldn't affect whether the call is a tail call.
  CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs.removeAttribute(Attribute::NoAlias);

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}
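
// For illustration: if the caller is declared "zeroext i8 @caller(...)" but
// the callee's result lacks zeroext, the caller's epilogue would have to
// perform the extension itself after the callee returned, so the code above
// rejects the tail call. When both sides carry a matching zeroext (or
// signext), *AllowDifferingSizes is cleared because the callee must then
// produce exactly the bits the return needs.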
bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, it doesn't matter what the callee put there;
  // it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something* there.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
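
// Illustrative accepted case (hypothetical IR): both i32 slots of the
// aggregate travel untouched from the call to the ret, so every
// slotOnlyDiscardsData query above succeeds:
//
//   %pair = tail call { i32, i32 } @f()
//   ret { i32, i32 } %pair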
bool llvm::canBeOmittedFromSymbolTable(const GlobalValue *GV) {
  if (!GV->hasLinkOnceODRLinkage())
    return false;

  // We assume that anyone who sets global unnamed_addr on a non-constant knows
  // what they're doing.
  if (GV->hasGlobalUnnamedAddr())
    return true;

  // If it is a non-constant variable, it needs to be uniqued across shared
  // objects.
  if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
    if (!Var->isConstant())
      return false;
  }

  return GV->hasAtLeastLocalUnnamedAddr();
}
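
// For illustration (hypothetical global): a definition such as
//
//   @str = linkonce_odr unnamed_addr constant [4 x i8] c"abc\00"
//
// can be omitted, since every translation unit supplies an identical copy
// and unnamed_addr promises nobody compares its address; a linkonce_odr
// non-constant variable without unnamed_addr must stay visible so the
// dynamic linker can unique it across shared objects.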
static void collectFuncletMembers(
    DenseMap<const MachineBasicBlock *, int> &FuncletMembership, int Funclet,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new funclets.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our funclet.
    auto P = FuncletMembership.insert(std::make_pair(Visiting, Funclet));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == Funclet && "MBB is part of two funclets!");
      continue;
    }

    // Returns are boundaries where funclet transfer can occur, don't follow
    // successors.
    if (Visiting->isReturnBlock())
      continue;

    for (const MachineBasicBlock *Succ : Visiting->successors())
      Worklist.push_back(Succ);
  }
}
DenseMap<const MachineBasicBlock *, int>
llvm::getFuncletMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> FuncletMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHFunclets())
    return FuncletMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction()->getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> FuncletBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry()) {
      FuncletBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not funclets for SEH, so do not consider CatchRet to
    // transfer control to another funclet.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (FuncletBlocks.empty())
    return FuncletMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectFuncletMembers(FuncletMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a funclet are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectFuncletMembers(FuncletMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the funclets.
  for (const MachineBasicBlock *MBB : FuncletBlocks)
    collectFuncletMembers(FuncletMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really funclets, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectFuncletMembers(FuncletMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectFuncletMembers(FuncletMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return FuncletMembership;
}
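
// For illustration: with MSVC-style EH, a source construct like
// "try { f(); } catch (...) { g(); }" outlines the handler into a funclet
// whose entry block is an EH pad. Every block reachable from that pad (up to
// a catchret) is mapped to the pad's block number by the code above, while
// the normal path keeps the entry block's number; downstream passes use this
// coloring to avoid sharing code across funclet boundaries.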