1 //===-- MachineFunction.cpp -----------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// Collect native machine code information for a function. This allows
// target-specific information about the generated code to be stored with each
// function.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionInitializer.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "codegen"
// Hidden command-line override that forces the alignment of every function;
// 0 (the default) leaves per-function alignment untouched.
static cl::opt<unsigned>
AlignAllFunctions("align-all-functions",
                  cl::desc("Force the alignment of all functions."),
                  cl::init(0), cl::Hidden);
55 void MachineFunctionInitializer::anchor() {}
57 static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
58 typedef MachineFunctionProperties::Property P;
60 case P::FailedISel: return "FailedISel";
61 case P::IsSSA: return "IsSSA";
62 case P::Legalized: return "Legalized";
63 case P::NoPHIs: return "NoPHIs";
64 case P::NoVRegs: return "NoVRegs";
65 case P::RegBankSelected: return "RegBankSelected";
66 case P::Selected: return "Selected";
67 case P::TracksLiveness: return "TracksLiveness";
69 llvm_unreachable("Invalid machine function property");
72 void MachineFunctionProperties::print(raw_ostream &OS) const {
73 const char *Separator = "";
74 for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
77 OS << Separator << getPropertyName(static_cast<Property>(I));
82 //===----------------------------------------------------------------------===//
83 // MachineFunction implementation
84 //===----------------------------------------------------------------------===//
// Out-of-line virtual method; anchors MachineFunctionInfo's vtable here.
MachineFunctionInfo::~MachineFunctionInfo() {}
89 void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
90 MBB->getParent()->DeleteMachineBasicBlock(MBB);
93 static inline unsigned getFnStackAlignment(const TargetSubtargetInfo *STI,
95 if (Fn->hasFnAttribute(Attribute::StackAlignment))
96 return Fn->getFnStackAlignment();
97 return STI->getFrameLowering()->getStackAlignment();
100 MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
101 unsigned FunctionNum, MachineModuleInfo &mmi)
102 : Fn(F), Target(TM), STI(TM.getSubtargetImpl(*F)), Ctx(mmi.getContext()),
104 FunctionNumber = FunctionNum;
108 void MachineFunction::init() {
109 // Assume the function starts in SSA form with correct liveness.
110 Properties.set(MachineFunctionProperties::Property::IsSSA);
111 Properties.set(MachineFunctionProperties::Property::TracksLiveness);
112 if (STI->getRegisterInfo())
113 RegInfo = new (Allocator) MachineRegisterInfo(this);
118 // We can realign the stack if the target supports it and the user hasn't
119 // explicitly asked us not to.
120 bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
121 !Fn->hasFnAttribute("no-realign-stack");
122 FrameInfo = new (Allocator) MachineFrameInfo(
123 getFnStackAlignment(STI, Fn), /*StackRealignable=*/CanRealignSP,
124 /*ForceRealign=*/CanRealignSP &&
125 Fn->hasFnAttribute(Attribute::StackAlignment));
127 if (Fn->hasFnAttribute(Attribute::StackAlignment))
128 FrameInfo->ensureMaxAlignment(Fn->getFnStackAlignment());
130 ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
131 Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
133 // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
134 // FIXME: Use Function::optForSize().
135 if (!Fn->hasFnAttribute(Attribute::OptimizeForSize))
136 Alignment = std::max(Alignment,
137 STI->getTargetLowering()->getPrefFunctionAlignment());
139 if (AlignAllFunctions)
140 Alignment = AlignAllFunctions;
142 JumpTableInfo = nullptr;
144 if (isFuncletEHPersonality(classifyEHPersonality(
145 Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr))) {
146 WinEHInfo = new (Allocator) WinEHFuncInfo();
149 assert(Target.isCompatibleDataLayout(getDataLayout()) &&
150 "Can't create a MachineFunction using a Module with a "
151 "Target-incompatible DataLayout attached\n");
153 PSVManager = llvm::make_unique<PseudoSourceValueManager>();
156 MachineFunction::~MachineFunction() {
160 void MachineFunction::clear() {
162 // Don't call destructors on MachineInstr and MachineOperand. All of their
163 // memory comes from the BumpPtrAllocator which is about to be purged.
165 // Do call MachineBasicBlock destructors, it contains std::vectors.
166 for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
167 I->Insts.clearAndLeakNodesUnsafely();
169 InstructionRecycler.clear(Allocator);
170 OperandRecycler.clear(Allocator);
171 BasicBlockRecycler.clear(Allocator);
173 RegInfo->~MachineRegisterInfo();
174 Allocator.Deallocate(RegInfo);
177 MFInfo->~MachineFunctionInfo();
178 Allocator.Deallocate(MFInfo);
181 FrameInfo->~MachineFrameInfo();
182 Allocator.Deallocate(FrameInfo);
184 ConstantPool->~MachineConstantPool();
185 Allocator.Deallocate(ConstantPool);
188 JumpTableInfo->~MachineJumpTableInfo();
189 Allocator.Deallocate(JumpTableInfo);
193 WinEHInfo->~WinEHFuncInfo();
194 Allocator.Deallocate(WinEHInfo);
198 const DataLayout &MachineFunction::getDataLayout() const {
199 return Fn->getParent()->getDataLayout();
202 /// Get the JumpTableInfo for this function.
203 /// If it does not already exist, allocate one.
204 MachineJumpTableInfo *MachineFunction::
205 getOrCreateJumpTableInfo(unsigned EntryKind) {
206 if (JumpTableInfo) return JumpTableInfo;
208 JumpTableInfo = new (Allocator)
209 MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
210 return JumpTableInfo;
213 /// Should we be emitting segmented stack stuff for the function
214 bool MachineFunction::shouldSplitStack() const {
215 return getFunction()->hasFnAttribute("split-stack");
218 /// This discards all of the MachineBasicBlock numbers and recomputes them.
219 /// This guarantees that the MBB numbers are sequential, dense, and match the
220 /// ordering of the blocks within the function. If a specific MachineBasicBlock
221 /// is specified, only that block and those after it are renumbered.
222 void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
223 if (empty()) { MBBNumbering.clear(); return; }
224 MachineFunction::iterator MBBI, E = end();
228 MBBI = MBB->getIterator();
230 // Figure out the block number this should have.
231 unsigned BlockNo = 0;
233 BlockNo = std::prev(MBBI)->getNumber() + 1;
235 for (; MBBI != E; ++MBBI, ++BlockNo) {
236 if (MBBI->getNumber() != (int)BlockNo) {
237 // Remove use of the old number.
238 if (MBBI->getNumber() != -1) {
239 assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
240 "MBB number mismatch!");
241 MBBNumbering[MBBI->getNumber()] = nullptr;
244 // If BlockNo is already taken, set that block's number to -1.
245 if (MBBNumbering[BlockNo])
246 MBBNumbering[BlockNo]->setNumber(-1);
248 MBBNumbering[BlockNo] = &*MBBI;
249 MBBI->setNumber(BlockNo);
253 // Okay, all the blocks are renumbered. If we have compactified the block
254 // numbering, shrink MBBNumbering now.
255 assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
256 MBBNumbering.resize(BlockNo);
259 /// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
260 MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
263 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
264 MachineInstr(*this, MCID, DL, NoImp);
267 /// Create a new MachineInstr which is a copy of the 'Orig' instruction,
268 /// identical in all ways except the instruction has no parent, prev, or next.
270 MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
271 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
272 MachineInstr(*this, *Orig);
275 /// Delete the given MachineInstr.
277 /// This function also serves as the MachineInstr destructor - the real
278 /// ~MachineInstr() destructor must be empty.
280 MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
281 // Strip it for parts. The operand array and the MI object itself are
282 // independently recyclable.
284 deallocateOperandArray(MI->CapOperands, MI->Operands);
285 // Don't call ~MachineInstr() which must be trivial anyway because
286 // ~MachineFunction drops whole lists of MachineInstrs wihout calling their
288 InstructionRecycler.Deallocate(Allocator, MI);
291 /// Allocate a new MachineBasicBlock. Use this instead of
292 /// `new MachineBasicBlock'.
294 MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
295 return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
296 MachineBasicBlock(*this, bb);
299 /// Delete the given MachineBasicBlock.
301 MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
302 assert(MBB->getParent() == this && "MBB parent mismatch!");
303 MBB->~MachineBasicBlock();
304 BasicBlockRecycler.Deallocate(Allocator, MBB);
307 MachineMemOperand *MachineFunction::getMachineMemOperand(
308 MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
309 unsigned base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
310 SynchronizationScope SynchScope, AtomicOrdering Ordering,
311 AtomicOrdering FailureOrdering) {
312 return new (Allocator)
313 MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
314 SynchScope, Ordering, FailureOrdering);
318 MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
319 int64_t Offset, uint64_t Size) {
321 return new (Allocator)
322 MachineMemOperand(MachinePointerInfo(MMO->getValue(),
323 MMO->getOffset()+Offset),
324 MMO->getFlags(), Size, MMO->getBaseAlignment(),
325 AAMDNodes(), nullptr, MMO->getSynchScope(),
326 MMO->getOrdering(), MMO->getFailureOrdering());
327 return new (Allocator)
328 MachineMemOperand(MachinePointerInfo(MMO->getPseudoValue(),
329 MMO->getOffset()+Offset),
330 MMO->getFlags(), Size, MMO->getBaseAlignment(),
331 AAMDNodes(), nullptr, MMO->getSynchScope(),
332 MMO->getOrdering(), MMO->getFailureOrdering());
335 MachineInstr::mmo_iterator
336 MachineFunction::allocateMemRefsArray(unsigned long Num) {
337 return Allocator.Allocate<MachineMemOperand *>(Num);
340 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
341 MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
342 MachineInstr::mmo_iterator End) {
343 // Count the number of load mem refs.
345 for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
349 // Allocate a new array and populate it with the load information.
350 MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
352 for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
353 if ((*I)->isLoad()) {
354 if (!(*I)->isStore())
358 // Clone the MMO and unset the store flag.
359 MachineMemOperand *JustLoad =
360 getMachineMemOperand((*I)->getPointerInfo(),
361 (*I)->getFlags() & ~MachineMemOperand::MOStore,
362 (*I)->getSize(), (*I)->getBaseAlignment(),
363 (*I)->getAAInfo(), nullptr,
364 (*I)->getSynchScope(), (*I)->getOrdering(),
365 (*I)->getFailureOrdering());
366 Result[Index] = JustLoad;
371 return std::make_pair(Result, Result + Num);
374 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
375 MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
376 MachineInstr::mmo_iterator End) {
377 // Count the number of load mem refs.
379 for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
383 // Allocate a new array and populate it with the store information.
384 MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
386 for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
387 if ((*I)->isStore()) {
392 // Clone the MMO and unset the load flag.
393 MachineMemOperand *JustStore =
394 getMachineMemOperand((*I)->getPointerInfo(),
395 (*I)->getFlags() & ~MachineMemOperand::MOLoad,
396 (*I)->getSize(), (*I)->getBaseAlignment(),
397 (*I)->getAAInfo(), nullptr,
398 (*I)->getSynchScope(), (*I)->getOrdering(),
399 (*I)->getFailureOrdering());
400 Result[Index] = JustStore;
405 return std::make_pair(Result, Result + Num);
408 const char *MachineFunction::createExternalSymbolName(StringRef Name) {
409 char *Dest = Allocator.Allocate<char>(Name.size() + 1);
410 std::copy(Name.begin(), Name.end(), Dest);
411 Dest[Name.size()] = 0;
415 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
416 LLVM_DUMP_METHOD void MachineFunction::dump() const {
421 StringRef MachineFunction::getName() const {
422 assert(getFunction() && "No function!");
423 return getFunction()->getName();
426 void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
427 OS << "# Machine code for function " << getName() << ": ";
428 getProperties().print(OS);
431 // Print Frame Information
432 FrameInfo->print(*this, OS);
434 // Print JumpTable Information
436 JumpTableInfo->print(OS);
438 // Print Constant Pool
439 ConstantPool->print(OS);
441 const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();
443 if (RegInfo && !RegInfo->livein_empty()) {
444 OS << "Function Live Ins: ";
445 for (MachineRegisterInfo::livein_iterator
446 I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
447 OS << PrintReg(I->first, TRI);
449 OS << " in " << PrintReg(I->second, TRI);
450 if (std::next(I) != E)
456 ModuleSlotTracker MST(getFunction()->getParent());
457 MST.incorporateFunction(*getFunction());
458 for (const auto &BB : *this) {
460 BB.print(OS, MST, Indexes);
463 OS << "\n# End machine code for function " << getName() << ".\n\n";
468 struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
470 DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
472 static std::string getGraphName(const MachineFunction *F) {
473 return ("CFG for '" + F->getName() + "' function").str();
476 std::string getNodeLabel(const MachineBasicBlock *Node,
477 const MachineFunction *Graph) {
480 raw_string_ostream OSS(OutStr);
483 OSS << "BB#" << Node->getNumber();
484 if (const BasicBlock *BB = Node->getBasicBlock())
485 OSS << ": " << BB->getName();
490 if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
492 // Process string output to make it nicer...
493 for (unsigned i = 0; i != OutStr.length(); ++i)
494 if (OutStr[i] == '\n') { // Left justify
496 OutStr.insert(OutStr.begin()+i+1, 'l');
503 void MachineFunction::viewCFG() const
506 ViewGraph(this, "mf" + getName());
508 errs() << "MachineFunction::viewCFG is only available in debug builds on "
509 << "systems with Graphviz or gv!\n";
513 void MachineFunction::viewCFGOnly() const
516 ViewGraph(this, "mf" + getName(), true);
518 errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
519 << "systems with Graphviz or gv!\n";
523 /// Add the specified physical register as a live-in value and
524 /// create a corresponding virtual register for it.
525 unsigned MachineFunction::addLiveIn(unsigned PReg,
526 const TargetRegisterClass *RC) {
527 MachineRegisterInfo &MRI = getRegInfo();
528 unsigned VReg = MRI.getLiveInVirtReg(PReg);
530 const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
532 // A physical register can be added several times.
533 // Between two calls, the register class of the related virtual register
534 // may have been constrained to match some operation constraints.
535 // In that case, check that the current register class includes the
536 // physical register and is a sub class of the specified RC.
537 assert((VRegRC == RC || (VRegRC->contains(PReg) &&
538 RC->hasSubClassEq(VRegRC))) &&
539 "Register class mismatch!");
542 VReg = MRI.createVirtualRegister(RC);
543 MRI.addLiveIn(PReg, VReg);
547 /// Return the MCSymbol for the specified non-empty jump table.
548 /// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
549 /// normal 'L' label is returned.
550 MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
551 bool isLinkerPrivate) const {
552 const DataLayout &DL = getDataLayout();
553 assert(JumpTableInfo && "No jump tables");
554 assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
556 StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
557 : DL.getPrivateGlobalPrefix();
558 SmallString<60> Name;
559 raw_svector_ostream(Name)
560 << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
561 return Ctx.getOrCreateSymbol(Name);
564 /// Return a function-local symbol to represent the PIC base.
565 MCSymbol *MachineFunction::getPICBaseSymbol() const {
566 const DataLayout &DL = getDataLayout();
567 return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
568 Twine(getFunctionNumber()) + "$pb");
571 /// \name Exception Handling
575 MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
576 unsigned N = LandingPads.size();
577 for (unsigned i = 0; i < N; ++i) {
578 LandingPadInfo &LP = LandingPads[i];
579 if (LP.LandingPadBlock == LandingPad)
583 LandingPads.push_back(LandingPadInfo(LandingPad));
584 return LandingPads[N];
587 void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
588 MCSymbol *BeginLabel, MCSymbol *EndLabel) {
589 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
590 LP.BeginLabels.push_back(BeginLabel);
591 LP.EndLabels.push_back(EndLabel);
594 MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
595 MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
596 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
597 LP.LandingPadLabel = LandingPadLabel;
598 return LandingPadLabel;
601 void MachineFunction::addCatchTypeInfo(MachineBasicBlock *LandingPad,
602 ArrayRef<const GlobalValue *> TyInfo) {
603 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
604 for (unsigned N = TyInfo.size(); N; --N)
605 LP.TypeIds.push_back(getTypeIDFor(TyInfo[N - 1]));
608 void MachineFunction::addFilterTypeInfo(MachineBasicBlock *LandingPad,
609 ArrayRef<const GlobalValue *> TyInfo) {
610 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
611 std::vector<unsigned> IdsInFilter(TyInfo.size());
612 for (unsigned I = 0, E = TyInfo.size(); I != E; ++I)
613 IdsInFilter[I] = getTypeIDFor(TyInfo[I]);
614 LP.TypeIds.push_back(getFilterIDFor(IdsInFilter));
617 void MachineFunction::tidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap) {
618 for (unsigned i = 0; i != LandingPads.size(); ) {
619 LandingPadInfo &LandingPad = LandingPads[i];
620 if (LandingPad.LandingPadLabel &&
621 !LandingPad.LandingPadLabel->isDefined() &&
622 (!LPMap || (*LPMap)[LandingPad.LandingPadLabel] == 0))
623 LandingPad.LandingPadLabel = nullptr;
625 // Special case: we *should* emit LPs with null LP MBB. This indicates
627 if (!LandingPad.LandingPadLabel && LandingPad.LandingPadBlock) {
628 LandingPads.erase(LandingPads.begin() + i);
632 for (unsigned j = 0, e = LandingPads[i].BeginLabels.size(); j != e; ++j) {
633 MCSymbol *BeginLabel = LandingPad.BeginLabels[j];
634 MCSymbol *EndLabel = LandingPad.EndLabels[j];
635 if ((BeginLabel->isDefined() ||
636 (LPMap && (*LPMap)[BeginLabel] != 0)) &&
637 (EndLabel->isDefined() ||
638 (LPMap && (*LPMap)[EndLabel] != 0))) continue;
640 LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
641 LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
646 // Remove landing pads with no try-ranges.
647 if (LandingPads[i].BeginLabels.empty()) {
648 LandingPads.erase(LandingPads.begin() + i);
652 // If there is no landing pad, ensure that the list of typeids is empty.
653 // If the only typeid is a cleanup, this is the same as having no typeids.
654 if (!LandingPad.LandingPadBlock ||
655 (LandingPad.TypeIds.size() == 1 && !LandingPad.TypeIds[0]))
656 LandingPad.TypeIds.clear();
661 void MachineFunction::addCleanup(MachineBasicBlock *LandingPad) {
662 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
663 LP.TypeIds.push_back(0);
666 void MachineFunction::addSEHCatchHandler(MachineBasicBlock *LandingPad,
667 const Function *Filter,
668 const BlockAddress *RecoverBA) {
669 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
671 Handler.FilterOrFinally = Filter;
672 Handler.RecoverBA = RecoverBA;
673 LP.SEHHandlers.push_back(Handler);
676 void MachineFunction::addSEHCleanupHandler(MachineBasicBlock *LandingPad,
677 const Function *Cleanup) {
678 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
680 Handler.FilterOrFinally = Cleanup;
681 Handler.RecoverBA = nullptr;
682 LP.SEHHandlers.push_back(Handler);
685 void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
686 ArrayRef<unsigned> Sites) {
687 LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
690 unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
691 for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
692 if (TypeInfos[i] == TI) return i + 1;
694 TypeInfos.push_back(TI);
695 return TypeInfos.size();
698 int MachineFunction::getFilterIDFor(std::vector<unsigned> &TyIds) {
699 // If the new filter coincides with the tail of an existing filter, then
700 // re-use the existing filter. Folding filters more than this requires
701 // re-ordering filters and/or their elements - probably not worth it.
702 for (std::vector<unsigned>::iterator I = FilterEnds.begin(),
703 E = FilterEnds.end(); I != E; ++I) {
704 unsigned i = *I, j = TyIds.size();
707 if (FilterIds[--i] != TyIds[--j])
711 // The new filter coincides with range [i, end) of the existing filter.
717 // Add the new filter.
718 int FilterID = -(1 + FilterIds.size());
719 FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
720 FilterIds.insert(FilterIds.end(), TyIds.begin(), TyIds.end());
721 FilterEnds.push_back(FilterIds.size());
722 FilterIds.push_back(0); // terminator
726 void llvm::addLandingPadInfo(const LandingPadInst &I, MachineBasicBlock &MBB) {
727 MachineFunction &MF = *MBB.getParent();
728 if (const auto *PF = dyn_cast<Function>(
729 I.getParent()->getParent()->getPersonalityFn()->stripPointerCasts()))
730 MF.getMMI().addPersonality(PF);
735 // FIXME: New EH - Add the clauses in reverse order. This isn't 100% correct,
736 // but we need to do it this way because of how the DWARF EH emitter
737 // processes the clauses.
738 for (unsigned i = I.getNumClauses(); i != 0; --i) {
739 Value *Val = I.getClause(i - 1);
740 if (I.isCatch(i - 1)) {
741 MF.addCatchTypeInfo(&MBB,
742 dyn_cast<GlobalValue>(Val->stripPointerCasts()));
744 // Add filters in a list.
745 Constant *CVal = cast<Constant>(Val);
746 SmallVector<const GlobalValue *, 4> FilterList;
747 for (User::op_iterator II = CVal->op_begin(), IE = CVal->op_end();
749 FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));
751 MF.addFilterTypeInfo(&MBB, FilterList);
758 //===----------------------------------------------------------------------===//
759 // MachineFrameInfo implementation
760 //===----------------------------------------------------------------------===//
762 /// Make sure the function is at least Align bytes aligned.
763 void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
764 if (!StackRealignable)
765 assert(Align <= StackAlignment &&
766 "For targets without stack realignment, Align is out of limit!");
767 if (MaxAlignment < Align) MaxAlignment = Align;
770 /// Clamp the alignment if requested and emit a warning.
771 static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
772 unsigned StackAlign) {
773 if (!ShouldClamp || Align <= StackAlign)
775 DEBUG(dbgs() << "Warning: requested alignment " << Align
776 << " exceeds the stack alignment " << StackAlign
777 << " when stack realignment is off" << '\n');
781 /// Create a new statically sized stack object, returning a nonnegative
782 /// identifier to represent it.
783 int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
784 bool isSS, const AllocaInst *Alloca) {
785 assert(Size != 0 && "Cannot allocate zero size stack objects!");
786 Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
787 Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, Alloca,
789 int Index = (int)Objects.size() - NumFixedObjects - 1;
790 assert(Index >= 0 && "Bad frame index!");
791 ensureMaxAlignment(Alignment);
795 /// Create a new statically sized stack object that represents a spill slot,
796 /// returning a nonnegative identifier to represent it.
797 int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
798 unsigned Alignment) {
799 Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
800 CreateStackObject(Size, Alignment, true);
801 int Index = (int)Objects.size() - NumFixedObjects - 1;
802 ensureMaxAlignment(Alignment);
806 /// Notify the MachineFrameInfo object that a variable sized object has been
807 /// created. This must be created whenever a variable sized object is created,
808 /// whether or not the index returned is actually used.
809 int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
810 const AllocaInst *Alloca) {
811 HasVarSizedObjects = true;
812 Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
813 Objects.push_back(StackObject(0, Alignment, 0, false, false, Alloca, true));
814 ensureMaxAlignment(Alignment);
815 return (int)Objects.size()-NumFixedObjects-1;
818 /// Create a new object at a fixed location on the stack.
819 /// All fixed objects should be created before other objects are created for
820 /// efficiency. By default, fixed objects are immutable. This returns an
821 /// index with a negative value.
822 int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
823 bool Immutable, bool isAliased) {
824 assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
825 // The alignment of the frame index can be determined from its offset from
826 // the incoming frame position. If the frame object is at offset 32 and
827 // the stack is guaranteed to be 16-byte aligned, then we know that the
828 // object is 16-byte aligned. Note that unlike the non-fixed case, if the
829 // stack needs realignment, we can't assume that the stack will in fact be
831 unsigned Align = MinAlign(SPOffset, ForcedRealign ? 1 : StackAlignment);
832 Align = clampStackAlignment(!StackRealignable, Align, StackAlignment);
833 Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
835 /*Alloca*/ nullptr, isAliased));
836 return -++NumFixedObjects;
839 /// Create a spill slot at a fixed location on the stack.
840 /// Returns an index with a negative value.
841 int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
844 unsigned Align = MinAlign(SPOffset, ForcedRealign ? 1 : StackAlignment);
845 Align = clampStackAlignment(!StackRealignable, Align, StackAlignment);
846 Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
849 /*isAliased*/ false));
850 return -++NumFixedObjects;
853 BitVector MachineFrameInfo::getPristineRegs(const MachineFunction &MF) const {
854 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
855 BitVector BV(TRI->getNumRegs());
857 // Before CSI is calculated, no registers are considered pristine. They can be
858 // freely used and PEI will make sure they are saved.
859 if (!isCalleeSavedInfoValid())
862 for (const MCPhysReg *CSR = TRI->getCalleeSavedRegs(&MF); CSR && *CSR; ++CSR)
865 // Saved CSRs are not pristine.
866 for (auto &I : getCalleeSavedInfo())
867 for (MCSubRegIterator S(I.getReg(), TRI, true); S.isValid(); ++S)
873 unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
874 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
875 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
876 unsigned MaxAlign = getMaxAlignment();
879 // This code is very, very similar to PEI::calculateFrameObjectOffsets().
880 // It really should be refactored to share code. Until then, changes
881 // should keep in mind that there's tight coupling between the two.
883 for (int i = getObjectIndexBegin(); i != 0; ++i) {
884 int FixedOff = -getObjectOffset(i);
885 if (FixedOff > Offset) Offset = FixedOff;
887 for (unsigned i = 0, e = getObjectIndexEnd(); i != e; ++i) {
888 if (isDeadObjectIndex(i))
890 Offset += getObjectSize(i);
891 unsigned Align = getObjectAlignment(i);
892 // Adjust to alignment boundary
893 Offset = (Offset+Align-1)/Align*Align;
895 MaxAlign = std::max(Align, MaxAlign);
898 if (adjustsStack() && TFI->hasReservedCallFrame(MF))
899 Offset += getMaxCallFrameSize();
901 // Round up the size to a multiple of the alignment. If the function has
902 // any calls or alloca's, align to the target's StackAlignment value to
903 // ensure that the callee's frame or the alloca data is suitably aligned;
904 // otherwise, for leaf functions, align to the TransientStackAlignment
907 if (adjustsStack() || hasVarSizedObjects() ||
908 (RegInfo->needsStackRealignment(MF) && getObjectIndexEnd() != 0))
909 StackAlign = TFI->getStackAlignment();
911 StackAlign = TFI->getTransientStackAlignment();
913 // If the frame pointer is eliminated, all frame offsets will be relative to
914 // SP not FP. Align to MaxAlign so this works.
915 StackAlign = std::max(StackAlign, MaxAlign);
916 unsigned AlignMask = StackAlign - 1;
917 Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);
919 return (unsigned)Offset;
922 void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
923 if (Objects.empty()) return;
925 const TargetFrameLowering *FI = MF.getSubtarget().getFrameLowering();
926 int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);
928 OS << "Frame Objects:\n";
930 for (unsigned i = 0, e = Objects.size(); i != e; ++i) {
931 const StackObject &SO = Objects[i];
932 OS << " fi#" << (int)(i-NumFixedObjects) << ": ";
933 if (SO.Size == ~0ULL) {
938 OS << "variable sized";
940 OS << "size=" << SO.Size;
941 OS << ", align=" << SO.Alignment;
943 if (i < NumFixedObjects)
945 if (i < NumFixedObjects || SO.SPOffset != -1) {
946 int64_t Off = SO.SPOffset - ValOffset;
947 OS << ", at location [SP";
958 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
959 void MachineFrameInfo::dump(const MachineFunction &MF) const {
964 //===----------------------------------------------------------------------===//
965 // MachineJumpTableInfo implementation
966 //===----------------------------------------------------------------------===//
/// Return the size of each entry in the jump table, in bytes.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
  // The size of a jump table entry is 4 bytes unless the entry is just the
  // address of a block, in which case it is the pointer size.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    // The entry is a bare pointer to the basic block.
    return TD.getPointerSize();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
  // The remaining emitted kinds are 32-bit quantities (see comment above).
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
  // EK_Inline tables are emitted inside the function body, not as data.
  case MachineJumpTableInfo::EK_Inline:
  llvm_unreachable("Unknown jump table encoding!");
/// Return the alignment of each entry in the jump table, in bytes.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
  // The alignment of a jump table entry is the alignment of int32 unless the
  // entry is just the address of a block, in which case it is the pointer
  // ABI alignment.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerABIAlignment();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    // 64-bit GP-relative entries align like a 64-bit integer.
    return TD.getABIIntegerTypeAlignment(64);
  // All 32-bit entry kinds align like a 32-bit integer.
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return TD.getABIIntegerTypeAlignment(32);
  // EK_Inline emits no data, so it imposes no alignment requirement.
  case MachineJumpTableInfo::EK_Inline:
  llvm_unreachable("Unknown jump table encoding!");
1007 /// Create a new jump table entry in the jump table info.
1008 unsigned MachineJumpTableInfo::createJumpTableIndex(
1009 const std::vector<MachineBasicBlock*> &DestBBs) {
1010 assert(!DestBBs.empty() && "Cannot create an empty jump table!");
1011 JumpTables.push_back(MachineJumpTableEntry(DestBBs));
1012 return JumpTables.size()-1;
1015 /// If Old is the target of any jump tables, update the jump tables to branch
1017 bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
1018 MachineBasicBlock *New) {
1019 assert(Old != New && "Not making a change?");
1020 bool MadeChange = false;
1021 for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
1022 ReplaceMBBInJumpTable(i, Old, New);
/// If Old is a target of the jump tables, update the jump table to branch to
/// New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
                                                 MachineBasicBlock *Old,
                                                 MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  // Scan every destination slot of jump table Idx for references to Old.
  MachineJumpTableEntry &JTE = JumpTables[Idx];
  for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
    if (JTE.MBBs[j] == Old) {
/// Print all jump tables, one line per table, listing the numbers of the
/// destination basic blocks.
void MachineJumpTableInfo::print(raw_ostream &OS) const {
  // Nothing to print when the function has no jump tables.
  if (JumpTables.empty()) return;

  OS << "Jump Tables:\n";

  for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
    OS << " jt#" << i << ": ";
    for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
      OS << " BB#" << JumpTables[i].MBBs[j]->getNumber();
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Dump the jump tables to the debug stream; debug builds only.
LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
1061 //===----------------------------------------------------------------------===//
1062 // MachineConstantPool implementation
1063 //===----------------------------------------------------------------------===//
// Out-of-line virtual-method anchor: defining this here pins
// MachineConstantPoolValue's vtable to this translation unit.
void MachineConstantPoolValue::anchor() { }
1067 Type *MachineConstantPoolEntry::getType() const {
1068 if (isMachineConstantPoolEntry())
1069 return Val.MachineCPVal->getType();
1070 return Val.ConstVal->getType();
1073 bool MachineConstantPoolEntry::needsRelocation() const {
1074 if (isMachineConstantPoolEntry())
1076 return Val.ConstVal->needsRelocation();
/// Classify which object-file section this entry should be emitted into.
MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
  // Entries needing relocation cannot live in a mergeable-constant section.
  if (needsRelocation())
    return SectionKind::getReadOnlyWithRel();
  // Otherwise choose a mergeable-constant section by allocation size.
  switch (DL->getTypeAllocSize(getType())) {
    return SectionKind::getMergeableConst4();
    return SectionKind::getMergeableConst8();
    return SectionKind::getMergeableConst16();
    return SectionKind::getMergeableConst32();
    // Sizes with no dedicated mergeable section fall back to plain read-only.
    return SectionKind::getReadOnly();
/// Destroy the pool, freeing every owned MachineConstantPoolValue exactly once.
MachineConstantPool::~MachineConstantPool() {
  // A constant may be a member of both Constants and MachineCPVsSharingEntries,
  // so keep track of which we've deleted to avoid double deletions.
  DenseSet<MachineConstantPoolValue*> Deleted;
  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
    if (Constants[i].isMachineConstantPoolEntry()) {
      Deleted.insert(Constants[i].Val.MachineCPVal);
      delete Constants[i].Val.MachineCPVal;
  // Free the sharing entries that were not already deleted above.
  for (DenseSet<MachineConstantPoolValue*>::iterator I =
       MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
    if (Deleted.count(*I) == 0)
/// Test whether the given two constants can be allocated the same constant pool
/// entry, i.e. whether they have identical bit patterns of the same size.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
                                      const DataLayout &DL) {
  // Handle the trivial case quickly.
  if (A == B) return true;

  // If they have the same type but weren't the same constant, quickly
  // reject them: IR constants are uniqued, so two distinct pointers of the
  // same type denote distinct values.
  if (A->getType() == B->getType()) return false;

  // We can't handle structs or arrays.
  if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
      isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))

  // For now, only support constants with the same size (capped at 128 bytes).
  uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
  if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)

  // An integer type wide enough to hold either constant's bits.
  Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);

  // Try constant folding a bitcast of both instructions to an integer. If we
  // get two identical ConstantInt's, then we are good to share them. We use
  // the constant folding APIs to do this so that we get the benefit of
  // target-aware folding. Pointers are folded via ptrtoint, since a bitcast
  // from pointer to integer is not valid IR.
  if (isa<PointerType>(A->getType()))
    A = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(A), IntTy, DL);
  else if (A->getType() != IntTy)
    A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
  if (isa<PointerType>(B->getType()))
    B = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(B), IntTy, DL);
  else if (B->getType() != IntTy)
    B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
/// Create a new entry in the constant pool or return an existing one.
/// User must specify the minimum required alignment for the object.
/// NOTE(review): a historical comment described Alignment as the "log2 of
/// the alignment", but the code below compares and stores it directly as a
/// byte alignment — confirm against callers.
unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
                                                   unsigned Alignment) {
  assert(Alignment && "Alignment must be specified!");
  // The pool's overall alignment is the maximum over all entries.
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  // FIXME, this could be made much more efficient for large constant pools.
  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
    if (!Constants[i].isMachineConstantPoolEntry() &&
        CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
      // Reuse the matching entry, raising its alignment if this use needs
      // a stricter one.
      if ((unsigned)Constants[i].getAlignment() < Alignment)
        Constants[i].Alignment = Alignment;

  // No existing entry matched: append a new one and return its index.
  Constants.push_back(MachineConstantPoolEntry(C, Alignment));
  return Constants.size()-1;
/// Create a new entry for a target-specific constant pool value, or return
/// an existing equivalent entry's index.
unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
                                                   unsigned Alignment) {
  assert(Alignment && "Alignment must be specified!");
  // The pool's overall alignment is the maximum over all entries.
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  // FIXME, this could be made much more efficient for large constant pools.
  // The target-specific value searches for an equivalent existing entry.
  int Idx = V->getExistingMachineCPValue(this, Alignment);
    // Record that V shares a pre-existing slot so the destructor can free
    // each MachineConstantPoolValue exactly once.
    MachineCPVsSharingEntries.insert(V);
    return (unsigned)Idx;

  Constants.push_back(MachineConstantPoolEntry(V, Alignment));
  return Constants.size()-1;
/// Print the constant pool, one entry per line with its index and alignment.
void MachineConstantPool::print(raw_ostream &OS) const {
  // Nothing to print for an empty pool.
  if (Constants.empty()) return;

  OS << "Constant Pool:\n";
  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
    OS << " cp#" << i << ": ";
    // Target-specific entries print themselves; IR constants print as
    // operands without their type.
    if (Constants[i].isMachineConstantPoolEntry())
      Constants[i].Val.MachineCPVal->print(OS);
      Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
    OS << ", align=" << Constants[i].getAlignment();
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Dump the constant pool to the debug stream; debug builds only.
LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }