//===- ARMConstantIslandPass.cpp - ARM constant islands ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that splits the constant pool up into 'islands'
// which are scattered throughout the function. This is required due to the
// limited pc-relative displacements that ARM has.
//
//===----------------------------------------------------------------------===//
17 #include "ARMBaseInstrInfo.h"
18 #include "ARMBasicBlockInfo.h"
19 #include "ARMMachineFunctionInfo.h"
20 #include "ARMSubtarget.h"
21 #include "MCTargetDesc/ARMBaseInfo.h"
22 #include "Thumb2InstrInfo.h"
23 #include "Utils/ARMBaseInfo.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/STLExtras.h"
26 #include "llvm/ADT/SmallSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/ADT/StringRef.h"
30 #include "llvm/CodeGen/MachineBasicBlock.h"
31 #include "llvm/CodeGen/MachineConstantPool.h"
32 #include "llvm/CodeGen/MachineFunction.h"
33 #include "llvm/CodeGen/MachineFunctionPass.h"
34 #include "llvm/CodeGen/MachineInstr.h"
35 #include "llvm/CodeGen/MachineJumpTableInfo.h"
36 #include "llvm/CodeGen/MachineOperand.h"
37 #include "llvm/CodeGen/MachineRegisterInfo.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/DebugLoc.h"
40 #include "llvm/MC/MCInstrDesc.h"
41 #include "llvm/Pass.h"
42 #include "llvm/Support/CommandLine.h"
43 #include "llvm/Support/Compiler.h"
44 #include "llvm/Support/Debug.h"
45 #include "llvm/Support/ErrorHandling.h"
46 #include "llvm/Support/Format.h"
47 #include "llvm/Support/MathExtras.h"
48 #include "llvm/Support/raw_ostream.h"
58 #define DEBUG_TYPE "arm-cp-islands"
60 #define ARM_CP_ISLANDS_OPT_NAME \
61 "ARM constant island placement and branch shortening pass"
STATISTIC(NumCPEs,       "Number of constpool entries");
STATISTIC(NumSplit,      "Number of uncond branches inserted");
STATISTIC(NumCBrFixed,   "Number of cond branches fixed");
STATISTIC(NumUBrFixed,   "Number of uncond branches fixed");
STATISTIC(NumTBs,        "Number of table branches generated");
STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
STATISTIC(NumCBZ,        "Number of CBZ / CBNZ formed");
STATISTIC(NumJTMoved,    "Number of jump table destination blocks moved");
STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");
static cl::opt<bool>
AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
          cl::desc("Adjust basic block layout to better use TB[BH]"));

static cl::opt<unsigned>
CPMaxIteration("arm-constant-island-max-iteration", cl::Hidden, cl::init(30),
          cl::desc("The maximum number of iterations allowed for convergence"));

static cl::opt<bool> SynthesizeThumb1TBB(
    "arm-synthesize-thumb-1-tbb", cl::Hidden, cl::init(true),
    cl::desc("Use compressed jump tables in Thumb-1 by synthesizing an "
             "equivalent to the TBB/TBH instructions"));
namespace {

  /// ARMConstantIslands - Due to limited PC-relative displacements, ARM
  /// requires constant pool entries to be scattered among the instructions
  /// inside a function. To do this, it completely ignores the normal LLVM
  /// constant pool; instead, it places constants wherever it feels like with
  /// special instructions.
  ///
  /// The terminology used in this pass includes:
  ///   Islands - Clumps of constants placed in the function.
  ///   Water   - Potential places where an island could be formed.
  ///   CPE     - A constant pool entry that has been placed somewhere, which
  ///             tracks a list of users.
  class ARMConstantIslands : public MachineFunctionPass {
    std::vector<BasicBlockInfo> BBInfo;

    /// WaterList - A sorted list of basic blocks where islands could be placed
    /// (i.e. blocks that don't fall through to the following block, due
    /// to a return, unreachable, or unconditional branch).
    std::vector<MachineBasicBlock*> WaterList;

    /// NewWaterList - The subset of WaterList that was created since the
    /// previous iteration by inserting unconditional branches.
    SmallSet<MachineBasicBlock*, 4> NewWaterList;

    using water_iterator = std::vector<MachineBasicBlock *>::iterator;
    /// CPUser - One user of a constant pool, keeping the machine instruction
    /// pointer, the constant pool being referenced, and the max displacement
    /// allowed from the instruction to the CP. The HighWaterMark records the
    /// highest basic block where a new CPEntry can be placed. To ensure this
    /// pass terminates, the CP entries are initially placed at the end of the
    /// function and then move monotonically to lower addresses. The
    /// exception to this rule is when the current CP entry for a particular
    /// CPUser is out of range, but there is another CP entry for the same
    /// constant value in range. We want to use the existing in-range CP
    /// entry, but if it later moves out of range, the search for new water
    /// should resume where it left off. The HighWaterMark is used to record
    /// that point.
    struct CPUser {
      MachineInstr *MI;
      MachineInstr *CPEMI;
      MachineBasicBlock *HighWaterMark;
      unsigned MaxDisp;
      bool NegOk;
      bool IsSoImm;
      bool KnownAlignment = false;

      CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
             bool neg, bool soimm)
        : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm) {
        HighWaterMark = CPEMI->getParent();
      }

      /// getMaxDisp - Returns the maximum displacement supported by MI.
      /// Correct for unknown alignment.
      /// Conservatively subtract 2 bytes to handle weird alignment effects.
      unsigned getMaxDisp() const {
        return (KnownAlignment ? MaxDisp : MaxDisp - 2) - 2;
      }
    };
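    // Worked example for getMaxDisp() above (illustrative numbers, not from
    // the original source): a user recorded with MaxDisp = 4095 is treated as
    // reaching 4093 bytes when its alignment is known and 4091 bytes when it
    // is not, absorbing the extra uncertainty of an unknown mod-4 address.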
    /// CPUsers - Keep track of all of the machine instructions that use various
    /// constant pools and their max displacement.
    std::vector<CPUser> CPUsers;

    /// CPEntry - One per constant pool entry, keeping the machine instruction
    /// pointer, the constpool index, and the number of CPUser's which
    /// reference this entry.
    struct CPEntry {
      MachineInstr *CPEMI;
      unsigned CPI;
      unsigned RefCount;

      CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
        : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
    };

    /// CPEntries - Keep track of all of the constant pool entry machine
    /// instructions. For each original constpool index (i.e. those that existed
    /// upon entry to this pass), it keeps a vector of entries. Original
    /// elements are cloned as we go along; the clones are put in the vector of
    /// the original element, but have distinct CPIs.
    ///
    /// The first half of CPEntries contains generic constants, the second half
    /// contains jump tables. Use getCombinedIndex on a generic CPEMI to look up
    /// which vector it will be in here.
    std::vector<std::vector<CPEntry>> CPEntries;

    /// Maps a JT index to the offset in CPEntries containing copies of that
    /// table. The equivalent map for a CONSTPOOL_ENTRY is the identity.
    DenseMap<int, int> JumpTableEntryIndices;

    /// Maps a JT index to the LEA that actually uses the index to calculate its
    /// address.
    DenseMap<int, int> JumpTableUserIndices;
    /// ImmBranch - One per immediate branch, keeping the machine instruction
    /// pointer, conditional or unconditional, the max displacement,
    /// and (if isCond is true) the corresponding unconditional branch
    /// opcode.
    struct ImmBranch {
      MachineInstr *MI;
      unsigned MaxDisp : 31;
      bool isCond : 1;
      unsigned UncondBr;

      ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, unsigned ubr)
        : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
    };

    /// ImmBranches - Keep track of all the immediate branch instructions.
    std::vector<ImmBranch> ImmBranches;

    /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
    SmallVector<MachineInstr*, 4> PushPopMIs;

    /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
    SmallVector<MachineInstr*, 4> T2JumpTables;

    /// HasFarJump - True if any far jump instruction has been emitted during
    /// the branch fix up pass.
    bool HasFarJump = false;

    MachineFunction *MF;
    MachineConstantPool *MCP;
    const ARMBaseInstrInfo *TII;
    const ARMSubtarget *STI;
    ARMFunctionInfo *AFI;

    bool isThumb;
    bool isThumb1;
    bool isThumb2;
    bool isPositionIndependentOrROPI;
  public:
    static char ID;

    ARMConstantIslands() : MachineFunctionPass(ID) {}

    bool runOnMachineFunction(MachineFunction &MF) override;

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    StringRef getPassName() const override {
      return ARM_CP_ISLANDS_OPT_NAME;
    }

  private:
    void doInitialConstPlacement(std::vector<MachineInstr *> &CPEMIs);
    void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
    bool BBHasFallthrough(MachineBasicBlock *MBB);
    CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
    unsigned getCPELogAlign(const MachineInstr *CPEMI);
    void scanFunctionJumpTables();
    void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
    MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
    void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
    void adjustBBOffsetsAfter(MachineBasicBlock *BB);
    bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
    unsigned getCombinedIndex(const MachineInstr *CPEMI);
    int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
    bool findAvailableWater(CPUser&U, unsigned UserOffset,
                            water_iterator &WaterIter, bool CloserWater);
    void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
                        MachineBasicBlock *&NewMBB);
    bool handleConstantPoolUser(unsigned CPUserIndex, bool CloserWater);
    void removeDeadCPEMI(MachineInstr *CPEMI);
    bool removeUnusedCPEntries();
    bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
                          MachineInstr *CPEMI, unsigned Disp, bool NegOk,
                          bool DoDump = false);
    bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
                        CPUser &U, unsigned &Growth);
    bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
    bool fixupImmediateBr(ImmBranch &Br);
    bool fixupConditionalBr(ImmBranch &Br);
    bool fixupUnconditionalBr(ImmBranch &Br);
    bool undoLRSpillRestore();
    bool optimizeThumb2Instructions();
    bool optimizeThumb2Branches();
    bool reorderThumb2JumpTables();
    bool preserveBaseRegister(MachineInstr *JumpMI, MachineInstr *LEAMI,
                              unsigned &DeadSize, bool &CanDeleteLEA,
                              bool &BaseRegKill);
    bool optimizeThumb2JumpTables();
    MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB,
                                                  MachineBasicBlock *JTBB);

    unsigned getOffsetOf(MachineInstr *MI) const;
    unsigned getUserOffset(CPUser&) const;
    void dumpBBs();
    void verify();

    bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
                         unsigned Disp, bool NegativeOK, bool IsSoImm = false);
    bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
                         const CPUser &U) {
      return isOffsetInRange(UserOffset, TrialOffset,
                             U.getMaxDisp(), U.NegOk, U.IsSoImm);
    }
  };

} // end anonymous namespace
char ARMConstantIslands::ID = 0;

/// verify - check BBOffsets, BBSizes, alignment of islands
void ARMConstantIslands::verify() {
#ifndef NDEBUG
  assert(std::is_sorted(MF->begin(), MF->end(),
                        [this](const MachineBasicBlock &LHS,
                               const MachineBasicBlock &RHS) {
                          return BBInfo[LHS.getNumber()].postOffset() <
                                 BBInfo[RHS.getNumber()].postOffset();
                        }));
  DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
  for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
    CPUser &U = CPUsers[i];
    unsigned UserOffset = getUserOffset(U);
    // Verify offset using the real max displacement without the safety
    // adjustment.
    if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk,
                         /* DoDump = */ true)) {
      DEBUG(dbgs() << "OK\n");
      continue;
    }
    DEBUG(dbgs() << "Out of range.\n");
    llvm_unreachable("Constant pool entry out of range!");
  }
#endif
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// print block size and offset information - debugging
LLVM_DUMP_METHOD void ARMConstantIslands::dumpBBs() {
  DEBUG({
    for (unsigned J = 0, E = BBInfo.size(); J != E; ++J) {
      const BasicBlockInfo &BBI = BBInfo[J];
      dbgs() << format("%08x %bb.%u\t", BBI.Offset, J)
             << " kb=" << unsigned(BBI.KnownBits)
             << " ua=" << unsigned(BBI.Unalign)
             << " pa=" << unsigned(BBI.PostAlign)
             << format(" size=%#x\n", BBInfo[J].Size);
    }
  });
}
#endif
bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  MCP = mf.getConstantPool();

  DEBUG(dbgs() << "***** ARMConstantIslands: "
               << MCP->getConstants().size() << " CP entries, aligned to "
               << MCP->getConstantPoolAlignment() << " bytes *****\n");

  STI = &static_cast<const ARMSubtarget &>(MF->getSubtarget());
  TII = STI->getInstrInfo();
  isPositionIndependentOrROPI =
      STI->getTargetLowering()->isPositionIndependent() || STI->isROPI();
  AFI = MF->getInfo<ARMFunctionInfo>();

  isThumb = AFI->isThumbFunction();
  isThumb1 = AFI->isThumb1OnlyFunction();
  isThumb2 = AFI->isThumb2Function();

  bool GenerateTBB = isThumb2 || (isThumb1 && SynthesizeThumb1TBB);
  // This pass invalidates liveness information when it splits basic blocks.
  MF->getRegInfo().invalidateLiveness();

  // Renumber all of the machine basic blocks in the function, guaranteeing that
  // the numbers agree with the position of the block in the function.
  MF->RenumberBlocks();

  // Try to reorder and otherwise adjust the block layout to make good use
  // of the TB[BH] instructions.
  bool MadeChange = false;
  if (GenerateTBB && AdjustJumpTableBlocks) {
    scanFunctionJumpTables();
    MadeChange |= reorderThumb2JumpTables();
    // Data is out of date, so clear it. It'll be re-computed later.
    T2JumpTables.clear();
    // Blocks may have shifted around. Keep the numbering up to date.
    MF->RenumberBlocks();
  }

  // Perform the initial placement of the constant pool entries. To start with,
  // we put them all at the end of the function.
  std::vector<MachineInstr*> CPEMIs;
  if (!MCP->isEmpty())
    doInitialConstPlacement(CPEMIs);

  if (MF->getJumpTableInfo())
    doInitialJumpTablePlacement(CPEMIs);
  /// The next UID to take is the first unused one.
  AFI->initPICLabelUId(CPEMIs.size());

  // Do the initial scan of the function, building up information about the
  // sizes of each block, the location of all the water, and finding all of the
  // constant pool users.
  initializeFunctionInfo(CPEMIs);

  // Functions with jump tables need an alignment of 4 because they use the ADR
  // instruction, which aligns the PC to 4 bytes before adding an offset.
  if (!T2JumpTables.empty())
    MF->ensureAlignment(2);

  /// Remove dead constant pool entries.
  MadeChange |= removeUnusedCPEntries();
  // Iteratively place constant pool entries and fix up branches until there
  // is no change.
  unsigned NoCPIters = 0, NoBRIters = 0;
  while (true) {
    DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
    bool CPChange = false;
    for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
      // For most inputs, it converges in no more than 5 iterations.
      // If it hasn't converged after 10, the input may have a huge basic block
      // or many CPEs. In this case, we will try different heuristics.
      CPChange |= handleConstantPoolUser(i, NoCPIters >= CPMaxIteration / 2);
    if (CPChange && ++NoCPIters > CPMaxIteration)
      report_fatal_error("Constant Island pass failed to converge!");

    // Clear NewWaterList now. If we split a block for branches, it should
    // appear as "new water" for the next iteration of constant pool placement.
    NewWaterList.clear();

    DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
    bool BRChange = false;
    for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
      BRChange |= fixupImmediateBr(ImmBranches[i]);
    if (BRChange && ++NoBRIters > 30)
      report_fatal_error("Branch Fix Up pass failed to converge!");

    if (!CPChange && !BRChange)
      break;
  }
  // Shrink 32-bit Thumb2 load and store instructions.
  if (isThumb2 && !STI->prefers32BitThumb())
    MadeChange |= optimizeThumb2Instructions();

  // Shrink 32-bit branch instructions.
  if (isThumb && STI->hasV8MBaselineOps())
    MadeChange |= optimizeThumb2Branches();

  // Optimize jump tables using TBB / TBH.
  if (GenerateTBB && !STI->genExecuteOnly())
    MadeChange |= optimizeThumb2JumpTables();

  // After a while, this might be made debug-only, but it is not expensive.
  verify();

  // If LR has been forced spilled and no far jump (i.e. BL) has been issued,
  // undo the spill / restore of LR if possible.
  if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
    MadeChange |= undoLRSpillRestore();

  // Save the mapping between original and cloned constpool entries.
  for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
    for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
      const CPEntry & CPE = CPEntries[i][j];
      if (CPE.CPEMI && CPE.CPEMI->getOperand(1).isCPI())
        AFI->recordCPEClone(i, CPE.CPI);
    }
  }

  DEBUG(dbgs() << '\n'; dumpBBs());
  BBInfo.clear();
  WaterList.clear();
  CPUsers.clear();
  CPEntries.clear();
  JumpTableEntryIndices.clear();
  JumpTableUserIndices.clear();
  ImmBranches.clear();
  PushPopMIs.clear();
  T2JumpTables.clear();

  return MadeChange;
}
/// \brief Perform the initial placement of the regular constant pool entries.
/// To start with, we put them all at the end of the function.
void
ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs) {
  // Create the basic block to hold the CPE's.
  MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
  MF->push_back(BB);

  // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
  unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());

  // Mark the basic block as required by the const-pool.
  BB->setAlignment(MaxAlign);

  // The function needs to be as aligned as the basic blocks. The linker may
  // move functions around based on their alignment.
  MF->ensureAlignment(BB->getAlignment());

  // Order the entries in BB by descending alignment. That ensures correct
  // alignment of all entries as long as BB is sufficiently aligned. Keep
  // track of the insertion point for each alignment. We are going to bucket
  // sort the entries as they are created.
  SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());

  // Add all of the constants from the constant pool to the end block, use an
  // identity mapping of CPI's to CPE's.
  const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();

  const DataLayout &TD = MF->getDataLayout();
  for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
    unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
    assert(Size >= 4 && "Too small constant pool entry");
    unsigned Align = CPs[i].getAlignment();
    assert(isPowerOf2_32(Align) && "Invalid alignment");
    // Verify that all constant pool entries are a multiple of their alignment.
    // If not, we would have to pad them out so that instructions stay aligned.
    assert((Size % Align) == 0 && "CP Entry not multiple of 4 bytes!");

    // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
    unsigned LogAlign = Log2_32(Align);
    MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
    MachineInstr *CPEMI =
      BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
        .addImm(i).addConstantPoolIndex(i).addImm(Size);
    CPEMIs.push_back(CPEMI);

    // Ensure that future entries with higher alignment get inserted before
    // CPEMI. This is bucket sort with iterators.
    for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
      if (InsPoint[a] == InsAt)
        InsPoint[a] = CPEMI;

    // Add a new CPEntry, but no corresponding CPUser yet.
    CPEntries.emplace_back(1, CPEntry(CPEMI, i));
    ++NumCPEs;
    DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
                 << Size << ", align = " << Align << '\n');
  }
}
/// \brief Do initial placement of the jump tables. Because Thumb2's TBB and TBH
/// instructions can be made more efficient if the jump table immediately
/// follows the instruction, it's best to place them immediately next to their
/// jumps to begin with. In almost all cases they'll never be moved from that
/// position.
void ARMConstantIslands::doInitialJumpTablePlacement(
    std::vector<MachineInstr *> &CPEMIs) {
  unsigned i = CPEntries.size();
  auto MJTI = MF->getJumpTableInfo();
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();

  MachineBasicBlock *LastCorrectlyNumberedBB = nullptr;
  for (MachineBasicBlock &MBB : *MF) {
    auto MI = MBB.getLastNonDebugInstr();
    if (MI == MBB.end())
      continue;

    unsigned JTOpcode;
    switch (MI->getOpcode()) {
    default:
      continue;
    case ARM::BR_JTr:
    case ARM::BR_JTm_i12:
    case ARM::BR_JTm_rs:
      JTOpcode = ARM::JUMPTABLE_ADDRS;
      break;
    case ARM::t2BR_JT:
      JTOpcode = ARM::JUMPTABLE_INSTS;
      break;
    case ARM::tBR_JTr:
    case ARM::t2TBB_JT:
      JTOpcode = ARM::JUMPTABLE_TBB;
      break;
    case ARM::t2TBH_JT:
      JTOpcode = ARM::JUMPTABLE_TBH;
      break;
    }
    unsigned NumOps = MI->getDesc().getNumOperands();
    MachineOperand JTOp =
      MI->getOperand(NumOps - (MI->isPredicable() ? 2 : 1));
    unsigned JTI = JTOp.getIndex();
    unsigned Size = JT[JTI].MBBs.size() * sizeof(uint32_t);
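    // Note: this is a conservative initial estimate that counts 4 bytes per
    // table entry; optimizeThumb2JumpTables() may later shrink TBB/TBH tables
    // to 1- or 2-byte entries.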
    MachineBasicBlock *JumpTableBB = MF->CreateMachineBasicBlock();
    MF->insert(std::next(MachineFunction::iterator(MBB)), JumpTableBB);
    MachineInstr *CPEMI = BuildMI(*JumpTableBB, JumpTableBB->begin(),
                                  DebugLoc(), TII->get(JTOpcode))
                              .addImm(i++)
                              .addJumpTableIndex(JTI)
                              .addImm(Size);
    CPEMIs.push_back(CPEMI);
    CPEntries.emplace_back(1, CPEntry(CPEMI, JTI));
    JumpTableEntryIndices.insert(std::make_pair(JTI, CPEntries.size() - 1));
    if (!LastCorrectlyNumberedBB)
      LastCorrectlyNumberedBB = &MBB;
  }

  // If we did anything then we need to renumber the subsequent blocks.
  if (LastCorrectlyNumberedBB)
    MF->RenumberBlocks(LastCorrectlyNumberedBB);
}
/// BBHasFallthrough - Return true if the specified basic block can fallthrough
/// into the block immediately after it.
bool ARMConstantIslands::BBHasFallthrough(MachineBasicBlock *MBB) {
  // Get the next machine basic block in the function.
  MachineFunction::iterator MBBI = MBB->getIterator();
  // Can't fall off end of function.
  if (std::next(MBBI) == MBB->getParent()->end())
    return false;

  MachineBasicBlock *NextBB = &*std::next(MBBI);
  if (!MBB->isSuccessor(NextBB))
    return false;

  // Try to analyze the end of the block. A potential fallthrough may already
  // have an unconditional branch for whatever reason.
  MachineBasicBlock *TBB, *FBB;
  SmallVector<MachineOperand, 4> Cond;
  bool TooDifficult = TII->analyzeBranch(*MBB, TBB, FBB, Cond);
  return TooDifficult || FBB == nullptr;
}
/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
/// look up the corresponding CPEntry.
ARMConstantIslands::CPEntry *
ARMConstantIslands::findConstPoolEntry(unsigned CPI,
                                       const MachineInstr *CPEMI) {
  std::vector<CPEntry> &CPEs = CPEntries[CPI];
  // Number of entries per constpool index should be small, just do a
  // linear search.
  for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
    if (CPEs[i].CPEMI == CPEMI)
      return &CPEs[i];
  }
  return nullptr;
}
/// getCPELogAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI. Alignment is measured in log2(bytes) units.
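/// Jump-table islands use the fixed alignments in the switch below: TBB
/// tables hold byte entries (log2 alignment 0), TBH tables hold halfword
/// entries (1), and the Thumb-1 synthesized variants are kept 4-byte
/// aligned (2).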
unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
  switch (CPEMI->getOpcode()) {
  case ARM::CONSTPOOL_ENTRY:
    break;
  case ARM::JUMPTABLE_TBB:
    return isThumb1 ? 2 : 0;
  case ARM::JUMPTABLE_TBH:
    return isThumb1 ? 2 : 1;
  case ARM::JUMPTABLE_INSTS:
    return 1;
  case ARM::JUMPTABLE_ADDRS:
    return 2;
  default:
    llvm_unreachable("unknown constpool entry kind");
  }

  unsigned CPI = getCombinedIndex(CPEMI);
  assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
  unsigned Align = MCP->getConstants()[CPI].getAlignment();
  assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
  return Log2_32(Align);
}
/// scanFunctionJumpTables - Do a scan of the function, building up
/// information about the sizes of each block and the locations of all
/// the jump tables.
void ARMConstantIslands::scanFunctionJumpTables() {
  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &I : MBB)
      if (I.isBranch() &&
          (I.getOpcode() == ARM::t2BR_JT || I.getOpcode() == ARM::tBR_JTr))
        T2JumpTables.push_back(&I);
  }
}
/// initializeFunctionInfo - Do the initial scan of the function, building up
/// information about the sizes of each block, the location of all the water,
/// and finding all of the constant pool users.
void ARMConstantIslands::
initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
  BBInfo.clear();
  BBInfo = computeAllBlockSizes(MF);

  // The known bits of the entry block offset are determined by the function
  // alignment.
  BBInfo.front().KnownBits = MF->getAlignment();

  // Compute block offsets and known bits.
  adjustBBOffsetsAfter(&MF->front());
  // Now go back through the instructions and build up our data structures.
  for (MachineBasicBlock &MBB : *MF) {
    // If this block doesn't fall through into the next MBB, then this is
    // 'water' that a constant pool island could be placed.
    if (!BBHasFallthrough(&MBB))
      WaterList.push_back(&MBB);

    for (MachineInstr &I : MBB) {
      if (I.isDebugValue())
        continue;

      unsigned Opc = I.getOpcode();
      if (I.isBranch()) {
        bool isCond = false;
        unsigned Bits = 0;
        unsigned Scale = 1;
        int UOpc = Opc;
        switch (Opc) {
        default:
          continue;  // Ignore other JT branches
        case ARM::t2BR_JT:
        case ARM::tBR_JTr:
          T2JumpTables.push_back(&I);
          continue;   // Does not get an entry in ImmBranches
        case ARM::Bcc:
          isCond = true;
          UOpc = ARM::B;
          LLVM_FALLTHROUGH;
        case ARM::B:
          Bits = 24;
          Scale = 4;
          break;
        case ARM::tBcc:
          isCond = true;
          UOpc = ARM::tB;
          Bits = 8;
          Scale = 2;
          break;
        case ARM::tB:
          Bits = 11;
          Scale = 2;
          break;
        case ARM::t2Bcc:
          isCond = true;
          UOpc = ARM::t2B;
          Bits = 20;
          Scale = 2;
          break;
        case ARM::t2B:
          Bits = 24;
          Scale = 2;
          break;
        }

        // Record this immediate branch.
        unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
        ImmBranches.push_back(ImmBranch(&I, MaxOffs, isCond, UOpc));
      }
      if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
        PushPopMIs.push_back(&I);

      if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
          Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
          Opc == ARM::JUMPTABLE_TBH)
        continue;
      // Scan the instructions for constant pool operands.
      for (unsigned op = 0, e = I.getNumOperands(); op != e; ++op)
        if (I.getOperand(op).isCPI() || I.getOperand(op).isJTI()) {
          // We found one. The addressing mode tells us the max displacement
          // from the PC that this instruction permits.

          // Basic size info comes from the TSFlags field.
          unsigned Bits = 0;
          unsigned Scale = 1;
          bool NegOk = false;
          bool IsSoImm = false;

          switch (Opc) {
          default:
            llvm_unreachable("Unknown addressing mode for CP reference!");

          // Taking the address of a CP entry.
          case ARM::LEApcrel:
          case ARM::LEApcrelJT:
            // This takes a SoImm, which is 8 bit immediate rotated. We'll
            // pretend the maximum offset is 255 * 4. Since each instruction
            // is 4 byte wide, this is always correct. We'll check for other
            // displacements that fit in a SoImm as well.
            Bits = 8;
            Scale = 4;
            NegOk = true;
            IsSoImm = true;
            break;
          case ARM::t2LEApcrel:
          case ARM::t2LEApcrelJT:
            Bits = 12;
            NegOk = true;
            break;
          case ARM::tLEApcrel:
          case ARM::tLEApcrelJT:
            Bits = 8;
            Scale = 4;
            break;

          case ARM::LDRBi12:
          case ARM::LDRi12:
          case ARM::LDRcp:
          case ARM::t2LDRpci:
            Bits = 12;  // +-offset_12
            NegOk = true;
            break;

          case ARM::tLDRpci:
            Bits = 8;
            Scale = 4;  // +(offset_8*4)
            break;

          case ARM::VLDRD:
          case ARM::VLDRS:
            Bits = 8;
            Scale = 4;  // +-(offset_8*4)
            NegOk = true;
            break;

          case ARM::tLDRHi:
            Bits = 5;
            Scale = 2;  // +(offset_5*2)
            break;
          }
          // Remember that this is a user of a CP entry.
          unsigned CPI = I.getOperand(op).getIndex();
          if (I.getOperand(op).isJTI()) {
            JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
            CPI = JumpTableEntryIndices[CPI];
          }

          MachineInstr *CPEMI = CPEMIs[CPI];
          unsigned MaxOffs = ((1 << Bits)-1) * Scale;
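          // For example, the 8-bit, scale-4 encodings above give
          // ((1 << 8) - 1) * 4 = 1020 bytes of reach.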
          CPUsers.push_back(CPUser(&I, CPEMI, MaxOffs, NegOk, IsSoImm));

          // Increment corresponding CPEntry reference count.
          CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
          assert(CPE && "Cannot find a corresponding CPEntry!");
          CPE->RefCount++;

          // Instructions can only use one CP entry, don't bother scanning the
          // rest of the operands.
          break;
        }
    }
  }
}
/// getOffsetOf - Return the current offset of the specified machine instruction
/// from the start of the function. This offset changes as stuff is moved
/// around inside the function.
unsigned ARMConstantIslands::getOffsetOf(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();

  // The offset is composed of two things: the sum of the sizes of all MBB's
  // before this instruction's block, and the offset from the start of the block
  // it is in.
  unsigned Offset = BBInfo[MBB->getNumber()].Offset;

  // Sum instructions before MI in MBB.
  for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
    assert(I != MBB->end() && "Didn't find MI in its own basic block?");
    Offset += TII->getInstSizeInBytes(*I);
  }
  return Offset;
}
/// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
/// ID.
static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
                              const MachineBasicBlock *RHS) {
  return LHS->getNumber() < RHS->getNumber();
}
/// updateForInsertedWaterBlock - When a block is newly inserted into the
/// machine function, it upsets all of the block numbers. Renumber the blocks
/// and update the arrays that parallel this numbering.
void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
  // Renumber the MBB's to keep them consecutive.
  NewBB->getParent()->RenumberBlocks(NewBB);

  // Insert an entry into BBInfo to align it properly with the (newly
  // renumbered) block numbers.
  BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());

  // Next, update WaterList. Specifically, we need to add NewMBB as having
  // available water after it.
  water_iterator IP =
      std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
                       CompareMBBNumbers);
  WaterList.insert(IP, NewBB);
}
/// Split the basic block containing MI into two blocks, which are joined by
/// an unconditional branch. Update data structures and renumber blocks to
/// account for this change, and return the newly created block.
MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
  MachineBasicBlock *OrigBB = MI->getParent();

  // Create a new MBB for the code after the OrigBB.
  MachineBasicBlock *NewBB =
      MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
  MachineFunction::iterator MBBI = ++OrigBB->getIterator();
  MF->insert(MBBI, NewBB);

  // Splice the instructions starting with MI over to NewBB.
  NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());

  // Add an unconditional branch from OrigBB to NewBB.
  // Note the new unconditional branch is not being recorded.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond to anything in the source.
  unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
  if (!isThumb)
    BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
  else
    BuildMI(OrigBB, DebugLoc(), TII->get(Opc))
        .addMBB(NewBB)
        .add(predOps(ARMCC::AL));
  ++NumSplit;

  // Update the CFG. All succs of OrigBB are now succs of NewBB.
  NewBB->transferSuccessors(OrigBB);

  // OrigBB branches to NewBB.
  OrigBB->addSuccessor(NewBB);

  // Update internal data structures to account for the newly inserted MBB.
  // This is almost the same as updateForInsertedWaterBlock, except that
  // the Water goes after OrigBB, not NewBB.
  MF->RenumberBlocks(NewBB);

  // Insert an entry into BBInfo to align it properly with the (newly
  // renumbered) block numbers.
  BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());

  // Next, update WaterList. Specifically, we need to add OrigMBB as having
  // available water after it (but not if it's already there, which happens
  // when splitting before a conditional branch that is followed by an
  // unconditional branch - in that case we want to insert NewBB).
  water_iterator IP =
      std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
                       CompareMBBNumbers);
  MachineBasicBlock* WaterBB = *IP;
  if (WaterBB == OrigBB)
    WaterList.insert(std::next(IP), NewBB);
  else
    WaterList.insert(IP, OrigBB);
  NewWaterList.insert(OrigBB);

  // Figure out how large the OrigBB is. As the first half of the original
  // block, it cannot contain a tablejump. The size includes
  // the new jump we added. (It should be possible to do this without
  // recounting everything, but it's very confusing, and this is rarely
  // worth it.)
  computeBlockSize(MF, OrigBB, BBInfo[OrigBB->getNumber()]);

  // Figure out how large the NewMBB is. As the second half of the original
  // block, it may contain a tablejump.
  computeBlockSize(MF, NewBB, BBInfo[NewBB->getNumber()]);

  // All BBOffsets following these blocks must be modified.
  adjustBBOffsetsAfter(OrigBB);

  return NewBB;
}
/// getUserOffset - Compute the offset of U.MI as seen by the hardware
/// displacement computation. Update U.KnownAlignment to match its current
/// basic block location.
unsigned ARMConstantIslands::getUserOffset(CPUser &U) const {
  unsigned UserOffset = getOffsetOf(U.MI);
  const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()];
  unsigned KnownBits = BBI.internalKnownBits();

  // The value read from PC is offset from the actual instruction address.
  UserOffset += (isThumb ? 4 : 8);

  // Because of inline assembly, we may not know the alignment (mod 4) of U.MI.
  // Make sure U.getMaxDisp() returns a constrained range.
  U.KnownAlignment = (KnownBits >= 2);

  // On Thumb, offsets==2 mod 4 are rounded down by the hardware for
  // purposes of the displacement computation; compensate for that here.
  // For unknown alignments, getMaxDisp() constrains the range instead.
  if (isThumb && U.KnownAlignment)
    UserOffset &= ~3u;

  return UserOffset;
}
/// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
/// reference) is within MaxDisp of TrialOffset (a proposed location of a
/// constant pool entry).
/// UserOffset is computed by getUserOffset above to include PC adjustments. If
/// the mod 4 alignment of UserOffset is not known, the uncertainty must be
/// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that.
bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset,
                                         unsigned TrialOffset, unsigned MaxDisp,
                                         bool NegativeOK, bool IsSoImm) {
  if (UserOffset <= TrialOffset) {
    // User before the Trial.
    if (TrialOffset - UserOffset <= MaxDisp)
      return true;
    // FIXME: Make use full range of soimm values.
  } else if (NegativeOK) {
    if (UserOffset - TrialOffset <= MaxDisp)
      return true;
    // FIXME: Make use full range of soimm values.
  }
  return false;
}
/// isWaterInRange - Returns true if a CPE placed after the specified
/// Water (a basic block) will be in range for the specific MI.
///
/// Compute how much the function will grow by inserting a CPE after Water.
bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
                                        MachineBasicBlock* Water, CPUser &U,
                                        unsigned &Growth) {
  unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
  unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
  unsigned NextBlockOffset, NextBlockAlignment;
  MachineFunction::const_iterator NextBlock = Water->getIterator();
  if (++NextBlock == MF->end()) {
    NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
    NextBlockAlignment = 0;
  } else {
    NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
    NextBlockAlignment = NextBlock->getAlignment();
  }
  unsigned Size = U.CPEMI->getOperand(2).getImm();
  unsigned CPEEnd = CPEOffset + Size;

  // The CPE may be able to hide in the alignment padding before the next
  // block. It may also cause more padding to be required if it is more aligned
  // than the next block.
  if (CPEEnd > NextBlockOffset) {
    Growth = CPEEnd - NextBlockOffset;
    // Compute the padding that would go at the end of the CPE to align the next
    // block.
    Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);

    // If the CPE is to be inserted before the instruction, that will raise
    // the offset of the instruction. Also account for unknown alignment padding
    // in blocks between CPE and the user.
    if (CPEOffset < UserOffset)
      UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
  } else
    // CPE fits in existing padding.
    Growth = 0;

  return isOffsetInRange(UserOffset, CPEOffset, U);
}
/// isCPEntryInRange - Returns true if the distance between specific MI and
/// specific ConstPool entry instruction can fit in MI's displacement field.
bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
                                          MachineInstr *CPEMI, unsigned MaxDisp,
                                          bool NegOk, bool DoDump) {
  unsigned CPEOffset = getOffsetOf(CPEMI);

  if (DoDump) {
    DEBUG({
      unsigned Block = MI->getParent()->getNumber();
      const BasicBlockInfo &BBI = BBInfo[Block];
      dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
             << " max delta=" << MaxDisp
             << format(" insn address=%#x", UserOffset) << " in "
             << printMBBReference(*MI->getParent()) << ": "
             << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
             << format("CPE address=%#x offset=%+d: ", CPEOffset,
                       int(CPEOffset - UserOffset));
    });
  }

  return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
}
#ifndef NDEBUG
/// BBIsJumpedOver - Return true if the specified basic block's only predecessor
/// unconditionally branches to its only successor.
static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
  if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
    return false;

  MachineBasicBlock *Succ = *MBB->succ_begin();
  MachineBasicBlock *Pred = *MBB->pred_begin();
  MachineInstr *PredMI = &Pred->back();
  if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
      || PredMI->getOpcode() == ARM::t2B)
    return PredMI->getOperand(0).getMBB() == Succ;
  return false;
}
#endif
void ARMConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
  unsigned BBNum = BB->getNumber();
  for (unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
    // Get the offset and known bits at the end of the layout predecessor.
    // Include the alignment of the current block.
    unsigned LogAlign = MF->getBlockNumbered(i)->getAlignment();
    unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
    unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);

    // This is where block i begins. Stop if the offset is already correct,
    // and we have updated 2 blocks. This is the maximum number of blocks
    // changed before calling this function.
    if (i > BBNum + 2 &&
        BBInfo[i].Offset == Offset &&
        BBInfo[i].KnownBits == KnownBits)
      break;

    BBInfo[i].Offset = Offset;
    BBInfo[i].KnownBits = KnownBits;
  }
}
/// decrementCPEReferenceCount - find the constant pool entry with index CPI
/// and instruction CPEMI, and decrement its refcount. If the refcount
/// becomes 0 remove the entry and instruction. Returns true if we removed
/// the entry, false if we didn't.
bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
                                                    MachineInstr *CPEMI) {
  // Find the old entry. Eliminate it if it is no longer used.
  CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
  assert(CPE && "Unexpected!");
  if (--CPE->RefCount == 0) {
    removeDeadCPEMI(CPEMI);
    CPE->CPEMI = nullptr;
    --NumCPEs;
    return true;
  }
  return false;
}

unsigned ARMConstantIslands::getCombinedIndex(const MachineInstr *CPEMI) {
  if (CPEMI->getOperand(1).isCPI())
    return CPEMI->getOperand(1).getIndex();

  return JumpTableEntryIndices[CPEMI->getOperand(1).getIndex()];
}
/// findInRangeCPEntry - see if the currently referenced CPE is in range;
/// if not, see if an in-range clone of the CPE is in range, and if so,
/// change the data structures so the user references the clone. Returns:
/// 0 = no existing entry found
/// 1 = entry found, and there were no code insertions or deletions
/// 2 = entry found, and there were code insertions or deletions
int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) {
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI = U.CPEMI;

  // Check to see if the CPE is already in-range.
  if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
                       true)) {
    DEBUG(dbgs() << "In range\n");
    return 1;
  }

  // No. Look for previously created clones of the CPE that are in range.
  unsigned CPI = getCombinedIndex(CPEMI);
  std::vector<CPEntry> &CPEs = CPEntries[CPI];
  for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
    // We already tried this one
    if (CPEs[i].CPEMI == CPEMI)
      continue;
    // Removing CPEs can leave empty entries, skip
    if (CPEs[i].CPEMI == nullptr)
      continue;
    if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
                         U.NegOk)) {
      DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
                   << CPEs[i].CPI << "\n");
      // Point the CPUser node to the replacement
      U.CPEMI = CPEs[i].CPEMI;
      // Change the CPI in the instruction operand to refer to the clone.
      for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
        if (UserMI->getOperand(j).isCPI()) {
          UserMI->getOperand(j).setIndex(CPEs[i].CPI);
          break;
        }
      // Adjust the refcount of the clone...
      CPEs[i].RefCount++;
      // ...and the original. If we didn't remove the old entry, none of the
      // addresses changed, so we don't need another pass.
      return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
    }
  }
  return 0;
}
/// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
/// the specific unconditional branch instruction.
static inline unsigned getUnconditionalBrDisp(int Opc) {
  switch (Opc) {
  case ARM::tB:
    return ((1<<10)-1)*2;
  case ARM::t2B:
    return ((1<<23)-1)*2;
  default:
    break;
  }
  return ((1<<23)-1)*4;
}
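// The constants above match the branch ranges: ((1<<10)-1)*2 is about 2KB for
// the Thumb-1 unconditional branch, ((1<<23)-1)*2 is about 16MB for the
// Thumb-2 one, and ((1<<23)-1)*4 is about 32MB for the ARM-mode fallback.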
/// findAvailableWater - Look for an existing entry in the WaterList in which
/// we can place the CPE referenced from U so it's within range of U's MI.
/// Returns true if found, false if not. If it returns true, WaterIter
/// is set to the WaterList entry. For Thumb, prefer water that will not
/// introduce padding to water that will. To ensure that this pass
/// terminates, the CPE location for a particular CPUser is only allowed to
/// move to a lower address, so search backward from the end of the list and
/// prefer the first water that is in range.
bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
                                            water_iterator &WaterIter,
                                            bool CloserWater) {
  if (WaterList.empty())
    return false;

  unsigned BestGrowth = ~0u;
  // The nearest water without splitting the UserBB is right after it.
  // If the distance is still large (we have a big BB), then we need to split it
  // if we don't converge after certain iterations. This helps the following
  // situation to converge:
  //   BB0:
  //     Big BB
  //   BB1:
  //     Constant Pool
  // When a CP access is out of range, BB0 may be used as water. However,
  // inserting islands between BB0 and BB1 makes other accesses out of range.
  MachineBasicBlock *UserBB = U.MI->getParent();
  unsigned MinNoSplitDisp =
      BBInfo[UserBB->getNumber()].postOffset(getCPELogAlign(U.CPEMI));
  if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
    return false;
  for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
       --IP) {
    MachineBasicBlock* WaterBB = *IP;
    // Check if water is in range and is either at a lower address than the
    // current "high water mark" or a new water block that was created since
    // the previous iteration by inserting an unconditional branch. In the
    // latter case, we want to allow resetting the high water mark back to
    // this new water since we haven't seen it before. Inserting branches
    // should be relatively uncommon and when it does happen, we want to be
    // sure to take advantage of it for all the CPEs near that block, so that
    // we don't insert more branches than necessary.
    // When CloserWater is true, we try to find the lowest address after (or
    // equal to) user MI's BB no matter of padding growth.
    unsigned Growth;
    if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
        (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
         NewWaterList.count(WaterBB) || WaterBB == U.MI->getParent()) &&
        Growth < BestGrowth) {
      // This is the least amount of required padding seen so far.
      BestGrowth = Growth;
      WaterIter = IP;
      DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
                   << " Growth=" << Growth << '\n');

      if (CloserWater && WaterBB == U.MI->getParent())
        return true;
      // Keep looking unless it is perfect and we're not looking for the lowest
      // possible address.
      if (!CloserWater && BestGrowth == 0)
        return true;
    }
    if (IP == B)
      break;
  }
  return BestGrowth != ~0u;
}
/// createNewWater - No existing WaterList entry will work for
/// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
/// block is used if in range, and the conditional branch munged so control
/// flow is correct. Otherwise the block is split to create a hole with an
/// unconditional branch around it. In either case NewMBB is set to a
/// block following which the new island can be inserted (the WaterList
/// is not adjusted).
void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
                                        unsigned UserOffset,
                                        MachineBasicBlock *&NewMBB) {
  CPUser &U = CPUsers[CPUserIndex];
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI = U.CPEMI;
  unsigned CPELogAlign = getCPELogAlign(CPEMI);
  MachineBasicBlock *UserMBB = UserMI->getParent();
  const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];

  // If the block does not end in an unconditional branch already, and if the
  // end of the block is within range, make new water there. (The addition
  // below is for the unconditional branch we will be adding: 4 bytes on ARM +
  // Thumb2, 2 on Thumb1.)
  if (BBHasFallthrough(UserMBB)) {
    // Size of branch to insert.
    unsigned Delta = isThumb1 ? 2 : 4;
    // Compute the offset where the CPE will begin.
    unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;

    if (isOffsetInRange(UserOffset, CPEOffset, U)) {
      DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
                   << format(", expected CPE offset %#x\n", CPEOffset));
      NewMBB = &*++UserMBB->getIterator();
      // Add an unconditional branch from UserMBB to fallthrough block. Record
      // it for branch lengthening; this new branch will not get out of range,
      // but if the preceding conditional branch is out of range, the targets
      // will be exchanged, and the altered branch may be out of range, so the
      // machinery has to know about it.
      int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
      if (!isThumb)
        BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
      else
        BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr))
            .addMBB(NewMBB)
            .add(predOps(ARMCC::AL));
      unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
      ImmBranches.push_back(ImmBranch(&UserMBB->back(),
                                      MaxDisp, false, UncondBr));
      computeBlockSize(MF, UserMBB, BBInfo[UserMBB->getNumber()]);
      adjustBBOffsetsAfter(UserMBB);
      return;
    }
  }
  // What a big block. Find a place within the block to split it. This is a
  // little tricky on Thumb1 since instructions are 2 bytes and constant pool
  // entries are 4 bytes: if instruction I references island CPE, and
  // instruction I+1 references CPE', it will not work well to put CPE as far
  // forward as possible, since then CPE' cannot immediately follow it (that
  // location is 2 bytes farther away from I+1 than CPE was from I) and we'd
  // need to create a new island. So, we make a first guess, then walk through
  // the instructions between the one currently being looked at and the
  // possible insertion point, and make sure any other instructions that
  // reference CPEs will be able to use the same island area; if not, we back
  // up the insertion point.

  // Try to split the block so it's fully aligned. Compute the latest split
  // point where we can add a 4-byte branch instruction, and then align to
  // LogAlign which is the largest possible alignment in the function.
  unsigned LogAlign = MF->getAlignment();
  assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
  unsigned KnownBits = UserBBI.internalKnownBits();
  unsigned UPad = UnknownPadding(LogAlign, KnownBits);
  unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
  DEBUG(dbgs() << format("Split in middle of big block before %#x",
                         BaseInsertOffset));

  // The 4 in the following is for the unconditional branch we'll be inserting
  // (allows for long branch on Thumb1). Alignment of the island is handled
  // inside isOffsetInRange.
  BaseInsertOffset -= 4;

  DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
               << " la=" << LogAlign
               << " kb=" << KnownBits
               << " up=" << UPad << '\n');
  // This could point off the end of the block if we've already got constant
  // pool entries following this block; only the last one is in the water list.
  // Back past any possible branches (allow for a conditional and a maximally
  // long unconditional).
  if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
    // Ensure BaseInsertOffset is larger than the offset of the instruction
    // following UserMI so that the loop which searches for the split point
    // iterates at least once.
    BaseInsertOffset =
        std::max(UserBBI.postOffset() - UPad - 8,
                 UserOffset + TII->getInstSizeInBytes(*UserMI) + 1);
    DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
  }
  unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
    CPEMI->getOperand(2).getImm();
  MachineBasicBlock::iterator MI = UserMI;
  ++MI;
  unsigned CPUIndex = CPUserIndex+1;
  unsigned NumCPUsers = CPUsers.size();
  MachineInstr *LastIT = nullptr;
  for (unsigned Offset = UserOffset + TII->getInstSizeInBytes(*UserMI);
       Offset < BaseInsertOffset;
       Offset += TII->getInstSizeInBytes(*MI), MI = std::next(MI)) {
    assert(MI != UserMBB->end() && "Fell off end of block");
    if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == &*MI) {
      CPUser &U = CPUsers[CPUIndex];
      if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
        // Shift insertion point by one unit of alignment so it is within reach.
        BaseInsertOffset -= 1u << LogAlign;
        EndInsertOffset -= 1u << LogAlign;
      }
      // This is overly conservative, as we don't account for CPEMIs being
      // reused within the block, but it doesn't matter much. Also assume CPEs
      // are added in order with alignment padding. We may eventually be able
      // to pack the aligned CPEs better.
      EndInsertOffset += U.CPEMI->getOperand(2).getImm();
      CPUIndex++;
    }

    // Remember the last IT instruction.
    if (MI->getOpcode() == ARM::t2IT)
      LastIT = &*MI;
  }

  --MI;

  // Avoid splitting an IT block.
  if (LastIT) {
    unsigned PredReg = 0;
    ARMCC::CondCodes CC = getITInstrPredicate(*MI, PredReg);
    if (CC != ARMCC::AL)
      MI = LastIT;
  }

  // We really must not split an IT block.
  DEBUG(unsigned PredReg;
        assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL));

  NewMBB = splitBlockBeforeInstr(&*MI);
}
/// handleConstantPoolUser - Analyze the specified user, checking to see if it
/// is out-of-range. If so, pick up the constant pool value and move it some
/// place in-range. Return true if we changed any addresses (thus must run
/// another pass of branch lengthening), false otherwise.
bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
                                                bool CloserWater) {
  CPUser &U = CPUsers[CPUserIndex];
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI = U.CPEMI;
  unsigned CPI = getCombinedIndex(CPEMI);
  unsigned Size = CPEMI->getOperand(2).getImm();
  // Compute this only once, it's expensive.
  unsigned UserOffset = getUserOffset(U);

  // See if the current entry is within range, or there is a clone of it
  // in range.
  int result = findInRangeCPEntry(U, UserOffset);
  if (result==1) return false;
  else if (result==2) return true;

  // No existing clone of this CPE is within range.
  // We will be generating a new clone. Get a UID for it.
  unsigned ID = AFI->createPICLabelUId();

  // Look for water where we can place this CPE.
  MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
  MachineBasicBlock *NewMBB;
  water_iterator IP;
  if (findAvailableWater(U, UserOffset, IP, CloserWater)) {
    DEBUG(dbgs() << "Found water in range\n");
    MachineBasicBlock *WaterBB = *IP;

    // If the original WaterList entry was "new water" on this iteration,
    // propagate that to the new island. This is just keeping NewWaterList
    // updated to match the WaterList, which will be updated below.
    if (NewWaterList.erase(WaterBB))
      NewWaterList.insert(NewIsland);

    // The new CPE goes before the following block (NewMBB).
    NewMBB = &*++WaterBB->getIterator();
  } else {
    DEBUG(dbgs() << "No water found\n");
    createNewWater(CPUserIndex, UserOffset, NewMBB);

    // splitBlockBeforeInstr adds to WaterList, which is important when it is
    // called while handling branches so that the water will be seen on the
    // next iteration for constant pools, but in this context, we don't want
    // it. Check for this so it will be removed from the WaterList.
    // Also remove any entry from NewWaterList.
    MachineBasicBlock *WaterBB = &*--NewMBB->getIterator();
    IP = find(WaterList, WaterBB);
    if (IP != WaterList.end())
      NewWaterList.erase(WaterBB);

    // We are adding new water. Update NewWaterList.
    NewWaterList.insert(NewIsland);
  }

  // Remove the original WaterList entry; we want subsequent insertions in
  // this vicinity to go after the one we're about to insert. This
  // considerably reduces the number of times we have to move the same CPE
  // more than once and is also important to ensure the algorithm terminates.
  if (IP != WaterList.end())
    WaterList.erase(IP);

  // Okay, we know we can put an island before NewMBB now, do it!
  MF->insert(NewMBB->getIterator(), NewIsland);

  // Update internal data structures to account for the newly inserted MBB.
  updateForInsertedWaterBlock(NewIsland);

  // Now that we have an island to add the CPE to, clone the original CPE and
  // add it to the island.
  U.HighWaterMark = NewIsland;
  U.CPEMI = BuildMI(NewIsland, DebugLoc(), CPEMI->getDesc())
                .addImm(ID)
                .add(CPEMI->getOperand(1))
                .addImm(Size);
  CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
  ++NumCPEs;

  // Decrement the old entry, and remove it if refcount becomes 0.
  decrementCPEReferenceCount(CPI, CPEMI);

  // Mark the basic block as aligned as required by the const-pool entry.
  NewIsland->setAlignment(getCPELogAlign(U.CPEMI));

  // Increase the size of the island block to account for the new entry.
  BBInfo[NewIsland->getNumber()].Size += Size;
  adjustBBOffsetsAfter(&*--NewIsland->getIterator());

  // Finally, change the CPI in the instruction operand to be ID.
  for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
    if (UserMI->getOperand(i).isCPI()) {
      UserMI->getOperand(i).setIndex(ID);
      break;
    }

  DEBUG(dbgs() << "  Moved CPE to #" << ID << " CPI=" << CPI
               << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));

  return true;
}
/// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
/// sizes and offsets of impacted basic blocks.
void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
  MachineBasicBlock *CPEBB = CPEMI->getParent();
  unsigned Size = CPEMI->getOperand(2).getImm();
  CPEMI->eraseFromParent();
  BBInfo[CPEBB->getNumber()].Size -= Size;
  // All succeeding offsets have the current size value added in, fix this.
  if (CPEBB->empty()) {
    BBInfo[CPEBB->getNumber()].Size = 0;

    // This block no longer needs to be aligned.
    CPEBB->setAlignment(0);
  } else
    // Entries are sorted by descending alignment, so realign from the front.
    CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin()));

  adjustBBOffsetsAfter(CPEBB);
  // An island has only one predecessor BB and one successor BB. Check if
  // this BB's predecessor jumps directly to this BB's successor. This
  // shouldn't happen currently.
  assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
  // FIXME: remove the empty blocks after all the work is done?
}
/// removeUnusedCPEntries - Remove constant pool entries whose refcounts
/// are zero.
bool ARMConstantIslands::removeUnusedCPEntries() {
  unsigned MadeChange = false;
  for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
    std::vector<CPEntry> &CPEs = CPEntries[i];
    for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
      if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
        removeDeadCPEMI(CPEs[j].CPEMI);
        CPEs[j].CPEMI = nullptr;
        MadeChange = true;
      }
    }
  }
  return MadeChange;
}
/// isBBInRange - Returns true if the distance between specific MI and
/// specific BB can fit in MI's displacement field.
bool ARMConstantIslands::isBBInRange(MachineInstr *MI,MachineBasicBlock *DestBB,
                                     unsigned MaxDisp) {
  unsigned PCAdj      = isThumb ? 4 : 8;
  unsigned BrOffset   = getOffsetOf(MI) + PCAdj;
  unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;

  DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB)
               << " from " << printMBBReference(*MI->getParent())
               << " max delta=" << MaxDisp << " from " << getOffsetOf(MI)
               << " to " << DestOffset << " offset "
               << int(DestOffset - BrOffset) << "\t" << *MI);

  if (BrOffset <= DestOffset) {
    // Branch before the Dest.
    if (DestOffset-BrOffset <= MaxDisp)
      return true;
  } else {
    if (BrOffset-DestOffset <= MaxDisp)
      return true;
  }
  return false;
}
/// fixupImmediateBr - Fix up an immediate branch whose destination is too far
/// away to fit in its displacement field.
bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();

  // Check to see if the DestBB is already in-range.
  if (isBBInRange(MI, DestBB, Br.MaxDisp))
    return false;

  if (!Br.isCond)
    return fixupUnconditionalBr(Br);
  return fixupConditionalBr(Br);
}
/// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
/// too far away to fit in its displacement field. If the LR register has been
/// spilled in the epilogue, then we can use BL to implement a far jump.
/// Otherwise, add an intermediate branch instruction to a branch.
bool ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *MBB = MI->getParent();
  if (!isThumb1)
    llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");

  // Use BL to implement far jump.
  Br.MaxDisp = (1 << 21) * 2;
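  // (The Thumb BL encoding carries a 22-bit halfword-scaled offset, so the far
  // jump reaches roughly +/- 4MB.)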
  MI->setDesc(TII->get(ARM::tBfar));
  BBInfo[MBB->getNumber()].Size += 2;
  adjustBBOffsetsAfter(MBB);
  HasFarJump = true;
  ++NumUBrFixed;

  DEBUG(dbgs() << "  Changed B to long jump " << *MI);

  return true;
}
/// fixupConditionalBr - Fix up a conditional branch whose destination is too
/// far away to fit in its displacement field. It is converted to an inverse
/// conditional branch + an unconditional branch to the destination.
bool ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();

  // Add an unconditional branch to the destination and invert the branch
  // condition to jump over it:
  // blt L1
  // =>
  // bge L2
  // b   L1
  // L2:
  ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
  CC = ARMCC::getOppositeCondition(CC);
  unsigned CCReg = MI->getOperand(2).getReg();

  // If the branch is at the end of its MBB and that has a fall-through block,
  // direct the updated conditional branch to the fall-through block. Otherwise,
  // split the MBB before the next instruction.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *BMI = &MBB->back();
  bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);

  ++NumCBrFixed;
  if (BMI != MI) {
    if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
        BMI->getOpcode() == Br.UncondBr) {
      // Last MI in the BB is an unconditional branch. Can we simply invert the
      // condition and swap destinations:
      // beq L1
      // b   L2
      // =>
      // bne L2
      // b   L1
      MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
      if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
        DEBUG(dbgs() << "  Invert Bcc condition and swap its destination with "
                     << *BMI);
        BMI->getOperand(0).setMBB(DestBB);
        MI->getOperand(0).setMBB(NewDest);
        MI->getOperand(1).setImm(CC);
        return true;
      }
    }
  }

  if (NeedSplit) {
    splitBlockBeforeInstr(MI);
    // No need for the branch to the next block. We're adding an unconditional
    // branch to the destination.
    int delta = TII->getInstSizeInBytes(MBB->back());
    BBInfo[MBB->getNumber()].Size -= delta;
    MBB->back().eraseFromParent();

    // The conditional successor will be swapped between the BBs after this, so
    // update CFG.
    MBB->addSuccessor(DestBB);
    std::next(MBB->getIterator())->removeSuccessor(DestBB);

    // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
  }
  MachineBasicBlock *NextBB = &*++MBB->getIterator();

  DEBUG(dbgs() << "  Insert B to " << printMBBReference(*DestBB)
               << " also invert condition and change dest. to "
               << printMBBReference(*NextBB) << "\n");

  // Insert a new conditional branch and a new unconditional branch.
  // Also update the ImmBranch as well as adding a new entry for the new branch.
  BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
      .addMBB(NextBB).addImm(CC).addReg(CCReg);
  Br.MI = &MBB->back();
  BBInfo[MBB->getNumber()].Size += TII->getInstSizeInBytes(MBB->back());
  if (isThumb)
    BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr))
        .addMBB(DestBB)
        .add(predOps(ARMCC::AL));
  else
    BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
  BBInfo[MBB->getNumber()].Size += TII->getInstSizeInBytes(MBB->back());
  unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
  ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));

  // Remove the old conditional branch. It may or may not still be in MBB.
  BBInfo[MI->getParent()->getNumber()].Size -= TII->getInstSizeInBytes(*MI);
  MI->eraseFromParent();
  adjustBBOffsetsAfter(MBB);
  return true;
}
/// undoLRSpillRestore - Remove Thumb push / pop instructions that only spill
/// LR / restore LR to pc. FIXME: This is done here because it's only possible
/// to do this if tBfar is not used.
bool ARMConstantIslands::undoLRSpillRestore() {
  bool MadeChange = false;
  for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
    MachineInstr *MI = PushPopMIs[i];
    // First two operands are predicates.
    if (MI->getOpcode() == ARM::tPOP_RET &&
        MI->getOperand(2).getReg() == ARM::PC &&
        MI->getNumExplicitOperands() == 3) {
      // Create the new insn and copy the predicate from the old.
      BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET))
          .add(MI->getOperand(0))
          .add(MI->getOperand(1));
      MI->eraseFromParent();
      MadeChange = true;
    } else if (MI->getOpcode() == ARM::tPUSH &&
               MI->getOperand(2).getReg() == ARM::LR &&
               MI->getNumExplicitOperands() == 3) {
      // Just remove the push.
      MI->eraseFromParent();
      MadeChange = true;
    }
  }
  return MadeChange;
}
bool ARMConstantIslands::optimizeThumb2Instructions() {
  bool MadeChange = false;

  // Shrink ADR and LDR from constantpool.
  for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
    CPUser &U = CPUsers[i];
    unsigned Opcode = U.MI->getOpcode();
    unsigned NewOpc = 0;
    unsigned Scale = 1, Bits = 0;
    switch (Opcode) {
    default: break;
    case ARM::t2LEApcrel:
      if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
        NewOpc = ARM::tLEApcrel;
        Bits = 8;
        Scale = 4;
      }
      break;
    case ARM::t2LDRpci:
      if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
        NewOpc = ARM::tLDRpci;
        Bits = 8;
        Scale = 4;
      }
      break;
    }
    if (!NewOpc)
      continue;

    unsigned UserOffset = getUserOffset(U);
    unsigned MaxOffs = ((1 << Bits) - 1) * Scale;

    // Be conservative with inline asm.
    if (!U.KnownAlignment)
      MaxOffs -= 2;

    // FIXME: Check if offset is multiple of scale if scale is not 4.
    if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
      DEBUG(dbgs() << "Shrink: " << *U.MI);
      U.MI->setDesc(TII->get(NewOpc));
      MachineBasicBlock *MBB = U.MI->getParent();
      BBInfo[MBB->getNumber()].Size -= 2;
      adjustBBOffsetsAfter(MBB);
      ++NumT2CPShrunk;
      MadeChange = true;
    }
  }
  return MadeChange;
}
bool ARMConstantIslands::optimizeThumb2Branches() {
  bool MadeChange = false;

  // The order in which branches appear in ImmBranches is approximately their
  // order within the function body. By visiting later branches first, we reduce
  // the distance between earlier forward branches and their targets, making it
  // more likely that the cbn?z optimization, which can only apply to forward
  // branches, will succeed.
  for (unsigned i = ImmBranches.size(); i != 0; --i) {
    ImmBranch &Br = ImmBranches[i-1];
    unsigned Opcode = Br.MI->getOpcode();
    unsigned NewOpc = 0;
    unsigned Scale = 1, Bits = 0;
    switch (Opcode) {
    default: break;
    case ARM::t2B:
      NewOpc = ARM::tB;
      Bits = 11;
      Scale = 2;
      break;
    case ARM::t2Bcc:
      NewOpc = ARM::tBcc;
      Bits = 8;
      Scale = 2;
      break;
    }

    if (NewOpc) {
      unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
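      // With Bits=11/Scale=2 (tB) this evaluates to 2046 bytes, and with
      // Bits=8/Scale=2 (tBcc) to 254 bytes.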
      MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
      if (isBBInRange(Br.MI, DestBB, MaxOffs)) {
        DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
        Br.MI->setDesc(TII->get(NewOpc));
        MachineBasicBlock *MBB = Br.MI->getParent();
        BBInfo[MBB->getNumber()].Size -= 2;
        adjustBBOffsetsAfter(MBB);
        ++NumT2BrShrunk;
        MadeChange = true;
      }
    }

    Opcode = Br.MI->getOpcode();
    if (Opcode != ARM::tBcc)
      continue;

    // If the conditional branch doesn't kill CPSR, then CPSR can be liveout
    // so this transformation is not safe.
    if (!Br.MI->killsRegister(ARM::CPSR))
      continue;

    NewOpc = 0;
    unsigned PredReg = 0;
    ARMCC::CondCodes Pred = getInstrPredicate(*Br.MI, PredReg);
    if (Pred == ARMCC::EQ)
      NewOpc = ARM::tCBZ;
    else if (Pred == ARMCC::NE)
      NewOpc = ARM::tCBNZ;
    if (!NewOpc)
      continue;
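    // The aim below is to fold a preceding zero-compare into the branch, e.g.
    // (sketch):  tCMPi8 rN, #0 ; tBcc .LBB, eq   ==>   tCBZ rN, .LBB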
    MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
    // Check if the distance is within 126. The starting offset is adjusted by
    // -2 because the cmp will be eliminated.
    unsigned BrOffset = getOffsetOf(Br.MI) + 4 - 2;
    unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
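    // (CBZ / CBNZ encode a 6-bit, halfword-scaled, forward-only offset, which
    // is where the 0..126 byte window comes from.)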
    if (BrOffset < DestOffset && (DestOffset - BrOffset) <= 126) {
      MachineBasicBlock::iterator CmpMI = Br.MI;
      if (CmpMI != Br.MI->getParent()->begin()) {
        --CmpMI;
        if (CmpMI->getOpcode() == ARM::tCMPi8) {
          unsigned Reg = CmpMI->getOperand(0).getReg();
          Pred = getInstrPredicate(*CmpMI, PredReg);
          if (Pred == ARMCC::AL &&
              CmpMI->getOperand(1).getImm() == 0 &&
              isARMLowRegister(Reg)) {
            MachineBasicBlock *MBB = Br.MI->getParent();
            DEBUG(dbgs() << "Fold: " << *CmpMI << " and: " << *Br.MI);
            MachineInstr *NewBR =
                BuildMI(*MBB, CmpMI, Br.MI->getDebugLoc(), TII->get(NewOpc))
                    .addReg(Reg)
                    .addMBB(DestBB, Br.MI->getOperand(0).getTargetFlags());
            CmpMI->eraseFromParent();
            Br.MI->eraseFromParent();
            Br.MI = NewBR;
            BBInfo[MBB->getNumber()].Size -= 2;
            adjustBBOffsetsAfter(MBB);
            ++NumCBZ;
            MadeChange = true;
          }
        }
      }
    }
  }

  return MadeChange;
}
static bool isSimpleIndexCalc(MachineInstr &I, unsigned EntryReg,
                              unsigned BaseReg) {
  if (I.getOpcode() != ARM::t2ADDrs)
    return false;

  if (I.getOperand(0).getReg() != EntryReg)
    return false;

  if (I.getOperand(1).getReg() != BaseReg)
    return false;

  // FIXME: what about CC and IdxReg?
  return true;
}
/// \brief While trying to form a TBB/TBH instruction, we may (if the table
/// doesn't immediately follow the BR_JT) need access to the start of the
/// jump-table. We know one instruction that produces such a register; this
/// function works out whether that definition can be preserved to the BR_JT,
/// possibly by removing an intervening addition (which is usually needed to
/// calculate the actual entry to jump to).
bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI,
                                              MachineInstr *LEAMI,
                                              unsigned &DeadSize,
                                              bool &CanDeleteLEA,
                                              bool &BaseRegKill) {
  if (JumpMI->getParent() != LEAMI->getParent())
    return false;

  // Now we hope that we have at least these instructions in the basic block:
  //     BaseReg = t2LEA ...
  //     ...
  //     EntryReg = t2ADDrs BaseReg, ...
  //     ...
  //     t2BR_JT EntryReg
  //
  // We have to be very conservative about what we recognise here though. The
  // main perturbing factors to watch out for are:
  //     + Spills at any point in the chain: not direct problems but we would
  //       expect a blocking Def of the spilled register so in practice what we
  //       can do is limited.
  //     + EntryReg == BaseReg: this is the one situation we should allow a Def
  //       of BaseReg, but only if the t2ADDrs can be removed.
  //     + Some instruction other than t2ADDrs computing the entry. Not seen in
  //       the wild, but we should be careful.
  unsigned EntryReg = JumpMI->getOperand(0).getReg();
  unsigned BaseReg = LEAMI->getOperand(0).getReg();

  CanDeleteLEA = true;
  BaseRegKill = false;
  MachineInstr *RemovableAdd = nullptr;
  MachineBasicBlock::iterator I(LEAMI);
  for (++I; &*I != JumpMI; ++I) {
    if (isSimpleIndexCalc(*I, EntryReg, BaseReg)) {
      RemovableAdd = &*I;
      break;
    }

    for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
      const MachineOperand &MO = I->getOperand(K);
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (MO.isDef() && MO.getReg() == BaseReg)
        return false;
      if (MO.isUse() && MO.getReg() == BaseReg) {
        BaseRegKill = BaseRegKill || MO.isKill();
        CanDeleteLEA = false;
      }
    }
  }

  if (!RemovableAdd)
    return true;

  // Check the add really is removable, and that nothing else in the block
  // clobbers BaseReg.
  for (++I; &*I != JumpMI; ++I) {
    for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
      const MachineOperand &MO = I->getOperand(K);
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (MO.isDef() && MO.getReg() == BaseReg)
        return false;
      if (MO.isUse() && MO.getReg() == EntryReg)
        RemovableAdd = nullptr;
    }
  }

  if (RemovableAdd) {
    RemovableAdd->eraseFromParent();
    DeadSize += isThumb2 ? 4 : 2;
  } else if (BaseReg == EntryReg) {
    // The add wasn't removable, but clobbered the base for the TBB. So we can't
    // preserve anything.
    return false;
  }

  // We reached the end of the block without seeing another definition of
  // BaseReg (except, possibly the t2ADDrs, which was removed). BaseReg can be
  // used in the TBB/TBH if necessary.
  return true;
}
/// \brief Returns whether CPEMI is the first instruction in the block
/// immediately following JTMI (assumed to be a TBB or TBH terminator). If so,
/// we can switch the first register to PC and usually remove the address
/// calculation that preceded it.
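/// (A TB[BH] reads its table relative to the PC value just past the
/// instruction itself, so if the table sits there the base register can simply
/// become PC.)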
static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) {
  MachineFunction::iterator MBB = JTMI->getParent()->getIterator();
  MachineFunction *MF = MBB->getParent();
  ++MBB;

  return MBB != MF->end() && MBB->begin() != MBB->end() &&
         &*MBB->begin() == CPEMI;
}
static void RemoveDeadAddBetweenLEAAndJT(MachineInstr *LEAMI,
                                         MachineInstr *JumpMI,
                                         unsigned &DeadSize) {
  // Remove a dead add between the LEA and JT, which used to compute EntryReg,
  // but the JT now uses PC. Finds the last ADD (if any) that def's EntryReg
  // and is not clobbered / used.
  MachineInstr *RemovableAdd = nullptr;
  unsigned EntryReg = JumpMI->getOperand(0).getReg();

  // Find the last ADD to set EntryReg.
  MachineBasicBlock::iterator I(LEAMI);
  for (++I; &*I != JumpMI; ++I) {
    if (I->getOpcode() == ARM::t2ADDrs && I->getOperand(0).getReg() == EntryReg)
      RemovableAdd = &*I;
  }

  if (!RemovableAdd)
    return;

  // Ensure EntryReg is not clobbered or used.
  MachineBasicBlock::iterator J(RemovableAdd);
  for (++J; &*J != JumpMI; ++J) {
    for (unsigned K = 0, E = J->getNumOperands(); K != E; ++K) {
      const MachineOperand &MO = J->getOperand(K);
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (MO.isDef() && MO.getReg() == EntryReg)
        return;
      if (MO.isUse() && MO.getReg() == EntryReg)
        return;
    }
  }

  DEBUG(dbgs() << "Removing Dead Add: " << *RemovableAdd);
  RemovableAdd->eraseFromParent();
  DeadSize += 4;
}
static bool registerDefinedBetween(unsigned Reg,
                                   MachineBasicBlock::iterator From,
                                   MachineBasicBlock::iterator To,
                                   const TargetRegisterInfo *TRI) {
  for (auto I = From; I != To; ++I)
    if (I->modifiesRegister(Reg, TRI))
      return true;
  return false;
}
/// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
/// jumptables where possible.
bool ARMConstantIslands::optimizeThumb2JumpTables() {
  bool MadeChange = false;

  // FIXME: After the tables are shrunk, can we get rid of some of the
  // constantpool tables?
  MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  if (!MJTI) return false;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
    MachineInstr *MI = T2JumpTables[i];
    const MCInstrDesc &MCID = MI->getDesc();
    unsigned NumOps = MCID.getNumOperands();
    unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
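    // (The jump-table index operand is the last operand before the optional
    // two-operand predicate: condition code plus CPSR.)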
    MachineOperand JTOP = MI->getOperand(JTOpIdx);
    unsigned JTI = JTOP.getIndex();
    assert(JTI < JT.size());

    bool ByteOk = true;
    bool HalfWordOk = true;
    unsigned JTOffset = getOffsetOf(MI) + 4;
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
    for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
      MachineBasicBlock *MBB = JTBBs[j];
      unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
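      // TBB entries are unsigned bytes and TBH entries unsigned halfwords,
      // each scaled by 2 and added to the PC, so the reachable forward
      // distances are at most 510 and 131070 bytes respectively.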
      // Negative offset is not ok. FIXME: We should change BB layout to make
      // sure all the branches are forward.
      if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
        ByteOk = false;
      unsigned TBHLimit = ((1<<16)-1)*2;
      if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
        HalfWordOk = false;
      if (!ByteOk && !HalfWordOk)
        break;
    }

    if (!ByteOk && !HalfWordOk)
      continue;

    CPUser &User = CPUsers[JumpTableUserIndices[JTI]];
    MachineBasicBlock *MBB = MI->getParent();
    if (!MI->getOperand(0).isKill()) // FIXME: needed now?
      continue;

    unsigned DeadSize = 0;
    bool CanDeleteLEA = false;
    bool BaseRegKill = false;

    unsigned IdxReg = ~0U;
    bool IdxRegKill = true;
    if (isThumb2) {
      IdxReg = MI->getOperand(1).getReg();
      IdxRegKill = MI->getOperand(1).isKill();

      bool PreservedBaseReg =
          preserveBaseRegister(MI, User.MI, DeadSize, CanDeleteLEA, BaseRegKill);
      if (!jumpTableFollowsTB(MI, User.CPEMI) && !PreservedBaseReg)
        continue;
    } else {
      // We're in thumb-1 mode, so we must have something like:
      //   %idx = tLSLri %idx, 2
      //   %base = tLEApcrelJT
      //   %t = tLDRr %base, %idx
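      // (When compiling PIC/ROPI code, the loaded entry is additionally added
      //  to %base by a tADDrr, which is matched and removed below.)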
      unsigned BaseReg = User.MI->getOperand(0).getReg();

      if (User.MI->getIterator() == User.MI->getParent()->begin())
        continue;

      MachineInstr *Shift = User.MI->getPrevNode();
      if (Shift->getOpcode() != ARM::tLSLri ||
          Shift->getOperand(3).getImm() != 2 ||
          !Shift->getOperand(2).isKill())
        continue;
      IdxReg = Shift->getOperand(2).getReg();
      unsigned ShiftedIdxReg = Shift->getOperand(0).getReg();

      // It's important that IdxReg is live until the actual TBB/TBH. Most of
      // the range is checked later, but the LEA might still clobber it and not
      // actually get removed.
      if (BaseReg == IdxReg && !jumpTableFollowsTB(MI, User.CPEMI))
        continue;

      MachineInstr *Load = User.MI->getNextNode();
      if (Load->getOpcode() != ARM::tLDRr)
        continue;
      if (Load->getOperand(1).getReg() != BaseReg ||
          Load->getOperand(2).getReg() != ShiftedIdxReg ||
          !Load->getOperand(2).isKill())
        continue;

      // If we're in PIC mode, there should be another ADD following.
      auto *TRI = STI->getRegisterInfo();

      // %base cannot be redefined after the load as it will appear before
      // TBB/TBH like:
      //      %base =
      //      %base =
      //      tBB %base, %idx
      if (registerDefinedBetween(BaseReg, Load->getNextNode(), MBB->end(), TRI))
        continue;

      if (isPositionIndependentOrROPI) {
        MachineInstr *Add = Load->getNextNode();
        if (Add->getOpcode() != ARM::tADDrr ||
            Add->getOperand(2).getReg() != BaseReg ||
            Add->getOperand(3).getReg() != Load->getOperand(0).getReg() ||
            !Add->getOperand(3).isKill())
          continue;
        if (Add->getOperand(0).getReg() != MI->getOperand(0).getReg())
          continue;
        if (registerDefinedBetween(IdxReg, Add->getNextNode(), MI, TRI))
          // IdxReg gets redefined in the middle of the sequence.
          continue;
        Add->eraseFromParent();
        DeadSize += 2;
      } else {
        if (Load->getOperand(0).getReg() != MI->getOperand(0).getReg())
          continue;
        if (registerDefinedBetween(IdxReg, Load->getNextNode(), MI, TRI))
          // IdxReg gets redefined in the middle of the sequence.
          continue;
      }

      // Now safe to delete the load and lsl. The LEA will be removed later.
      CanDeleteLEA = true;
      Shift->eraseFromParent();
      Load->eraseFromParent();
      DeadSize += 4;
    }
    DEBUG(dbgs() << "Shrink JT: " << *MI);
    MachineInstr *CPEMI = User.CPEMI;
    unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
    if (!isThumb2)
      Opc = ByteOk ? ARM::tTBB_JT : ARM::tTBH_JT;

    MachineBasicBlock::iterator MI_JT = MI;
    MachineInstr *NewJTMI =
        BuildMI(*MBB, MI_JT, MI->getDebugLoc(), TII->get(Opc))
            .addReg(User.MI->getOperand(0).getReg(),
                    getKillRegState(BaseRegKill))
            .addReg(IdxReg, getKillRegState(IdxRegKill))
            .addJumpTableIndex(JTI, JTOP.getTargetFlags())
            .addImm(CPEMI->getOperand(0).getImm());
    DEBUG(dbgs() << printMBBReference(*MBB) << ": " << *NewJTMI);

    unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH;
    CPEMI->setDesc(TII->get(JTOpc));

    if (jumpTableFollowsTB(MI, User.CPEMI)) {
      NewJTMI->getOperand(0).setReg(ARM::PC);
      NewJTMI->getOperand(0).setIsKill(false);

      if (CanDeleteLEA) {
        if (isThumb2)
          RemoveDeadAddBetweenLEAAndJT(User.MI, MI, DeadSize);

        User.MI->eraseFromParent();
        DeadSize += isThumb2 ? 4 : 2;

        // The LEA was eliminated, the TBB instruction becomes the only new user
        // of the jump table.
        User.MI = NewJTMI;
        User.MaxDisp = 4;
        User.NegOk = false;
        User.IsSoImm = false;
        User.KnownAlignment = false;
      } else {
        // The LEA couldn't be eliminated, so we must add another CPUser to
        // record the TBB or TBH use.
        int CPEntryIdx = JumpTableEntryIndices[JTI];
        auto &CPEs = CPEntries[CPEntryIdx];
        auto Entry =
            find_if(CPEs, [&](CPEntry &E) { return E.CPEMI == User.CPEMI; });
        ++Entry->RefCount;
        CPUsers.emplace_back(CPUser(NewJTMI, User.CPEMI, 4, false, false));
      }
    }

    unsigned NewSize = TII->getInstSizeInBytes(*NewJTMI);
    unsigned OrigSize = TII->getInstSizeInBytes(*MI);
    MI->eraseFromParent();

    int Delta = OrigSize - NewSize + DeadSize;
    BBInfo[MBB->getNumber()].Size -= Delta;
    adjustBBOffsetsAfter(MBB);

    ++NumTBs;
    MadeChange = true;
  }

  return MadeChange;
}
/// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
/// jump tables always branch forwards, since that's what tbb and tbh need.
bool ARMConstantIslands::reorderThumb2JumpTables() {
  bool MadeChange = false;

  MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  if (!MJTI) return false;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
    MachineInstr *MI = T2JumpTables[i];
    const MCInstrDesc &MCID = MI->getDesc();
    unsigned NumOps = MCID.getNumOperands();
    unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
    MachineOperand JTOP = MI->getOperand(JTOpIdx);
    unsigned JTI = JTOP.getIndex();
    assert(JTI < JT.size());

    // We prefer if target blocks for the jump table come after the jump
    // instruction so we can use TB[BH]. Loop through the target blocks
    // and try to adjust them such that this is true.
    int JTNumber = MI->getParent()->getNumber();
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
    for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
      MachineBasicBlock *MBB = JTBBs[j];
      int DTNumber = MBB->getNumber();

      if (DTNumber < JTNumber) {
        // The destination precedes the switch. Try to move the block forward
        // so we have a positive offset.
        MachineBasicBlock *NewBB =
            adjustJTTargetBlockForward(MBB, MI->getParent());
        if (NewBB)
          MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}
MachineBasicBlock *ARMConstantIslands::
adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
  // If the destination block is terminated by an unconditional branch,
  // try to move it; otherwise, create a new block following the jump
  // table that branches back to the actual target. This is a very simple
  // heuristic. FIXME: We can definitely improve it.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  SmallVector<MachineOperand, 4> CondPrior;
  MachineFunction::iterator BBi = BB->getIterator();
  MachineFunction::iterator OldPrior = std::prev(BBi);

  // If the block terminator isn't analyzable, don't try to move the block.
  bool B = TII->analyzeBranch(*BB, TBB, FBB, Cond);

  // If the block ends in an unconditional branch, move it. The prior block
  // has to have an analyzable terminator for us to move this one. Be paranoid
  // and make sure we're not trying to move the entry block of the function.
  if (!B && Cond.empty() && BB != &MF->front() &&
      !TII->analyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
    BB->moveAfter(JTBB);
    OldPrior->updateTerminator();
    BB->updateTerminator();
    // Update numbering to account for the block being moved.
    MF->RenumberBlocks();
    ++NumJTMoved;
    return nullptr;
  }

  // Create a new MBB for the code after the jump BB.
  MachineBasicBlock *NewBB =
      MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
  MachineFunction::iterator MBBI = ++JTBB->getIterator();
  MF->insert(MBBI, NewBB);

  // Add an unconditional branch from NewBB to BB.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond directly to anything in the source.
  if (isThumb2)
    BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B))
        .addMBB(BB)
        .add(predOps(ARMCC::AL));
  else
    BuildMI(NewBB, DebugLoc(), TII->get(ARM::tB))
        .addMBB(BB)
        .add(predOps(ARMCC::AL));

  // Update internal data structures to account for the newly inserted MBB.
  MF->RenumberBlocks(NewBB);

  // Update the CFG.
  NewBB->addSuccessor(BB);
  JTBB->replaceSuccessor(BB, NewBB);

  ++NumJTInserted;
  return NewBB;
}
/// createARMConstantIslandPass - returns an instance of the constpool
/// island pass.
FunctionPass *llvm::createARMConstantIslandPass() {
  return new ARMConstantIslands();
}

INITIALIZE_PASS(ARMConstantIslands, "arm-cp-islands", ARM_CP_ISLANDS_OPT_NAME,
                false, false)