1 //===-- SpillPlacement.cpp - Optimal Spill Code Placement -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the spill code placement analysis.
12 // Each edge bundle corresponds to a node in a Hopfield network. Constraints on
13 // basic blocks are weighted by the block frequency and added to become the
14 // node Bias.
16 // Transparent basic blocks have the variable live through, but don't care if it
17 // is spilled or in a register. These blocks become connections in the Hopfield
18 // network, again weighted by block frequency.
20 // The Hopfield network minimizes (possibly locally) its energy function:
22 // E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
24 // The energy function represents the expected spill code execution frequency,
25 // or the cost of spilling. This is a Lyapunov function which never increases
26 // when a node is updated. It is guaranteed to converge to a local minimum.
28 //===----------------------------------------------------------------------===//
30 #include "SpillPlacement.h"
31 #include "llvm/ADT/BitVector.h"
32 #include "llvm/CodeGen/EdgeBundles.h"
33 #include "llvm/CodeGen/MachineBasicBlock.h"
34 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineLoopInfo.h"
37 #include "llvm/CodeGen/Passes.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ManagedStatic.h"
43 #define DEBUG_TYPE "spillplacement"
45 char SpillPlacement::ID = 0;
46 INITIALIZE_PASS_BEGIN(SpillPlacement, "spill-code-placement",
47 "Spill Code Placement Analysis", true, true)
48 INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
49 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
50 INITIALIZE_PASS_END(SpillPlacement, "spill-code-placement",
51 "Spill Code Placement Analysis", true, true)
53 char &llvm::SpillPlacementID = SpillPlacement::ID;
55 void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
57 AU.addRequired<MachineBlockFrequencyInfo>();
58 AU.addRequiredTransitive<EdgeBundles>();
59 AU.addRequiredTransitive<MachineLoopInfo>();
60 MachineFunctionPass::getAnalysisUsage(AU);
63 /// Node - Each edge bundle corresponds to a Hopfield node.
65 /// The node contains precomputed frequency data that only depends on the CFG,
66 /// but Bias and Links are computed each time placeSpills is called.
68 /// The node Value is positive when the variable should be in a register. The
69 /// value can change when linked nodes change, but convergence is very fast
70 /// because all weights are positive.
72 struct SpillPlacement::Node {
73 /// BiasN - Sum of blocks that prefer a spill.
75 /// BiasP - Sum of blocks that prefer a register.
78 /// Value - Output value of this node computed from the Bias and links.
79 /// This is always on of the values {-1, 0, 1}. A positive number means the
80 /// variable should go in a register through this bundle.
83 typedef SmallVector<std::pair<BlockFrequency, unsigned>, 4> LinkVector;
85 /// Links - (Weight, BundleNo) for all transparent blocks connecting to other
86 /// bundles. The weights are all positive block frequencies.
89 /// SumLinkWeights - Cached sum of the weights of all links + ThresHold.
90 BlockFrequency SumLinkWeights;
92 /// preferReg - Return true when this node prefers to be in a register.
93 bool preferReg() const {
94 // Undecided nodes (Value==0) go on the stack.
98 /// mustSpill - Return True if this node is so biased that it must spill.
99 bool mustSpill() const {
100 // We must spill if Bias < -sum(weights) or the MustSpill flag was set.
101 // BiasN is saturated when MustSpill is set, make sure this still returns
102 // true when the RHS saturates. Note that SumLinkWeights includes Threshold.
103 return BiasN >= BiasP + SumLinkWeights;
106 /// clear - Reset per-query data, but preserve frequencies that only depend on
108 void clear(const BlockFrequency &Threshold) {
109 BiasN = BiasP = Value = 0;
110 SumLinkWeights = Threshold;
114 /// addLink - Add a link to bundle b with weight w.
115 void addLink(unsigned b, BlockFrequency w) {
116 // Update cached sum.
119 // There can be multiple links to the same bundle, add them up.
120 for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
121 if (I->second == b) {
125 // This must be the first link to b.
126 Links.push_back(std::make_pair(w, b));
129 /// addBias - Bias this node.
130 void addBias(BlockFrequency freq, BorderConstraint direction) {
141 BiasN = BlockFrequency::getMaxFrequency();
146 /// update - Recompute Value from Bias and Links. Return true when node
147 /// preference changes.
148 bool update(const Node nodes[], const BlockFrequency &Threshold) {
149 // Compute the weighted sum of inputs.
150 BlockFrequency SumN = BiasN;
151 BlockFrequency SumP = BiasP;
152 for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I) {
153 if (nodes[I->second].Value == -1)
155 else if (nodes[I->second].Value == 1)
159 // Each weighted sum is going to be less than the total frequency of the
160 // bundle. Ideally, we should simply set Value = sign(SumP - SumN), but we
161 // will add a dead zone around 0 for two reasons:
163 // 1. It avoids arbitrary bias when all links are 0 as is possible during
164 // initial iterations.
165 // 2. It helps tame rounding errors when the links nominally sum to 0.
167 bool Before = preferReg();
168 if (SumN >= SumP + Threshold)
170 else if (SumP >= SumN + Threshold)
174 return Before != preferReg();
177 void getDissentingNeighbors(SparseSet<unsigned> &List,
178 const Node nodes[]) const {
179 for (const auto &Elt : Links) {
180 unsigned n = Elt.second;
181 // Neighbors that already have the same value are not going to
182 // change because of this node changing.
183 if (Value != nodes[n].Value)
189 bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
191 bundles = &getAnalysis<EdgeBundles>();
192 loops = &getAnalysis<MachineLoopInfo>();
194 assert(!nodes && "Leaking node array");
195 nodes = new Node[bundles->getNumBundles()];
197 TodoList.setUniverse(bundles->getNumBundles());
199 // Compute total ingoing and outgoing block frequencies for all bundles.
200 BlockFrequencies.resize(mf.getNumBlockIDs());
201 MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
202 setThreshold(MBFI->getEntryFreq());
204 unsigned Num = I.getNumber();
205 BlockFrequencies[Num] = MBFI->getBlockFreq(&I);
208 // We never change the function.
212 void SpillPlacement::releaseMemory() {
218 /// activate - mark node n as active if it wasn't already.
219 void SpillPlacement::activate(unsigned n) {
221 if (ActiveNodes->test(n))
224 nodes[n].clear(Threshold);
226 // Very large bundles usually come from big switches, indirect branches,
227 // landing pads, or loops with many 'continue' statements. It is difficult to
228 // allocate registers when so many different blocks are involved.
230 // Give a small negative bias to large bundles such that a substantial
231 // fraction of the connected blocks need to be interested before we consider
232 // expanding the region through the bundle. This helps compile time by
233 // limiting the number of blocks visited and the number of links in the
235 if (bundles->getBlocks(n).size() > 100) {
237 nodes[n].BiasN = (MBFI->getEntryFreq() / 16);
241 /// \brief Set the threshold for a given entry frequency.
243 /// Set the threshold relative to \c Entry. Since the threshold is used as a
244 /// bound on the open interval (-Threshold;Threshold), 1 is the minimum
246 void SpillPlacement::setThreshold(const BlockFrequency &Entry) {
247 // Apparently 2 is a good threshold when Entry==2^14, but we need to scale
248 // it. Divide by 2^13, rounding as appropriate.
249 uint64_t Freq = Entry.getFrequency();
250 uint64_t Scaled = (Freq >> 13) + bool(Freq & (1 << 12));
251 Threshold = std::max(UINT64_C(1), Scaled);
254 /// addConstraints - Compute node biases and weights from a set of constraints.
255 /// Set a bit in NodeMask for each active node.
256 void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
257 for (ArrayRef<BlockConstraint>::iterator I = LiveBlocks.begin(),
258 E = LiveBlocks.end(); I != E; ++I) {
259 BlockFrequency Freq = BlockFrequencies[I->Number];
262 if (I->Entry != DontCare) {
263 unsigned ib = bundles->getBundle(I->Number, 0);
265 nodes[ib].addBias(Freq, I->Entry);
268 // Live-out from block?
269 if (I->Exit != DontCare) {
270 unsigned ob = bundles->getBundle(I->Number, 1);
272 nodes[ob].addBias(Freq, I->Exit);
277 /// addPrefSpill - Same as addConstraints(PrefSpill)
278 void SpillPlacement::addPrefSpill(ArrayRef<unsigned> Blocks, bool Strong) {
279 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
281 BlockFrequency Freq = BlockFrequencies[*I];
284 unsigned ib = bundles->getBundle(*I, 0);
285 unsigned ob = bundles->getBundle(*I, 1);
288 nodes[ib].addBias(Freq, PrefSpill);
289 nodes[ob].addBias(Freq, PrefSpill);
293 void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
294 for (ArrayRef<unsigned>::iterator I = Links.begin(), E = Links.end(); I != E;
296 unsigned Number = *I;
297 unsigned ib = bundles->getBundle(Number, 0);
298 unsigned ob = bundles->getBundle(Number, 1);
300 // Ignore self-loops.
305 BlockFrequency Freq = BlockFrequencies[Number];
306 nodes[ib].addLink(ob, Freq);
307 nodes[ob].addLink(ib, Freq);
311 bool SpillPlacement::scanActiveBundles() {
312 RecentPositive.clear();
313 for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
315 // A node that must spill, or a node without any links is not going to
316 // change its value ever again, so exclude it from iterations.
317 if (nodes[n].mustSpill())
319 if (nodes[n].preferReg())
320 RecentPositive.push_back(n);
322 return !RecentPositive.empty();
325 bool SpillPlacement::update(unsigned n) {
326 if (!nodes[n].update(nodes, Threshold))
328 nodes[n].getDissentingNeighbors(TodoList, nodes);
332 /// iterate - Repeatedly update the Hopfield nodes until stability or the
333 /// maximum number of iterations is reached.
334 void SpillPlacement::iterate() {
335 // We do not need to push those node in the todolist.
336 // They are already been proceeded as part of the previous iteration.
337 RecentPositive.clear();
339 // Since the last iteration, the todolist have been augmented by calls
340 // to addConstraints, addLinks, and co.
341 // Update the network energy starting at this new frontier.
342 // The call to ::update will add the nodes that changed into the todolist.
343 unsigned Limit = bundles->getNumBundles() * 10;
344 while(Limit-- > 0 && !TodoList.empty()) {
345 unsigned n = TodoList.pop_back_val();
348 if (nodes[n].preferReg())
349 RecentPositive.push_back(n);
353 void SpillPlacement::prepare(BitVector &RegBundles) {
354 RecentPositive.clear();
356 // Reuse RegBundles as our ActiveNodes vector.
357 ActiveNodes = &RegBundles;
358 ActiveNodes->clear();
359 ActiveNodes->resize(bundles->getNumBundles());
363 SpillPlacement::finish() {
364 assert(ActiveNodes && "Call prepare() first");
366 // Write preferences back to ActiveNodes.
368 for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n))
369 if (!nodes[n].preferReg()) {
370 ActiveNodes->reset(n);
373 ActiveNodes = nullptr;