//===- ThreadSafetyTIL.cpp -------------------------------------*- C++ --*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT in the llvm repository for details.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"

#include <cassert>

using namespace clang;
using namespace threadSafety;
16 StringRef til::getUnaryOpcodeString(TIL_UnaryOpcode Op) {
18 case UOP_Minus: return "-";
19 case UOP_BitNot: return "~";
20 case UOP_LogicNot: return "!";
25 StringRef til::getBinaryOpcodeString(TIL_BinaryOpcode Op) {
27 case BOP_Mul: return "*";
28 case BOP_Div: return "/";
29 case BOP_Rem: return "%";
30 case BOP_Add: return "+";
31 case BOP_Sub: return "-";
32 case BOP_Shl: return "<<";
33 case BOP_Shr: return ">>";
34 case BOP_BitAnd: return "&";
35 case BOP_BitXor: return "^";
36 case BOP_BitOr: return "|";
37 case BOP_Eq: return "==";
38 case BOP_Neq: return "!=";
39 case BOP_Lt: return "<";
40 case BOP_Leq: return "<=";
41 case BOP_Cmp: return "<=>";
42 case BOP_LogicAnd: return "&&";
43 case BOP_LogicOr: return "||";
49 SExpr* Future::force() {
50 Status = FS_evaluating;
57 unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
58 unsigned Idx = Predecessors.size();
59 Predecessors.reserveCheck(1, Arena);
60 Predecessors.push_back(Pred);
61 for (SExpr *E : Args) {
62 if (Phi* Ph = dyn_cast<Phi>(E)) {
63 Ph->values().reserveCheck(1, Arena);
64 Ph->values().push_back(nullptr);
71 void BasicBlock::reservePredecessors(unsigned NumPreds) {
72 Predecessors.reserve(NumPreds, Arena);
73 for (SExpr *E : Args) {
74 if (Phi* Ph = dyn_cast<Phi>(E)) {
75 Ph->values().reserve(NumPreds, Arena);
81 // If E is a variable, then trace back through any aliases or redundant
82 // Phi nodes to find the canonical definition.
83 const SExpr *til::getCanonicalVal(const SExpr *E) {
85 if (auto *V = dyn_cast<Variable>(E)) {
86 if (V->kind() == Variable::VK_Let) {
91 if (const Phi *Ph = dyn_cast<Phi>(E)) {
92 if (Ph->status() == Phi::PH_SingleVal) {
103 // If E is a variable, then trace back through any aliases or redundant
104 // Phi nodes to find the canonical definition.
105 // The non-const version will simplify incomplete Phi nodes.
106 SExpr *til::simplifyToCanonicalVal(SExpr *E) {
108 if (auto *V = dyn_cast<Variable>(E)) {
109 if (V->kind() != Variable::VK_Let)
111 // Eliminate redundant variables, e.g. x = y, or x = 5,
112 // but keep anything more complicated.
113 if (til::ThreadSafetyTIL::isTrivial(V->definition())) {
119 if (auto *Ph = dyn_cast<Phi>(E)) {
120 if (Ph->status() == Phi::PH_Incomplete)
121 simplifyIncompleteArg(Ph);
122 // Eliminate redundant Phi nodes.
123 if (Ph->status() == Phi::PH_SingleVal) {
133 // Trace the arguments of an incomplete Phi node to see if they have the same
134 // canonical definition. If so, mark the Phi node as redundant.
135 // getCanonicalVal() will recursively call simplifyIncompletePhi().
136 void til::simplifyIncompleteArg(til::Phi *Ph) {
137 assert(Ph && Ph->status() == Phi::PH_Incomplete);
139 // eliminate infinite recursion -- assume that this node is not redundant.
140 Ph->setStatus(Phi::PH_MultiVal);
142 SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
143 for (unsigned i=1, n=Ph->values().size(); i<n; ++i) {
144 SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
146 continue; // Recursive reference to itself. Don't count.
148 return; // Status is already set to MultiVal.
151 Ph->setStatus(Phi::PH_SingleVal);
155 // Renumbers the arguments and instructions to have unique, sequential IDs.
156 int BasicBlock::renumberInstrs(int ID) {
157 for (auto *Arg : Args)
158 Arg->setID(this, ID++);
159 for (auto *Instr : Instrs)
160 Instr->setID(this, ID++);
161 TermInstr->setID(this, ID++);
165 // Sorts the CFGs blocks using a reverse post-order depth-first traversal.
166 // Each block will be written into the Blocks array in order, and its BlockID
167 // will be set to the index in the array. Sorting should start from the entry
168 // block, and ID should be the total number of blocks.
169 int BasicBlock::topologicalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
170 if (Visited) return ID;
172 for (auto *Block : successors())
173 ID = Block->topologicalSort(Blocks, ID);
174 // set ID and update block array in place.
175 // We may lose pointers to unreachable blocks.
178 Blocks[BlockID] = this;
182 // Performs a reverse topological traversal, starting from the exit block and
183 // following back-edges. The dominator is serialized before any predecessors,
184 // which guarantees that all blocks are serialized after their dominator and
185 // before their post-dominator (because it's a reverse topological traversal).
186 // ID should be initially set to 0.
188 // This sort assumes that (1) dominators have been computed, (2) there are no
189 // critical edges, and (3) the entry block is reachable from the exit block
190 // and no blocks are accessible via traversal of back-edges from the exit that
191 // weren't accessible via forward edges from the entry.
192 int BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
193 // Visited is assumed to have been set by the topologicalSort. This pass
194 // assumes !Visited means that we've visited this node before.
195 if (!Visited) return ID;
197 if (DominatorNode.Parent)
198 ID = DominatorNode.Parent->topologicalFinalSort(Blocks, ID);
199 for (auto *Pred : Predecessors)
200 ID = Pred->topologicalFinalSort(Blocks, ID);
201 assert(static_cast<size_t>(ID) < Blocks.size());
203 Blocks[BlockID] = this;
207 // Computes the immediate dominator of the current block. Assumes that all of
208 // its predecessors have already computed their dominators. This is achieved
209 // by visiting the nodes in topological order.
210 void BasicBlock::computeDominator() {
211 BasicBlock *Candidate = nullptr;
212 // Walk backwards from each predecessor to find the common dominator node.
213 for (auto *Pred : Predecessors) {
215 if (Pred->BlockID >= BlockID) continue;
216 // If we don't yet have a candidate for dominator yet, take this one.
217 if (Candidate == nullptr) {
221 // Walk the alternate and current candidate back to find a common ancestor.
222 auto *Alternate = Pred;
223 while (Alternate != Candidate) {
224 if (Candidate->BlockID > Alternate->BlockID)
225 Candidate = Candidate->DominatorNode.Parent;
227 Alternate = Alternate->DominatorNode.Parent;
230 DominatorNode.Parent = Candidate;
231 DominatorNode.SizeOfSubTree = 1;
234 // Computes the immediate post-dominator of the current block. Assumes that all
235 // of its successors have already computed their post-dominators. This is
236 // achieved visiting the nodes in reverse topological order.
237 void BasicBlock::computePostDominator() {
238 BasicBlock *Candidate = nullptr;
239 // Walk back from each predecessor to find the common post-dominator node.
240 for (auto *Succ : successors()) {
242 if (Succ->BlockID <= BlockID) continue;
243 // If we don't yet have a candidate for post-dominator yet, take this one.
244 if (Candidate == nullptr) {
248 // Walk the alternate and current candidate back to find a common ancestor.
249 auto *Alternate = Succ;
250 while (Alternate != Candidate) {
251 if (Candidate->BlockID < Alternate->BlockID)
252 Candidate = Candidate->PostDominatorNode.Parent;
254 Alternate = Alternate->PostDominatorNode.Parent;
257 PostDominatorNode.Parent = Candidate;
258 PostDominatorNode.SizeOfSubTree = 1;
262 // Renumber instructions in all blocks
263 void SCFG::renumberInstrs() {
265 for (auto *Block : Blocks)
266 InstrID = Block->renumberInstrs(InstrID);
270 static inline void computeNodeSize(BasicBlock *B,
271 BasicBlock::TopologyNode BasicBlock::*TN) {
272 BasicBlock::TopologyNode *N = &(B->*TN);
274 BasicBlock::TopologyNode *P = &(N->Parent->*TN);
275 // Initially set ID relative to the (as yet uncomputed) parent ID
276 N->NodeID = P->SizeOfSubTree;
277 P->SizeOfSubTree += N->SizeOfSubTree;
281 static inline void computeNodeID(BasicBlock *B,
282 BasicBlock::TopologyNode BasicBlock::*TN) {
283 BasicBlock::TopologyNode *N = &(B->*TN);
285 BasicBlock::TopologyNode *P = &(N->Parent->*TN);
286 N->NodeID += P->NodeID; // Fix NodeIDs relative to starting node.
291 // Normalizes a CFG. Normalization has a few major components:
292 // 1) Removing unreachable blocks.
293 // 2) Computing dominators and post-dominators
294 // 3) Topologically sorting the blocks into the "Blocks" array.
295 void SCFG::computeNormalForm() {
296 // Topologically sort the blocks starting from the entry block.
297 int NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
298 if (NumUnreachableBlocks > 0) {
299 // If there were unreachable blocks shift everything down, and delete them.
300 for (size_t I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
301 size_t NI = I - NumUnreachableBlocks;
302 Blocks[NI] = Blocks[I];
303 Blocks[NI]->BlockID = NI;
304 // FIXME: clean up predecessor pointers to unreachable blocks?
306 Blocks.drop(NumUnreachableBlocks);
309 // Compute dominators.
310 for (auto *Block : Blocks)
311 Block->computeDominator();
313 // Once dominators have been computed, the final sort may be performed.
314 int NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
315 assert(static_cast<size_t>(NumBlocks) == Blocks.size());
318 // Renumber the instructions now that we have a final sort.
321 // Compute post-dominators and compute the sizes of each node in the
323 for (auto *Block : Blocks.reverse()) {
324 Block->computePostDominator();
325 computeNodeSize(Block, &BasicBlock::DominatorNode);
327 // Compute the sizes of each node in the post-dominator tree and assign IDs in
328 // the dominator tree.
329 for (auto *Block : Blocks) {
330 computeNodeID(Block, &BasicBlock::DominatorNode);
331 computeNodeSize(Block, &BasicBlock::PostDominatorNode);
333 // Assign IDs in the post-dominator tree.
334 for (auto *Block : Blocks.reverse()) {
335 computeNodeID(Block, &BasicBlock::PostDominatorNode);