//===- FunctionComparator.cpp - Function Comparator ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the FunctionComparator and GlobalNumberState classes
// which are used by the MergeFunctions pass for comparing functions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/FunctionComparator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "functioncomparator"

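// Every cmp* routine below returns a three-way result in the style of
// memcmp: negative if the left value orders before the right, positive if
// after, and 0 if the two are equivalent. Together these helpers define a
// total order over functions, which is what lets MergeFunctions keep
// candidates in an ordered container instead of comparing every pair.
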
int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
  if (L < R) return -1;
  if (L > R) return 1;
  return 0;
}

int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const {
  if ((int)L < (int)R) return -1;
  if ((int)L > (int)R) return 1;
  return 0;
}

int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
  if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
    return Res;
  if (L.ugt(R)) return 1;
  if (R.ugt(L)) return -1;
  return 0;
}

int FunctionComparator::cmpAPFloats(const APFloat &L, const APFloat &R) const {
  // Floats are ordered first by semantics (i.e. float, double, half, etc.),
  // then by value interpreted as a bitstring (aka APInt).
  const fltSemantics &SL = L.getSemantics(), &SR = R.getSemantics();
  if (int Res = cmpNumbers(APFloat::semanticsPrecision(SL),
                           APFloat::semanticsPrecision(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsMaxExponent(SL),
                           APFloat::semanticsMaxExponent(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsMinExponent(SL),
                           APFloat::semanticsMinExponent(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsSizeInBits(SL),
                           APFloat::semanticsSizeInBits(SR)))
    return Res;
  return cmpAPInts(L.bitcastToAPInt(), R.bitcastToAPInt());
}

int FunctionComparator::cmpMem(StringRef L, StringRef R) const {
  // Prevent heavy comparison, compare sizes first.
  if (int Res = cmpNumbers(L.size(), R.size()))
    return Res;

  // Compare strings lexicographically only when it is necessary: only when
  // strings are equal in size.
  return L.compare(R);
}

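// Compare two attribute lists slot by slot. Attribute itself supplies a
// total order (operator<), so a lexicographic walk over each slot's
// attributes suffices; a slot that runs out of attributes first orders
// before the longer one.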
int FunctionComparator::cmpAttrs(const AttributeList L,
                                 const AttributeList R) const {
  if (int Res = cmpNumbers(L.getNumSlots(), R.getNumSlots()))
    return Res;

  for (unsigned i = 0, e = L.getNumSlots(); i != e; ++i) {
    AttributeList::iterator LI = L.begin(i), LE = L.end(i), RI = R.begin(i),
                            RE = R.end(i);
    for (; LI != LE && RI != RE; ++LI, ++RI) {
      Attribute LA = *LI;
      Attribute RA = *RI;
      if (LA < RA)
        return -1;
      if (RA < LA)
        return 1;
    }
    if (LI != LE)
      return 1;
    if (RI != RE)
      return -1;
  }
  return 0;
}

int FunctionComparator::cmpRangeMetadata(const MDNode *L,
                                         const MDNode *R) const {
  if (L == R)
    return 0;
  if (!L)
    return -1;
  if (!R)
    return 1;
  // Range metadata is a sequence of numbers. Make sure they are the same
  // sequence.
  // TODO: Note that as this is metadata, it is possible to drop and/or merge
  // this data when considering functions to merge. Thus this comparison would
  // return 0 (i.e. equivalent), but merging would become more complicated
  // because the ranges would need to be unioned. It is not likely that
  // functions differ ONLY in this metadata if they are actually the same
  // function semantically.
  if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
    return Res;
  for (size_t I = 0; I < L->getNumOperands(); ++I) {
    ConstantInt *LLow = mdconst::extract<ConstantInt>(L->getOperand(I));
    ConstantInt *RLow = mdconst::extract<ConstantInt>(R->getOperand(I));
    if (int Res = cmpAPInts(LLow->getValue(), RLow->getValue()))
      return Res;
  }
  return 0;
}

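// Compare the operand-bundle schemas of two call sites: the number of
// bundles, each bundle's tag name, and the number of inputs per bundle. The
// bundle inputs themselves are operands of the call instruction, so they are
// compared later along with the rest of the operands.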
int FunctionComparator::cmpOperandBundlesSchema(const Instruction *L,
                                                const Instruction *R) const {
  ImmutableCallSite LCS(L);
  ImmutableCallSite RCS(R);

  assert(LCS && RCS && "Must be calls or invokes!");
  assert(LCS.isCall() == RCS.isCall() && "Can't compare otherwise!");

  if (int Res =
          cmpNumbers(LCS.getNumOperandBundles(), RCS.getNumOperandBundles()))
    return Res;

  for (unsigned i = 0, e = LCS.getNumOperandBundles(); i != e; ++i) {
    auto OBL = LCS.getOperandBundleAt(i);
    auto OBR = RCS.getOperandBundleAt(i);

    if (int Res = OBL.getTagName().compare(OBR.getTagName()))
      return Res;

    if (int Res = cmpNumbers(OBL.Inputs.size(), OBR.Inputs.size()))
      return Res;
  }

  return 0;
}

/// Constants comparison:
/// 1. Check whether type of L constant could be losslessly bitcasted to R
/// type.
/// 2. Compare constant contents.
/// For more details see declaration comments.
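/// For example, <4 x i8> and <2 x i16> constants are both 32 bits wide, so
/// they count as bitcastable and are ordered by content below, whereas an i32
/// and an i64 constant differ in type and are ordered by cmpTypes alone.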
int FunctionComparator::cmpConstants(const Constant *L,
                                     const Constant *R) const {

  Type *TyL = L->getType();
  Type *TyR = R->getType();

  // Check whether types are bitcastable. This part is just re-factored
  // Type::canLosslesslyBitCastTo method, but instead of returning true/false,
  // we also pack into result which type is "less" for us.
  int TypesRes = cmpTypes(TyL, TyR);
  if (TypesRes != 0) {
    // Types are different, but check whether we can bitcast them.
    if (!TyL->isFirstClassType()) {
      if (TyR->isFirstClassType())
        return -1;
      // Neither TyL nor TyR are values of first class type. Return the result
      // of comparing the types.
      return TypesRes;
    }
    if (!TyR->isFirstClassType()) {
      if (TyL->isFirstClassType())
        return 1;
      return TypesRes;
    }

    // Vector -> Vector conversions are always lossless if the two vector types
    // have the same size, otherwise not.
    unsigned TyLWidth = 0;
    unsigned TyRWidth = 0;

    if (auto *VecTyL = dyn_cast<VectorType>(TyL))
      TyLWidth = VecTyL->getBitWidth();
    if (auto *VecTyR = dyn_cast<VectorType>(TyR))
      TyRWidth = VecTyR->getBitWidth();

    if (TyLWidth != TyRWidth)
      return cmpNumbers(TyLWidth, TyRWidth);

    // Zero bit-width means neither TyL nor TyR are vectors.
    if (!TyLWidth) {
      PointerType *PTyL = dyn_cast<PointerType>(TyL);
      PointerType *PTyR = dyn_cast<PointerType>(TyR);
      if (PTyL && PTyR) {
        unsigned AddrSpaceL = PTyL->getAddressSpace();
        unsigned AddrSpaceR = PTyR->getAddressSpace();
        if (int Res = cmpNumbers(AddrSpaceL, AddrSpaceR))
          return Res;
      }
      if (PTyL)
        return 1;
      if (PTyR)
        return -1;

      // TyL and TyR aren't vectors, nor pointers. We don't know how to
      // bitcast them.
      return TypesRes;
    }
  }

  // OK, types are bitcastable, now check constant contents.

  if (L->isNullValue() && R->isNullValue())
    return TypesRes;
  if (L->isNullValue() && !R->isNullValue())
    return 1;
  if (!L->isNullValue() && R->isNullValue())
    return -1;

  auto GlobalValueL = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(L));
  auto GlobalValueR = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(R));
  if (GlobalValueL && GlobalValueR) {
    return cmpGlobalValues(GlobalValueL, GlobalValueR);
  }

  if (int Res = cmpNumbers(L->getValueID(), R->getValueID()))
    return Res;

  if (const auto *SeqL = dyn_cast<ConstantDataSequential>(L)) {
    const auto *SeqR = cast<ConstantDataSequential>(R);
    // This handles ConstantDataArray and ConstantDataVector. Note that we
    // compare the two raw data arrays, which might differ depending on the
    // host endianness. This isn't a problem though, because the endianness of
    // a module will affect the order of the constants, but this order is the
    // same for a given input module and host platform.
    return cmpMem(SeqL->getRawDataValues(), SeqR->getRawDataValues());
  }

  switch (L->getValueID()) {
  case Value::UndefValueVal:
  case Value::ConstantTokenNoneVal:
    return TypesRes;
  case Value::ConstantIntVal: {
    const APInt &LInt = cast<ConstantInt>(L)->getValue();
    const APInt &RInt = cast<ConstantInt>(R)->getValue();
    return cmpAPInts(LInt, RInt);
  }
  case Value::ConstantFPVal: {
    const APFloat &LAPF = cast<ConstantFP>(L)->getValueAPF();
    const APFloat &RAPF = cast<ConstantFP>(R)->getValueAPF();
    return cmpAPFloats(LAPF, RAPF);
  }
  case Value::ConstantArrayVal: {
    const ConstantArray *LA = cast<ConstantArray>(L);
    const ConstantArray *RA = cast<ConstantArray>(R);
    uint64_t NumElementsL = cast<ArrayType>(TyL)->getNumElements();
    uint64_t NumElementsR = cast<ArrayType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LA->getOperand(i)),
                                 cast<Constant>(RA->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantStructVal: {
    const ConstantStruct *LS = cast<ConstantStruct>(L);
    const ConstantStruct *RS = cast<ConstantStruct>(R);
    unsigned NumElementsL = cast<StructType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<StructType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (unsigned i = 0; i != NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LS->getOperand(i)),
                                 cast<Constant>(RS->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantVectorVal: {
    const ConstantVector *LV = cast<ConstantVector>(L);
    const ConstantVector *RV = cast<ConstantVector>(R);
    unsigned NumElementsL = cast<VectorType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<VectorType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LV->getOperand(i)),
                                 cast<Constant>(RV->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantExprVal: {
    const ConstantExpr *LE = cast<ConstantExpr>(L);
    const ConstantExpr *RE = cast<ConstantExpr>(R);
    unsigned NumOperandsL = LE->getNumOperands();
    unsigned NumOperandsR = RE->getNumOperands();
    if (int Res = cmpNumbers(NumOperandsL, NumOperandsR))
      return Res;
    for (unsigned i = 0; i < NumOperandsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LE->getOperand(i)),
                                 cast<Constant>(RE->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::BlockAddressVal: {
    const BlockAddress *LBA = cast<BlockAddress>(L);
    const BlockAddress *RBA = cast<BlockAddress>(R);
    if (int Res = cmpValues(LBA->getFunction(), RBA->getFunction()))
      return Res;
    if (LBA->getFunction() == RBA->getFunction()) {
      // They are BBs in the same function. Order by which comes first in the
      // BB order of the function. This order is deterministic.
      Function *F = LBA->getFunction();
      BasicBlock *LBB = LBA->getBasicBlock();
      BasicBlock *RBB = RBA->getBasicBlock();
      if (LBB == RBB)
        return 0;
      for (BasicBlock &BB : F->getBasicBlockList()) {
        if (&BB == LBB)
          return -1;
        if (&BB == RBB)
          return 1;
      }
      llvm_unreachable("Basic Block Address does not point to a basic block in "
                       "its function.");
      return -1;
    } else {
      // cmpValues said the functions are the same. So because they aren't
      // literally the same pointer, they must respectively be the left and
      // right function.
      assert(LBA->getFunction() == FnL && RBA->getFunction() == FnR);
      // cmpValues will tell us if these are equivalent BasicBlocks, in the
      // context of their respective functions.
      return cmpValues(LBA->getBasicBlock(), RBA->getBasicBlock());
    }
  }
  default: // Unknown constant, abort.
    DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n");
    llvm_unreachable("Constant ValueID not recognized.");
    return -1;
  }
}

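// Globals are not compared by content: each GlobalValue is assigned a serial
// number by the shared GlobalNumberState the first time it is seen, and
// references are ordered by that number. This keeps the ordering
// deterministic and stable across the whole MergeFunctions run, even when
// globals do not have unique names.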
int FunctionComparator::cmpGlobalValues(GlobalValue *L, GlobalValue *R) const {
  uint64_t LNumber = GlobalNumbers->getNumber(L);
  uint64_t RNumber = GlobalNumbers->getNumber(R);
  return cmpNumbers(LNumber, RNumber);
}

/// cmpTypes - compares two types,
/// defines total ordering among the types set.
/// See method declaration comments for more details.
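/// For example, on a target with 64-bit pointers, i8* in address space 0 and
/// i64 compare equal here: both are canonicalized to the DataLayout's
/// integer-pointer type before the comparison.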
int FunctionComparator::cmpTypes(Type *TyL, Type *TyR) const {
  PointerType *PTyL = dyn_cast<PointerType>(TyL);
  PointerType *PTyR = dyn_cast<PointerType>(TyR);

  const DataLayout &DL = FnL->getParent()->getDataLayout();
  if (PTyL && PTyL->getAddressSpace() == 0)
    TyL = DL.getIntPtrType(TyL);
  if (PTyR && PTyR->getAddressSpace() == 0)
    TyR = DL.getIntPtrType(TyR);

  if (TyL == TyR)
    return 0;

  if (int Res = cmpNumbers(TyL->getTypeID(), TyR->getTypeID()))
    return Res;

  switch (TyL->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
    // Fall through in Release mode.
    LLVM_FALLTHROUGH;
  case Type::IntegerTyID:
    return cmpNumbers(cast<IntegerType>(TyL)->getBitWidth(),
                      cast<IntegerType>(TyR)->getBitWidth());
  // TyL == TyR would have returned true earlier, because types are uniqued.
  case Type::VoidTyID:
  case Type::FloatTyID:
  case Type::DoubleTyID:
  case Type::X86_FP80TyID:
  case Type::FP128TyID:
  case Type::PPC_FP128TyID:
  case Type::LabelTyID:
  case Type::MetadataTyID:
  case Type::TokenTyID:
    return 0;

  case Type::PointerTyID: {
    assert(PTyL && PTyR && "Both types must be pointers here.");
    return cmpNumbers(PTyL->getAddressSpace(), PTyR->getAddressSpace());
  }

  case Type::StructTyID: {
    StructType *STyL = cast<StructType>(TyL);
    StructType *STyR = cast<StructType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());

    if (STyL->isPacked() != STyR->isPacked())
      return cmpNumbers(STyL->isPacked(), STyR->isPacked());

    for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) {
      if (int Res = cmpTypes(STyL->getElementType(i), STyR->getElementType(i)))
        return Res;
    }
    return 0;
  }

  case Type::FunctionTyID: {
    FunctionType *FTyL = cast<FunctionType>(TyL);
    FunctionType *FTyR = cast<FunctionType>(TyR);
    if (FTyL->getNumParams() != FTyR->getNumParams())
      return cmpNumbers(FTyL->getNumParams(), FTyR->getNumParams());

    if (FTyL->isVarArg() != FTyR->isVarArg())
      return cmpNumbers(FTyL->isVarArg(), FTyR->isVarArg());

    if (int Res = cmpTypes(FTyL->getReturnType(), FTyR->getReturnType()))
      return Res;

    for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) {
      if (int Res = cmpTypes(FTyL->getParamType(i), FTyR->getParamType(i)))
        return Res;
    }
    return 0;
  }

  case Type::ArrayTyID:
  case Type::VectorTyID: {
    auto *STyL = cast<SequentialType>(TyL);
    auto *STyR = cast<SequentialType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
    return cmpTypes(STyL->getElementType(), STyR->getElementType());
  }
  }
}

// Determine whether the two operations are the same except that pointer-to-A
// and pointer-to-B are equivalent. This should be kept in sync with
// Instruction::isSameOperationAs.
// Read method declaration comments for more details.
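// For example, two loads match only if, beyond opcode and types, their
// volatility, alignment, atomic ordering, synchronization scope and range
// metadata all agree; the per-instruction checks below enumerate that state.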
int FunctionComparator::cmpOperations(const Instruction *L,
                                      const Instruction *R,
                                      bool &needToCmpOperands) const {
  needToCmpOperands = true;
  if (int Res = cmpValues(L, R))
    return Res;

  // Differences from Instruction::isSameOperationAs:
  // * replace type comparison with calls to cmpTypes.
  // * we test for I->getRawSubclassOptionalData (nuw/nsw/tail) at the top.
  // * because of the above, we don't test for the tail bit on calls later on.
  if (int Res = cmpNumbers(L->getOpcode(), R->getOpcode()))
    return Res;

  if (const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(L)) {
    needToCmpOperands = false;
    const GetElementPtrInst *GEPR = cast<GetElementPtrInst>(R);
    if (int Res =
            cmpValues(GEPL->getPointerOperand(), GEPR->getPointerOperand()))
      return Res;
    return cmpGEPs(GEPL, GEPR);
  }

  if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
    return Res;

  if (int Res = cmpTypes(L->getType(), R->getType()))
    return Res;

  if (int Res = cmpNumbers(L->getRawSubclassOptionalData(),
                           R->getRawSubclassOptionalData()))
    return Res;

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = L->getNumOperands(); i != e; ++i) {
    if (int Res =
            cmpTypes(L->getOperand(i)->getType(), R->getOperand(i)->getType()))
      return Res;
  }

  // Check special state that is a part of some instructions.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(L)) {
    if (int Res = cmpTypes(AI->getAllocatedType(),
                           cast<AllocaInst>(R)->getAllocatedType()))
      return Res;
    return cmpNumbers(AI->getAlignment(), cast<AllocaInst>(R)->getAlignment());
  }
  if (const LoadInst *LI = dyn_cast<LoadInst>(L)) {
    if (int Res = cmpNumbers(LI->isVolatile(), cast<LoadInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(LI->getAlignment(), cast<LoadInst>(R)->getAlignment()))
      return Res;
    if (int Res =
            cmpOrderings(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
      return Res;
    if (int Res =
            cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))
      return Res;
    return cmpRangeMetadata(LI->getMetadata(LLVMContext::MD_range),
                            cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const StoreInst *SI = dyn_cast<StoreInst>(L)) {
    if (int Res =
            cmpNumbers(SI->isVolatile(), cast<StoreInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(SI->getAlignment(), cast<StoreInst>(R)->getAlignment()))
      return Res;
    if (int Res =
            cmpOrderings(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope());
  }
  if (const CmpInst *CI = dyn_cast<CmpInst>(L))
    return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate());
  if (const CallInst *CI = dyn_cast<CallInst>(L)) {
    if (int Res = cmpNumbers(CI->getCallingConv(),
                             cast<CallInst>(R)->getCallingConv()))
      return Res;
    if (int Res =
            cmpAttrs(CI->getAttributes(), cast<CallInst>(R)->getAttributes()))
      return Res;
    if (int Res = cmpOperandBundlesSchema(CI, R))
      return Res;
    return cmpRangeMetadata(
        CI->getMetadata(LLVMContext::MD_range),
        cast<CallInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const InvokeInst *II = dyn_cast<InvokeInst>(L)) {
    if (int Res = cmpNumbers(II->getCallingConv(),
                             cast<InvokeInst>(R)->getCallingConv()))
      return Res;
    if (int Res =
            cmpAttrs(II->getAttributes(), cast<InvokeInst>(R)->getAttributes()))
      return Res;
    if (int Res = cmpOperandBundlesSchema(II, R))
      return Res;
    return cmpRangeMetadata(
        II->getMetadata(LLVMContext::MD_range),
        cast<InvokeInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(L)) {
    ArrayRef<unsigned> LIndices = IVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<InsertValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
    return 0;
  }
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(L)) {
    ArrayRef<unsigned> LIndices = EVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<ExtractValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
  }
  if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
    if (int Res =
            cmpOrderings(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope());
  }
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
    if (int Res = cmpNumbers(CXI->isVolatile(),
                             cast<AtomicCmpXchgInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpNumbers(CXI->isWeak(),
                             cast<AtomicCmpXchgInst>(R)->isWeak()))
      return Res;
    if (int Res =
            cmpOrderings(CXI->getSuccessOrdering(),
                         cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
      return Res;
    if (int Res =
            cmpOrderings(CXI->getFailureOrdering(),
                         cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
      return Res;
    return cmpNumbers(CXI->getSynchScope(),
                      cast<AtomicCmpXchgInst>(R)->getSynchScope());
  }
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) {
    if (int Res = cmpNumbers(RMWI->getOperation(),
                             cast<AtomicRMWInst>(R)->getOperation()))
      return Res;
    if (int Res = cmpNumbers(RMWI->isVolatile(),
                             cast<AtomicRMWInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpOrderings(RMWI->getOrdering(),
                               cast<AtomicRMWInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(RMWI->getSynchScope(),
                      cast<AtomicRMWInst>(R)->getSynchScope());
  }
  if (const PHINode *PNL = dyn_cast<PHINode>(L)) {
    const PHINode *PNR = cast<PHINode>(R);
    // Ensure that in addition to the incoming values being identical
    // (checked by the caller of this function), the incoming blocks
    // are also identical.
    for (unsigned i = 0, e = PNL->getNumIncomingValues(); i != e; ++i) {
      if (int Res =
              cmpValues(PNL->getIncomingBlock(i), PNR->getIncomingBlock(i)))
        return Res;
    }
  }
  return 0;
}

// Determine whether two GEP operations perform the same underlying arithmetic.
// Read method declaration comments for more details.
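// When both GEPs have constant offsets, they are compared by the net byte
// offset alone, so two GEPs with different index lists that address the same
// byte compare equal.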
int FunctionComparator::cmpGEPs(const GEPOperator *GEPL,
                                const GEPOperator *GEPR) const {
  unsigned int ASL = GEPL->getPointerAddressSpace();
  unsigned int ASR = GEPR->getPointerAddressSpace();

  if (int Res = cmpNumbers(ASL, ASR))
    return Res;

  // When we have target data, we can reduce the GEP down to the value in bytes
  // added to the address.
  const DataLayout &DL = FnL->getParent()->getDataLayout();
  unsigned BitWidth = DL.getPointerSizeInBits(ASL);
  APInt OffsetL(BitWidth, 0), OffsetR(BitWidth, 0);
  if (GEPL->accumulateConstantOffset(DL, OffsetL) &&
      GEPR->accumulateConstantOffset(DL, OffsetR))
    return cmpAPInts(OffsetL, OffsetR);
  if (int Res = cmpTypes(GEPL->getSourceElementType(),
                         GEPR->getSourceElementType()))
    return Res;

  if (int Res = cmpNumbers(GEPL->getNumOperands(), GEPR->getNumOperands()))
    return Res;

  for (unsigned i = 0, e = GEPL->getNumOperands(); i != e; ++i) {
    if (int Res = cmpValues(GEPL->getOperand(i), GEPR->getOperand(i)))
      return Res;
  }

  return 0;
}

int FunctionComparator::cmpInlineAsm(const InlineAsm *L,
                                     const InlineAsm *R) const {
  // InlineAsm's are uniqued. If they are the same pointer, obviously they are
  // the same, otherwise compare the fields.
  if (L == R)
    return 0;
  if (int Res = cmpTypes(L->getFunctionType(), R->getFunctionType()))
    return Res;
  if (int Res = cmpMem(L->getAsmString(), R->getAsmString()))
    return Res;
  if (int Res = cmpMem(L->getConstraintString(), R->getConstraintString()))
    return Res;
  if (int Res = cmpNumbers(L->hasSideEffects(), R->hasSideEffects()))
    return Res;
  if (int Res = cmpNumbers(L->isAlignStack(), R->isAlignStack()))
    return Res;
  if (int Res = cmpNumbers(L->getDialect(), R->getDialect()))
    return Res;
  llvm_unreachable("InlineAsm blocks were not uniqued.");
  return 0;
}

/// Compare two values used by the two functions under pair-wise comparison. If
/// this is the first time the values are seen, they're added to the mapping so
/// that we will detect mismatches on next use.
/// See comments in declaration for more details.
int FunctionComparator::cmpValues(const Value *L, const Value *R) const {
  // Catch self-reference case.
  if (L == FnL) {
    if (R == FnR)
      return 0;
    return -1;
  }
  if (R == FnR) {
    if (L == FnL)
      return 0;
    return 1;
  }

  const Constant *ConstL = dyn_cast<Constant>(L);
  const Constant *ConstR = dyn_cast<Constant>(R);
  if (ConstL && ConstR) {
    if (L == R)
      return 0;
    return cmpConstants(ConstL, ConstR);
  }

  if (ConstL)
    return 1;
  if (ConstR)
    return -1;

  const InlineAsm *InlineAsmL = dyn_cast<InlineAsm>(L);
  const InlineAsm *InlineAsmR = dyn_cast<InlineAsm>(R);

  if (InlineAsmL && InlineAsmR)
    return cmpInlineAsm(InlineAsmL, InlineAsmR);
  if (InlineAsmL)
    return 1;
  if (InlineAsmR)
    return -1;

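  // Neither side is a constant or inline asm: fall back to serial numbers.
  // Each value is numbered in order of first appearance, so two values
  // compare equal iff they first appeared at the same point in their
  // respective functions; insert() returns the existing entry on a repeat
  // visit, leaving the original number in place.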
  auto LeftSN = sn_mapL.insert(std::make_pair(L, sn_mapL.size())),
       RightSN = sn_mapR.insert(std::make_pair(R, sn_mapR.size()));

  return cmpNumbers(LeftSN.first->second, RightSN.first->second);
}

// Test whether two basic blocks have equivalent behaviour.
int FunctionComparator::cmpBasicBlocks(const BasicBlock *BBL,
                                       const BasicBlock *BBR) const {
  BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
  BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();

  do {
    bool needToCmpOperands = true;
    if (int Res = cmpOperations(&*InstL, &*InstR, needToCmpOperands))
      return Res;
    if (needToCmpOperands) {
      assert(InstL->getNumOperands() == InstR->getNumOperands());

      for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
        Value *OpL = InstL->getOperand(i);
        Value *OpR = InstR->getOperand(i);
        if (int Res = cmpValues(OpL, OpR))
          return Res;
        // cmpValues should ensure this is true.
        assert(cmpTypes(OpL->getType(), OpR->getType()) == 0);
      }
    }

    ++InstL;
    ++InstR;
  } while (InstL != InstLE && InstR != InstRE);

  if (InstL != InstLE && InstR == InstRE)
    return 1;
  if (InstL == InstLE && InstR != InstRE)
    return -1;
  return 0;
}

int FunctionComparator::compareSignature() const {
  if (int Res = cmpAttrs(FnL->getAttributes(), FnR->getAttributes()))
    return Res;

  if (int Res = cmpNumbers(FnL->hasGC(), FnR->hasGC()))
    return Res;

  if (FnL->hasGC()) {
    if (int Res = cmpMem(FnL->getGC(), FnR->getGC()))
      return Res;
  }

  if (int Res = cmpNumbers(FnL->hasSection(), FnR->hasSection()))
    return Res;

  if (FnL->hasSection()) {
    if (int Res = cmpMem(FnL->getSection(), FnR->getSection()))
      return Res;
  }

  if (int Res = cmpNumbers(FnL->isVarArg(), FnR->isVarArg()))
    return Res;

  // TODO: if it's internal and only used in direct calls, we could handle this
  // case too.
  if (int Res = cmpNumbers(FnL->getCallingConv(), FnR->getCallingConv()))
    return Res;

  if (int Res = cmpTypes(FnL->getFunctionType(), FnR->getFunctionType()))
    return Res;

  assert(FnL->arg_size() == FnR->arg_size() &&
         "Identically typed functions have different numbers of args!");

  // Visit the arguments so that they get enumerated in the order they're
  // defined.
  for (Function::const_arg_iterator ArgLI = FnL->arg_begin(),
                                    ArgRI = FnR->arg_begin(),
                                    ArgLE = FnL->arg_end();
       ArgLI != ArgLE; ++ArgLI, ++ArgRI) {
    if (cmpValues(&*ArgLI, &*ArgRI) != 0)
      llvm_unreachable("Arguments repeat!");
  }
  return 0;
}

// Test whether the two functions have equivalent behaviour.
int FunctionComparator::compare() {
  beginCompare();

  if (int Res = compareSignature())
    return Res;

  // We do a CFG-ordered walk since the actual ordering of the blocks in the
  // linked list is immaterial. Our walk starts at the entry block for both
  // functions, then takes each block from each terminator in order. As an
  // artifact, this also means that unreachable blocks are ignored.
  SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
  SmallPtrSet<const BasicBlock *, 32> VisitedBBs; // in terms of F1.

  FnLBBs.push_back(&FnL->getEntryBlock());
  FnRBBs.push_back(&FnR->getEntryBlock());

  VisitedBBs.insert(FnLBBs[0]);
  while (!FnLBBs.empty()) {
    const BasicBlock *BBL = FnLBBs.pop_back_val();
    const BasicBlock *BBR = FnRBBs.pop_back_val();

    if (int Res = cmpValues(BBL, BBR))
      return Res;

    if (int Res = cmpBasicBlocks(BBL, BBR))
      return Res;

    const TerminatorInst *TermL = BBL->getTerminator();
    const TerminatorInst *TermR = BBR->getTerminator();

    assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
    for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
      if (!VisitedBBs.insert(TermL->getSuccessor(i)).second)
        continue;

      FnLBBs.push_back(TermL->getSuccessor(i));
      FnRBBs.push_back(TermR->getSuccessor(i));
    }
  }
  return 0;
}

namespace {
// Accumulate the hash of a sequence of 64-bit integers. This is similar to a
// hash of a sequence of 64bit ints, but the entire input does not need to be
// available at once. This interface is necessary for functionHash because it
// needs to accumulate the hash as the structure of the function is traversed
// without saving these values to an intermediate buffer. This form of hashing
// is not often needed, as usually the object to hash is just read from a
// buffer.
class HashAccumulator64 {
  uint64_t Hash;

public:
  // Initialize to random constant, so the state isn't zero.
  HashAccumulator64() { Hash = 0x6acaa36bef8325c5ULL; }

  void add(uint64_t V) {
    Hash = llvm::hashing::detail::hash_16_bytes(Hash, V);
  }

  // No finishing is required, because the entire hash value is used.
  uint64_t getHash() { return Hash; }
};
} // end anonymous namespace

// A function hash is calculated by considering only the number of arguments and
// whether a function is varargs, the order of basic blocks (given by the
// successors of each basic block in depth first order), and the order of
// opcodes of each instruction within each of these basic blocks. This mirrors
// the strategy compare() uses to compare functions by walking the BBs in depth
// first order and comparing each instruction in sequence. Because this hash
// does not look at the operands, it is insensitive to things such as the
// target of calls and the constants used in the function, which makes it useful
// when possibly merging functions which are the same modulo constants and call
// targets.
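// The hash is a cheap pre-filter: functions that compare equal always hash
// equal, so mismatched hashes prove two functions differ, while matching
// hashes still require a full compare().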
FunctionComparator::FunctionHash FunctionComparator::functionHash(Function &F) {
  HashAccumulator64 H;
  H.add(F.isVarArg());
  H.add(F.arg_size());

  SmallVector<const BasicBlock *, 8> BBs;
  SmallSet<const BasicBlock *, 16> VisitedBBs;

  // Walk the blocks in the same order as FunctionComparator::cmpBasicBlocks(),
  // accumulating the hash of the function "structure." (BB and opcode sequence)
  BBs.push_back(&F.getEntryBlock());
  VisitedBBs.insert(BBs[0]);
  while (!BBs.empty()) {
    const BasicBlock *BB = BBs.pop_back_val();
    // This random value acts as a block header, as otherwise the partition of
    // opcodes into BBs wouldn't affect the hash, only the order of the opcodes.
    H.add(45798);
    for (auto &Inst : *BB) {
      H.add(Inst.getOpcode());
    }
    const TerminatorInst *Term = BB->getTerminator();
    for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
      if (!VisitedBBs.insert(Term->getSuccessor(i)).second)
        continue;
      BBs.push_back(Term->getSuccessor(i));
    }
  }
  return H.getHash();
}