1 //===----- ARMCodeGenPrepare.cpp ------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This pass inserts intrinsics to handle small types that would otherwise be
12 /// promoted during legalization. Here we can manually promote types or insert
13 /// intrinsics which can handle narrow types that aren't supported by the
16 //===----------------------------------------------------------------------===//
19 #include "ARMSubtarget.h"
20 #include "ARMTargetMachine.h"
21 #include "llvm/ADT/StringRef.h"
22 #include "llvm/CodeGen/Passes.h"
23 #include "llvm/CodeGen/TargetPassConfig.h"
24 #include "llvm/IR/Attributes.h"
25 #include "llvm/IR/BasicBlock.h"
26 #include "llvm/IR/IRBuilder.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/InstrTypes.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/Instructions.h"
31 #include "llvm/IR/IntrinsicInst.h"
32 #include "llvm/IR/Intrinsics.h"
33 #include "llvm/IR/Type.h"
34 #include "llvm/IR/Value.h"
35 #include "llvm/IR/Verifier.h"
36 #include "llvm/Pass.h"
37 #include "llvm/Support/Casting.h"
38 #include "llvm/Support/CommandLine.h"
40 #define DEBUG_TYPE "arm-codegenprepare"
// Command-line knobs for this pass. NOTE(review): the `static cl::opt<bool>`
// declarator lines appear to be missing from this excerpt (embedded line
// numbering jumps 40->45, 46->49, 50->53) — confirm against upstream.
// The pass is disabled by default (init(true) on the disable flag).
45 DisableCGP("arm-disable-cgp", cl::Hidden, cl::init(true),
46 cl::desc("Disable ARM specific CodeGenPrepare pass"));
// Allow use of the scalar DSP intrinsics (uadd16/usub16 etc.); off by default.
49 EnableDSP("arm-enable-scalar-dsp", cl::Hidden, cl::init(false),
50 cl::desc("Use DSP instructions for scalar operations"));
// Also allow DSP intrinsics when an operand is an immediate; off by default
// (see isNarrowInstSupported, which rejects constant operands otherwise).
53 EnableDSPWithImms("arm-enable-scalar-dsp-imms", cl::Hidden, cl::init(false),
54 cl::desc("Use DSP instructions for scalar operations\
55 with immediate operands"));
// Interior of class IRPromoter (the class header is outside this excerpt).
// IRPromoter performs the actual IR mutation once ARMCodeGenPrepare has
// decided a use-def chain is safe to promote to i32.
// Instructions created during promotion (zexts, truncs, intrinsic calls).
60 SmallPtrSet<Value*, 8> NewInsts;
// Original narrow instructions replaced during Mutate(); deleted afterwards.
61 SmallVector<Instruction*, 4> InstsToRemove;
66 IRPromoter(Module *M) : M(M), Ctx(M->getContext()) { }
// Cleanup loop: drop references first so mutually-referencing dead
// instructions can be erased safely, then clear the list.
69 for (auto *I : InstsToRemove) {
70 LLVM_DEBUG(dbgs() << "ARM CGP: Removing " << *I << "\n");
71 I->dropAllReferences();
74 InstsToRemove.clear();
// Promote the given use-def tree from OrigTy to i32. Visited is every node
// in the tree, Leaves are values needing a zext, Roots need a trunc.
78 void Mutate(Type *OrigTy,
79 SmallPtrSetImpl<Value*> &Visited,
80 SmallPtrSetImpl<Value*> &Leaves,
81 SmallPtrSetImpl<Instruction*> &Roots);
/// FunctionPass that searches up from unsigned icmps and promotes narrow
/// (<= 16-bit) integer use-def chains to i32, inserting ARM DSP intrinsics
/// where plain promotion would change overflow behaviour.
84 class ARMCodeGenPrepare : public FunctionPass {
85 const ARMSubtarget *ST = nullptr;
// Owned; allocated in doInitialization (presumably freed in doFinalization —
// the body is not visible in this excerpt, confirm).
86 IRPromoter *Promoter = nullptr;
// Values already visited across all TryToPromote calls in this function,
// so overlapping trees are not explored twice.
87 std::set<Value*> AllVisited;
// Type/width of the chain currently being considered for promotion.
88 Type *OrigTy = nullptr;
89 unsigned TypeSize = 0;
91 bool isNarrowInstSupported(Instruction *I);
92 bool isSupportedValue(Value *V);
93 bool isLegalToPromote(Value *V);
94 bool TryToPromote(Value *V);
99 ARMCodeGenPrepare() : FunctionPass(ID) {}
101 void getAnalysisUsage(AnalysisUsage &AU) const override {
// TargetPassConfig gives access to the TargetMachine/subtarget.
102 AU.addRequired<TargetPassConfig>();
105 StringRef getPassName() const override { return "ARM IR optimizations"; }
107 bool doInitialization(Module &M) override;
108 bool runOnFunction(Function &F) override;
109 bool doFinalization(Module &M) override;
114 /// Can the given value generate sign bits.
115 static bool isSigned(Value *V) {
116 if (!isa<Instruction>(V))
// NOTE(review): the non-instruction return is missing from this excerpt
// (numbering jumps 116->119); presumably `return false;`.
119 unsigned Opc = cast<Instruction>(V)->getOpcode();
// Only these opcodes propagate/introduce sign bits for our purposes.
120 return Opc == Instruction::AShr || Opc == Instruction::SDiv ||
121 Opc == Instruction::SRem;
124 /// Some instructions can use 8- and 16-bit operands, and we don't need to
125 /// promote anything larger. We disallow booleans to make life easier when
126 /// dealing with icmps but allow any other integer that is <= 16 bits. Void
127 /// types are accepted so we can handle switches.
128 static bool isSupportedType(Value *V) {
129 if (V->getType()->isVoidTy())
132 const IntegerType *IntTy = dyn_cast<IntegerType>(V->getType());
// NOTE(review): the null-check on IntTy is missing from this excerpt
// (numbering jumps 132->136); confirm against upstream.
136 // Don't try to promote boolean values.
137 if (IntTy->getBitWidth() == 1)
// For a zext, what matters is the width of the value being extended.
140 if (auto *ZExt = dyn_cast<ZExtInst>(V))
141 return isSupportedType(ZExt->getOperand(0));
143 return IntTy->getBitWidth() <= 16;
146 /// Return true if V will require any promoted values to be truncated for the
/// use to be valid (e.g. a narrow store or return of a promoted value).
148 static bool isSink(Value *V) {
149 auto UsesNarrowValue = [](Value *V) {
150 return V->getType()->getScalarSizeInBits() <= 32;
// Stores and returns are sinks when the value they consume is <= 32 bits.
153 if (auto *Store = dyn_cast<StoreInst>(V))
154 return UsesNarrowValue(Store->getValueOperand());
155 if (auto *Return = dyn_cast<ReturnInst>(V))
156 return UsesNarrowValue(Return->getReturnValue());
// Calls always act as sinks: their arguments must keep the declared types.
158 return isa<CallInst>(V);
161 /// Return true if the given value is a leaf that will need to be zext'd.
162 static bool isSource(Value *V) {
// Arguments of a supported narrow type enter the tree unextended.
163 if (isa<Argument>(V) && isSupportedType(V))
165 else if (isa<TruncInst>(V))
167 else if (auto *ZExt = dyn_cast<ZExtInst>(V))
168 // ZExt can be a leaf if it's the only user of a load.
169 return isa<LoadInst>(ZExt->getOperand(0)) &&
170 ZExt->getOperand(0)->hasOneUse();
// Calls whose return is marked zeroext are known to produce clean top bits.
171 else if (auto *Call = dyn_cast<CallInst>(V))
172 return Call->hasRetAttr(Attribute::AttrKind::ZExt);
173 else if (auto *Load = dyn_cast<LoadInst>(V)) {
174 if (!isa<IntegerType>(Load->getType()))
176 // A load is a leaf, unless it's already just being zext'd.
177 if (Load->hasOneUse() && isa<ZExtInst>(*Load->use_begin()))
// NOTE(review): several return statements in this function are missing from
// this excerpt (numbering gaps at 164, 166, 175, 178+); confirm upstream.
185 /// Return whether the instruction can be promoted within any modifications to
186 /// its operands or result.
187 static bool isSafeOverflow(Instruction *I) {
// nuw means unsigned wrap can't happen, so widening is trivially safe.
188 if (isa<OverflowingBinaryOperator>(I) && I->hasNoUnsignedWrap())
191 unsigned Opc = I->getOpcode();
192 if (Opc == Instruction::Add || Opc == Instruction::Sub) {
193 // We don't care if the add or sub could wrap if the value is decreasing
194 // and is only being used by an unsigned compare.
195 if (!I->hasOneUse() ||
196 !isa<ICmpInst>(*I->user_begin()) ||
197 !isa<ConstantInt>(I->getOperand(1)))
200 auto *CI = cast<ICmpInst>(*I->user_begin());
// NOTE(review): a check on the icmp (presumably signedness/CI use) is
// missing from this excerpt (numbering jumps 200->204); confirm upstream.
204 bool NegImm = cast<ConstantInt>(I->getOperand(1))->isNegative();
// "Decreasing": sub of a positive imm, or add of a negative imm.
205 bool IsDecreasing = ((Opc == Instruction::Sub) && !NegImm) ||
206 ((Opc == Instruction::Add) && NegImm);
210 LLVM_DEBUG(dbgs() << "ARM CGP: Allowing safe overflow for " << *I << "\n");
214 // Otherwise, if an instruction is using a negative immediate we will need
215 // to fix it up during the promotion.
216 for (auto &Op : I->operands()) {
217 if (auto *Const = dyn_cast<ConstantInt>(Op))
218 if (Const->isNegative())
/// Return true if V is an instruction whose result type should be mutated
/// to i32. Sinks (stores, terminators), truncs and i32-producing zexts keep
/// their types.
224 static bool shouldPromote(Value *V) {
225 auto *I = dyn_cast<Instruction>(V);
// NOTE(review): the null-check on I is missing from this excerpt
// (numbering jumps 225->229); presumably `if (!I) return false;`.
229 if (!isa<IntegerType>(V->getType()))
232 if (isa<StoreInst>(I) || isa<TerminatorInst>(I) || isa<TruncInst>(I) ||
// A zext that already produces i32 needs no further promotion.
236 if (auto *ZExt = dyn_cast<ZExtInst>(I))
237 return !ZExt->getDestTy()->isIntegerTy(32);
242 /// Return whether we can safely mutate V's type to ExtTy without having to be
243 /// concerned with zero extending or truncation.
244 static bool isPromotedResultSafe(Value *V) {
245 if (!isa<Instruction>(V))
251 // If I is only being used by something that will require its value to be
252 // truncated, then we don't care about the promoted result.
253 auto *I = cast<Instruction>(V);
254 if (I->hasOneUse() && isSink(*I->use_begin()))
// Overflowing binops are only safe when isSafeOverflow proves wrap
// behaviour can't be observed; everything else is assumed safe.
257 if (isa<OverflowingBinaryOperator>(I))
258 return isSafeOverflow(I);
262 /// Return the intrinsic for the instruction that can perform the same
263 /// operation but on a narrow type. This is using the parallel dsp intrinsics
264 /// on scalar values.
265 static Intrinsic::ID getNarrowIntrinsic(Instruction *I, unsigned TypeSize) {
266 // Whether we use the signed or unsigned versions of these intrinsics
267 // doesn't matter because we're not using the GE bits that they set in
// the APSR; we only care about the value lanes.
269 switch(I->getOpcode()) {
// TypeSize selects between the halfword (16) and byte (8) lane variants.
272 case Instruction::Add:
273 return TypeSize == 16 ? Intrinsic::arm_uadd16 :
274 Intrinsic::arm_uadd8;
275 case Instruction::Sub:
276 return TypeSize == 16 ? Intrinsic::arm_usub16 :
277 Intrinsic::arm_usub8;
279 llvm_unreachable("unhandled opcode for narrow intrinsic");
/// Promote the use-def tree rooted at the visited values from OrigTy to i32:
/// (1) zext the leaves, (2) mutate the types of the interior instructions and
/// fix up constant operands, (3) remove now-redundant zexts and replace
/// overflow-unsafe ops with DSP intrinsic calls, (4) trunc back at the roots.
/// NOTE(review): many lines are missing from this excerpt (continue/return
/// statements, closing braces); comments below describe only what is visible.
282 void IRPromoter::Mutate(Type *OrigTy,
283 SmallPtrSetImpl<Value*> &Visited,
284 SmallPtrSetImpl<Value*> &Leaves,
285 SmallPtrSetImpl<Instruction*> &Roots) {
286 IRBuilder<> Builder{Ctx};
// All promotion targets i32.
287 Type *ExtTy = Type::getInt32Ty(M->getContext());
288 unsigned TypeSize = OrigTy->getPrimitiveSizeInBits();
289 SmallPtrSet<Value*, 8> Promoted;
290 LLVM_DEBUG(dbgs() << "ARM CGP: Promoting use-def chains to from " << TypeSize
// Replace every user of From with To, except a user identical to To itself
// (avoids making the replacement self-referential). Users are collected
// first because replacing while iterating uses() would invalidate it.
293 auto ReplaceAllUsersOfWith = [&](Value *From, Value *To) {
294 SmallVector<Instruction*, 4> Users;
295 Instruction *InstTo = dyn_cast<Instruction>(To);
296 for (Use &U : From->uses()) {
297 auto *User = cast<Instruction>(U.getUser());
298 if (InstTo && User->isIdenticalTo(InstTo))
300 Users.push_back(User);
303 for (auto &U : Users)
304 U->replaceUsesOfWith(From, To);
// Rewrite a narrow constant operand of I as an equivalent i32 constant.
// Safe-overflow instructions keep their signed value (sext for negatives);
// otherwise the raw bit pattern is reinterpreted at the wider width.
307 auto FixConst = [&](ConstantInt *Const, Instruction *I) {
308 Constant *NewConst = nullptr;
309 if (isSafeOverflow(I)) {
310 NewConst = (Const->isNegative()) ?
311 ConstantExpr::getSExt(Const, ExtTy) :
312 ConstantExpr::getZExt(Const, ExtTy);
314 uint64_t NewVal = *Const->getValue().getRawData();
// NOTE(review): the i16/i8 masking of NewVal appears truncated here
// (numbering jumps 315->319); confirm against upstream.
315 if (Const->getType() == Type::getInt16Ty(Ctx))
319 NewConst = ConstantInt::get(ExtTy, NewVal);
321 I->replaceUsesOfWith(Const, NewConst);
// Replace a two-operand instruction with a call to the matching scalar DSP
// intrinsic (uadd16/uadd8/usub16/usub8), preserving the debug location.
324 auto InsertDSPIntrinsic = [&](Instruction *I) {
325 LLVM_DEBUG(dbgs() << "ARM CGP: Inserting DSP intrinsic for "
328 Intrinsic::getDeclaration(M, getNarrowIntrinsic(I, TypeSize));
329 Builder.SetInsertPoint(I);
330 Builder.SetCurrentDebugLocation(I->getDebugLoc());
331 Value *Args[] = { I->getOperand(0), I->getOperand(1) };
332 CallInst *Call = Builder.CreateCall(DSPInst, Args);
333 ReplaceAllUsersOfWith(I, Call);
334 InstsToRemove.push_back(I);
335 NewInsts.insert(Call);
// Insert a zext of V to i32 and redirect V's users to it. For arguments the
// zext goes before the insertion point (function entry); for instructions,
// after the defining instruction.
338 auto InsertZExt = [&](Value *V, Instruction *InsertPt) {
339 LLVM_DEBUG(dbgs() << "ARM CGP: Inserting ZExt for " << *V << "\n");
340 Builder.SetInsertPoint(InsertPt);
341 if (auto *I = dyn_cast<Instruction>(V))
342 Builder.SetCurrentDebugLocation(I->getDebugLoc());
343 auto *ZExt = cast<Instruction>(Builder.CreateZExt(V, ExtTy));
344 if (isa<Argument>(V))
345 ZExt->moveBefore(InsertPt);
347 ZExt->moveAfter(InsertPt);
348 ReplaceAllUsersOfWith(V, ZExt);
349 NewInsts.insert(ZExt);
352 // First, insert extending instructions between the leaves and their users.
353 LLVM_DEBUG(dbgs() << "ARM CGP: Promoting leaves:\n");
354 for (auto V : Leaves) {
355 LLVM_DEBUG(dbgs() << " - " << *V << "\n");
// An existing zext leaf just has its destination type widened in place.
356 if (auto *ZExt = dyn_cast<ZExtInst>(V))
357 ZExt->mutateType(ExtTy);
358 else if (auto *I = dyn_cast<Instruction>(V))
360 else if (auto *Arg = dyn_cast<Argument>(V)) {
// Arguments get their zext at the first insertion point of the entry block.
361 BasicBlock &BB = Arg->getParent()->front();
362 InsertZExt(Arg, &*BB.getFirstInsertionPt());
364 llvm_unreachable("unhandled leaf that needs extending");
369 LLVM_DEBUG(dbgs() << "ARM CGP: Mutating the tree..\n");
370 // Then mutate the types of the instructions within the tree. Here we handle
371 // constant operands.
372 for (auto *V : Visited) {
376 if (!isa<Instruction>(V))
379 auto *I = cast<Instruction>(V);
// Fix up any operand that is still narrow: constants are rebuilt at i32,
// undefs can simply have their type mutated.
383 for (auto &U : I->operands()) {
384 if ((U->getType() == ExtTy) || !isSupportedType(&*U))
387 if (auto *Const = dyn_cast<ConstantInt>(&*U))
389 else if (isa<UndefValue>(&*U))
390 U->mutateType(ExtTy);
393 if (shouldPromote(I)) {
394 I->mutateType(ExtTy);
399 // Now we need to remove any zexts that have become unnecessary, as well
400 // as insert any intrinsics.
401 for (auto *V : Visited) {
404 if (auto *ZExt = dyn_cast<ZExtInst>(V)) {
// Interior zext not yet widened: widen it and record it as promoted.
405 if (ZExt->getDestTy() != ExtTy) {
406 ZExt->mutateType(ExtTy);
407 Promoted.insert(ZExt);
// Source already i32: the zext is now an identity, forward its operand.
409 else if (ZExt->getSrcTy() == ExtTy) {
410 ReplaceAllUsersOfWith(V, ZExt->getOperand(0));
411 InstsToRemove.push_back(ZExt);
416 if (!shouldPromote(V) || isPromotedResultSafe(V))
419 // Replace unsafe instructions with appropriate intrinsic calls.
420 InsertDSPIntrinsic(cast<Instruction>(V));
423 LLVM_DEBUG(dbgs() << "ARM CGP: Fixing up the roots:\n");
424 // Fix up any stores or returns that use the results of the promoted
// instructions: each promoted operand is truncated back to the type the
// root actually requires (pointee type, return type, or call param type).
426 for (auto I : Roots) {
427 LLVM_DEBUG(dbgs() << " - " << *I << "\n");
428 Type *TruncTy = OrigTy;
429 if (auto *Store = dyn_cast<StoreInst>(I)) {
430 auto *PtrTy = cast<PointerType>(Store->getPointerOperandType());
431 TruncTy = PtrTy->getElementType();
432 } else if (isa<ReturnInst>(I)) {
433 Function *F = I->getParent()->getParent();
434 TruncTy = F->getFunctionType()->getReturnType();
437 for (unsigned i = 0; i < I->getNumOperands(); ++i) {
438 Value *V = I->getOperand(i);
439 if (Promoted.count(V) || NewInsts.count(V)) {
440 if (auto *Op = dyn_cast<Instruction>(V)) {
// For a call root, each operand truncs to its declared parameter type.
442 if (auto *Call = dyn_cast<CallInst>(I))
443 TruncTy = Call->getFunctionType()->getParamType(i);
// Already i32: no trunc needed for this operand.
445 if (TruncTy == ExtTy)
448 LLVM_DEBUG(dbgs() << "ARM CGP: Creating " << *TruncTy
449 << " Trunc for " << *Op << "\n");
450 Builder.SetInsertPoint(Op);
451 auto *Trunc = cast<Instruction>(Builder.CreateTrunc(Op, TruncTy));
// Place the trunc immediately before the consuming root instruction.
452 Trunc->moveBefore(I);
453 I->setOperand(i, Trunc);
454 NewInsts.insert(Trunc);
459 LLVM_DEBUG(dbgs() << "ARM CGP: Mutation complete.\n");
/// Return true if I could be replaced by a scalar DSP intrinsic: the
/// subtarget must have DSP instructions, the option must be enabled, the
/// opcode must be add/sub, and immediates are only allowed when
/// EnableDSPWithImms is set.
462 bool ARMCodeGenPrepare::isNarrowInstSupported(Instruction *I) {
463 if (!ST->hasDSP() || !EnableDSP || !isSupportedType(I))
// Thumb-1 has no DSP instructions; require Thumb-2 when in Thumb mode.
466 if (ST->isThumb() && !ST->hasThumb2())
// Only add and sub have narrow intrinsic equivalents here.
469 if (I->getOpcode() != Instruction::Add && I->getOpcode() != Instruction::Sub)
473 // Would it be profitable? For Thumb code, these parallel DSP instructions
474 // are only Thumb-2, so we wouldn't be able to dual issue on Cortex-M33. For
475 // Cortex-A, specifically Cortex-A72, the latency is double and throughput is
476 // halved. They also do not take immediates as operands.
477 for (auto &Op : I->operands()) {
478 if (isa<Constant>(Op)) {
479 if (!EnableDSPWithImms)
486 /// We accept most instructions, as well as Arguments and ConstantInsts. We
487 /// Disallow casts other than zext and truncs and only allow calls if their
488 /// return value is zeroext. We don't allow opcodes that can introduce sign
489 /// bits.
490 bool ARMCodeGenPrepare::isSupportedValue(Value *V) {
491 LLVM_DEBUG(dbgs() << "ARM CGP: Is " << *V << " supported?\n");
493 // Non-instruction values that we can handle.
494 if (isa<ConstantInt>(V) || isa<Argument>(V))
497 // Memory instructions
498 if (isa<StoreInst>(V) || isa<LoadInst>(V) || isa<GetElementPtrInst>(V))
501 // Branches and targets.
// Only equality or unsigned compares: signed compares would observe the
// (zero-extended) top bits differently after promotion.
502 if (auto *ICmp = dyn_cast<ICmpInst>(V))
503 return ICmp->isEquality() || !ICmp->isSigned();
505 if( isa<BranchInst>(V) || isa<SwitchInst>(V) || isa<BasicBlock>(V))
508 if (isa<PHINode>(V) || isa<SelectInst>(V) || isa<ReturnInst>(V))
511 // Special cases for calls as we need to check for zeroext
512 // TODO We should accept calls even if they don't have zeroext, as they can
// still act as sinks (truncating their promoted arguments).
514 if (auto *Call = dyn_cast<CallInst>(V))
515 return Call->hasRetAttr(Attribute::AttrKind::ZExt);
516 else if (auto *Cast = dyn_cast<CastInst>(V)) {
// Of the casts, only zext (<= i32) and trunc (<= current TypeSize) are OK.
517 if (isa<ZExtInst>(Cast))
518 return Cast->getDestTy()->getScalarSizeInBits() <= 32;
519 else if (auto *Trunc = dyn_cast<TruncInst>(V))
520 return Trunc->getDestTy()->getScalarSizeInBits() <= TypeSize;
522 LLVM_DEBUG(dbgs() << "ARM CGP: No, unsupported cast.\n");
525 } else if (!isa<BinaryOperator>(V)) {
526 LLVM_DEBUG(dbgs() << "ARM CGP: No, not a binary operator.\n");
// Binary operators are fine as long as they can't generate sign bits.
530 bool res = !isSigned(V);
532 LLVM_DEBUG(dbgs() << "ARM CGP: No, it's a signed instruction.\n");
536 /// Check that the type of V would be promoted and that the original type is
537 /// smaller than the targeted promoted type. Check that we're not trying to
538 /// promote something larger than our base 'TypeSize' type.
539 bool ARMCodeGenPrepare::isLegalToPromote(Value *V) {
540 if (!isSupportedType(V))
// Determine the effective width of V: for loads the pointee width, for
// zexts the width of the value being extended, otherwise V's own width.
544 if (auto *Ld = dyn_cast<LoadInst>(V)) {
545 auto *PtrTy = cast<PointerType>(Ld->getPointerOperandType());
546 VSize = PtrTy->getElementType()->getPrimitiveSizeInBits();
547 } else if (auto *ZExt = dyn_cast<ZExtInst>(V)) {
548 VSize = ZExt->getOperand(0)->getType()->getPrimitiveSizeInBits();
550 VSize = V->getType()->getPrimitiveSizeInBits();
// Never widen the tree beyond the width we started the search from.
553 if (VSize > TypeSize)
556 if (isPromotedResultSafe(V))
// Unsafe overflow can still be handled if a DSP intrinsic can replace it.
559 if (auto *I = dyn_cast<Instruction>(V))
560 return isNarrowInstSupported(I);
/// Starting from V (an icmp operand), walk the use-def graph collecting the
/// tree of values that would need promoting, classify leaves and roots,
/// apply a simple cost model, and if profitable hand the tree to
/// IRPromoter::Mutate. Returns true if the IR was changed.
/// NOTE(review): several lines are missing from this excerpt (early returns,
/// worklist seeding, cost accounting); comments describe only visible code.
565 bool ARMCodeGenPrepare::TryToPromote(Value *V) {
566 OrigTy = V->getType();
567 TypeSize = OrigTy->getPrimitiveSizeInBits();
569 if (!isSupportedValue(V) || !shouldPromote(V) || !isLegalToPromote(V))
572 LLVM_DEBUG(dbgs() << "ARM CGP: TryToPromote: " << *V << "\n");
574 SetVector<Value*> WorkList;
575 SmallPtrSet<Value*, 8> Leaves;
576 SmallPtrSet<Instruction*, 4> Roots;
578 SmallPtrSet<Value*, 16> CurrentVisited;
579 CurrentVisited.clear();
581 // Return true if the given value can, or has been, visited. Add V to the
582 // worklist if needed.
583 auto AddLegalInst = [&](Value *V) {
584 if (CurrentVisited.count(V))
587 if (!isSupportedValue(V) || (shouldPromote(V) && !isLegalToPromote(V))) {
588 LLVM_DEBUG(dbgs() << "ARM CGP: Can't handle: " << *V << "\n");
596 // Iterate through, and add to, a tree of operands and users in the use-def.
597 while (!WorkList.empty()) {
598 Value *V = WorkList.back();
600 if (CurrentVisited.count(V))
// Constants and other non-instruction non-sources need no processing.
603 if (!isa<Instruction>(V) && !isSource(V))
606 // If we've already visited this value from somewhere, bail now because
607 // the tree has already been explored.
608 // TODO: This could limit the transform, ie if we try to promote something
609 // from an i8 and fail first, before trying an i16.
610 if (AllVisited.count(V)) {
611 LLVM_DEBUG(dbgs() << "ARM CGP: Already visited this: " << *V << "\n");
615 CurrentVisited.insert(V);
616 AllVisited.insert(V);
618 // Calls can be both sources and sinks.
620 Roots.insert(cast<Instruction>(V));
623 else if (auto *I = dyn_cast<Instruction>(V)) {
624 // Visit operands of any instruction visited.
625 for (auto &U : I->operands()) {
626 if (!AddLegalInst(U))
631 // Don't visit users of a node which isn't going to be mutated unless its a
// source (its users still consume the newly zext'd value).
633 if (isSource(V) || shouldPromote(V)) {
634 for (Use &U : V->uses()) {
635 if (!AddLegalInst(U.getUser()))
// Cost model: count nodes that get promoted for free vs those that need
// extra ext/trunc instructions inserted.
641 unsigned NumToPromote = 0;
643 for (auto *V : CurrentVisited) {
644 // Truncs will cause a uxt and no zeroext arguments will often require
// a uxt too, so both count against the transform.
646 if (isa<TruncInst>(V))
648 else if (auto *Arg = dyn_cast<Argument>(V)) {
649 if (!Arg->hasZExtAttr())
653 // Mem ops can automatically be extended/truncated and non-instructions
654 // don't need anything done.
655 if (Leaves.count(V) || isa<StoreInst>(V) || !isa<Instruction>(V))
658 // Will need to truncate calls args and returns.
659 if (Roots.count(cast<Instruction>(V))) {
664 if (shouldPromote(V))
668 LLVM_DEBUG(dbgs() << "ARM CGP: Visited nodes:\n";
669 for (auto *I : CurrentVisited)
672 LLVM_DEBUG(dbgs() << "ARM CGP: Cost of promoting " << NumToPromote
673 << " instructions = " << Cost << "\n");
// Bail when the fix-up cost outweighs the promotions, or nothing to do.
674 if (Cost > NumToPromote || (NumToPromote == 0))
677 Promoter->Mutate(OrigTy, CurrentVisited, Leaves, Roots);
/// Allocate the promoter once per module. NOTE(review): raw `new` — the
/// matching delete is presumably in doFinalization, whose body is not
/// visible in this excerpt; confirm to rule out a leak.
681 bool ARMCodeGenPrepare::doInitialization(Module &M) {
682 Promoter = new IRPromoter(&M);
/// Pass entry point: scan every unsigned/equality integer icmp in F and try
/// to promote the use-def chains feeding its operands. Returns true if any
/// promotion changed the IR.
686 bool ARMCodeGenPrepare::runOnFunction(Function &F) {
// The pass is off unless -arm-disable-cgp=false is given.
687 if (skipFunction(F) || DisableCGP)
690 auto *TPC = &getAnalysis<TargetPassConfig>();
// NOTE(review): a null-check/early-return on TPC appears missing from this
// excerpt (numbering jumps 690->694); confirm against upstream.
694 const TargetMachine &TM = TPC->getTM<TargetMachine>();
695 ST = &TM.getSubtarget<ARMSubtarget>(F);
696 bool MadeChange = false;
697 LLVM_DEBUG(dbgs() << "ARM CGP: Running on " << F.getName() << "\n");
699 // Search up from icmps to try to promote their operands.
700 for (BasicBlock &BB : F) {
701 auto &Insts = BB.getInstList();
702 for (auto &I : Insts) {
// Skip anything already absorbed into a previously-explored tree.
703 if (AllVisited.count(&I))
706 if (isa<ICmpInst>(I)) {
707 auto &CI = cast<ICmpInst>(I);
709 // Skip signed or pointer compares
710 if (CI.isSigned() || !isa<IntegerType>(CI.getOperand(0)->getType()))
713 LLVM_DEBUG(dbgs() << "ARM CGP: Searching from: " << CI << "\n");
714 for (auto &Op : CI.operands()) {
715 if (auto *I = dyn_cast<Instruction>(Op)) {
// For a zext operand, start the search from the value being extended.
716 if (isa<ZExtInst>(I))
717 MadeChange |= TryToPromote(I->getOperand(0));
719 MadeChange |= TryToPromote(I);
// In debug builds, verify we did not corrupt the function.
725 LLVM_DEBUG(if (verifyFunction(F, &dbgs())) {
727 report_fatal_error("Broken function after type promotion");
731 LLVM_DEBUG(dbgs() << "After ARMCodeGenPrepare: " << F << "\n");
// Module teardown. NOTE(review): the body is missing from this excerpt;
// it should release the Promoter allocated in doInitialization — confirm.
736 bool ARMCodeGenPrepare::doFinalization(Module &M) {
// Register the pass with the LegacyPassManager under DEBUG_TYPE
// ("arm-codegenprepare"); not a CFG-only or analysis pass (false, false).
741 INITIALIZE_PASS_BEGIN(ARMCodeGenPrepare, DEBUG_TYPE,
742 "ARM IR optimizations", false, false)
743 INITIALIZE_PASS_END(ARMCodeGenPrepare, DEBUG_TYPE, "ARM IR optimizations",
// Pass identification token; its address is the unique pass ID.
746 char ARMCodeGenPrepare::ID = 0;
/// Factory used by the ARM target to add this pass to the pipeline; the
/// PassManager takes ownership of the returned pass.
748 FunctionPass *llvm::createARMCodeGenPreparePass() {
749 return new ARMCodeGenPrepare();