//===- ARMParallelDSP.cpp - Parallel DSP Pass -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Armv6 introduced instructions to perform 32-bit SIMD operations. The
/// purpose of this pass is to do some IR pattern matching to create ACLE
/// DSP intrinsics, which map onto these 32-bit SIMD operations.
/// This pass runs only when unaligned accesses are supported/enabled.
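///
/// As a rough illustration, a typical candidate is an i16 dot-product loop,
/// e.g. acc += (i32)a[i] * (i32)b[i], which can be rewritten to use the
/// SMLAD family of intrinsics.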
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMSubtarget.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/PassRegistry.h"
#include "llvm/PassSupport.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "arm-parallel-dsp"

STATISTIC(NumSMLAD, "Number of smlad instructions generated");

static cl::opt<bool>
DisableParallelDSP("disable-arm-parallel-dsp", cl::Hidden, cl::init(false),
                   cl::desc("Disable the ARM Parallel DSP pass"));

static cl::opt<unsigned>
NumLoadLimit("arm-parallel-dsp-load-limit", cl::Hidden, cl::init(16),
             cl::desc("Limit the number of loads analysed"));

namespace {
  struct MulCandidate;
  class Reduction;

  using MulCandList = SmallVector<std::unique_ptr<MulCandidate>, 8>;
  using MemInstList = SmallVectorImpl<LoadInst*>;
  using MulPairList = SmallVector<std::pair<MulCandidate*, MulCandidate*>, 8>;

  // 'MulCandidate' holds the multiplication instructions that are candidates
  // for parallel execution.
  struct MulCandidate {
    Instruction   *Root;
    Value*        LHS;
    Value*        RHS;
    bool          Exchange = false;
    bool          Paired = false;
    SmallVector<LoadInst*, 2> VecLd;    // Container for loads to widen.

    MulCandidate(Instruction *I, Value *lhs, Value *rhs) :
      Root(I), LHS(lhs), RHS(rhs) { }

    bool HasTwoLoadInputs() const {
      return isa<LoadInst>(LHS) && isa<LoadInst>(RHS);
    }

    LoadInst *getBaseLoad() const {
      return VecLd.front();
    }
  };

  /// Represent a sequence of multiply-accumulate operations with the aim to
  /// perform the multiplications in parallel.
  class Reduction {
    Instruction             *Root = nullptr;
    Value                   *Acc = nullptr;
    MulCandList             Muls;
    MulPairList             MulPairs;
    SetVector<Instruction*> Adds;

  public:
    Reduction() = delete;

    Reduction(Instruction *Add) : Root(Add) { }

    /// Record an Add instruction that is part of this reduction.
    void InsertAdd(Instruction *I) { Adds.insert(I); }

    /// Create MulCandidates, each rooted at a Mul instruction, that are part
    /// of this reduction.
    void InsertMuls() {
      // Look through a (possible) sext to find a mul feeding this add.
      auto GetMulOperand = [](Value *V) -> Instruction* {
        if (auto *SExt = dyn_cast<SExtInst>(V)) {
          if (auto *I = dyn_cast<Instruction>(SExt->getOperand(0)))
            if (I->getOpcode() == Instruction::Mul)
              return I;
        } else if (auto *I = dyn_cast<Instruction>(V)) {
          if (I->getOpcode() == Instruction::Mul)
            return I;
        }
        return nullptr;
      };

      // The operands of the mul are sexts, so record the values that are
      // being extended.
      auto InsertMul = [this](Instruction *I) {
        Value *LHS = cast<Instruction>(I->getOperand(0))->getOperand(0);
        Value *RHS = cast<Instruction>(I->getOperand(1))->getOperand(0);
        Muls.push_back(std::make_unique<MulCandidate>(I, LHS, RHS));
      };

      for (auto *Add : Adds) {
        if (auto *Mul = GetMulOperand(Add->getOperand(0)))
          InsertMul(Mul);
        if (auto *Mul = GetMulOperand(Add->getOperand(1)))
          InsertMul(Mul);
      }
    }

    /// Add the incoming accumulator value; returns true if a value had not
    /// already been added. Returning false signals to the user that this
    /// reduction already has a value to initialise the accumulator.
    bool InsertAcc(Value *V) {
      if (Acc)
        return false;
      Acc = V;
      return true;
    }

    /// Set two MulCandidates, rooted at muls, that can be executed as a single
    /// parallel operation.
    void AddMulPair(MulCandidate *Mul0, MulCandidate *Mul1,
                    bool Exchange = false) {
      LLVM_DEBUG(dbgs() << "Pairing:\n"
                 << *Mul0->Root << "\n"
                 << *Mul1->Root << "\n");
      Mul0->Paired = true;
      Mul1->Paired = true;
      if (Exchange)
        Mul1->Exchange = true;
      MulPairs.push_back(std::make_pair(Mul0, Mul1));
    }

    /// Return true if enough mul operations are found that can be executed in
    /// parallel.
    bool CreateParallelPairs();

    /// Return the add instruction which is the root of the reduction.
    Instruction *getRoot() { return Root; }

    bool is64Bit() const { return Root->getType()->isIntegerTy(64); }

    Type *getType() const { return Root->getType(); }

    /// Return the incoming value to be accumulated. This may be null.
    Value *getAccumulator() { return Acc; }

    /// Return the set of adds that comprise the reduction.
    SetVector<Instruction*> &getAdds() { return Adds; }

    /// Return the MulCandidates, each rooted at a mul instruction, that
    /// comprise the reduction.
    MulCandList &getMuls() { return Muls; }

    /// Return the MulCandidates, rooted at mul instructions, that have been
    /// paired for parallel execution.
    MulPairList &getMulPairs() { return MulPairs; }

    /// To finalise, replace the uses of the root with the intrinsic call.
    void UpdateRoot(Instruction *SMLAD) {
      Root->replaceAllUsesWith(SMLAD);
    }

    void dump() {
      LLVM_DEBUG(dbgs() << "Reduction:\n";
                 for (auto *Add : Adds)
                   dbgs() << *Add << "\n";
                 for (auto &Mul : Muls)
                   dbgs() << *Mul->Root << "\n"
                          << "  " << *Mul->LHS << "\n"
                          << "  " << *Mul->RHS << "\n";
                 if (Acc) dbgs() << "Acc in: " << *Acc << "\n");
    }
  };

  class WidenedLoad {
    LoadInst *NewLd = nullptr;
    SmallVector<LoadInst*, 4> Loads;

  public:
    WidenedLoad(SmallVectorImpl<LoadInst*> &Lds, LoadInst *Wide)
      : NewLd(Wide) {
      for (auto *I : Lds)
        Loads.push_back(I);
    }
    LoadInst *getLoad() {
      return NewLd;
    }
  };

  class ARMParallelDSP : public FunctionPass {
    ScalarEvolution   *SE;
    AliasAnalysis     *AA;
    TargetLibraryInfo *TLI;
    DominatorTree     *DT;
    const DataLayout  *DL;
    Module            *M;
    std::map<LoadInst*, LoadInst*> LoadPairs;
    SmallPtrSet<LoadInst*, 4> OffsetLoads;
    std::map<LoadInst*, std::unique_ptr<WidenedLoad>> WideLoads;

    template<unsigned>
    bool IsNarrowSequence(Value *V);
    bool Search(Value *V, BasicBlock *BB, Reduction &R);
    bool RecordMemoryOps(BasicBlock *BB);
    void InsertParallelMACs(Reduction &Reduction);
    bool AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1, MemInstList &VecMem);
    LoadInst* CreateWideLoad(MemInstList &Loads, IntegerType *LoadTy);
    bool CreateParallelPairs(Reduction &R);

    /// Try to match and generate: SMLAD, SMLADX - Signed Multiply Accumulate
    /// Dual. These perform two signed 16x16-bit multiplications and add the
    /// products to a 32-bit accumulate operand. Optionally, the instruction
    /// can exchange the halfwords of the second operand before performing the
    /// arithmetic.
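    ///
    /// Illustrative semantics, with lo/hi denoting the bottom and top
    /// halfwords of each 32-bit operand:
    ///   SMLAD:  Res = Acc + (Op0.lo * Op1.lo) + (Op0.hi * Op1.hi)
    ///   SMLADX: Res = Acc + (Op0.lo * Op1.hi) + (Op0.hi * Op1.lo)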
    bool MatchSMLAD(Function &F);

  public:
    static char ID;

    ARMParallelDSP() : FunctionPass(ID) { }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      FunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<ScalarEvolutionWrapperPass>();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<TargetPassConfig>();
      AU.addPreserved<ScalarEvolutionWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.setPreservesCFG();
    }

    bool runOnFunction(Function &F) override {
      if (DisableParallelDSP)
        return false;
      if (skipFunction(F))
        return false;

      SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
      AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
      TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      auto &TPC = getAnalysis<TargetPassConfig>();

      M = F.getParent();
      DL = &M->getDataLayout();

      auto &TM = TPC.getTM<TargetMachine>();
      auto *ST = &TM.getSubtarget<ARMSubtarget>(F);

      if (!ST->allowsUnalignedMem()) {
        LLVM_DEBUG(dbgs() << "Unaligned memory access not supported: not "
                             "running pass ARMParallelDSP\n");
        return false;
      }

      if (!ST->hasDSP()) {
        LLVM_DEBUG(dbgs() << "DSP extension not enabled: not running pass "
                             "ARMParallelDSP\n");
        return false;
      }

      if (!ST->isLittle()) {
        LLVM_DEBUG(dbgs() << "Only supporting little endian: not running pass "
                          << "ARMParallelDSP\n");
        return false;
      }

      LLVM_DEBUG(dbgs() << "\n== Parallel DSP pass ==\n");
      LLVM_DEBUG(dbgs() << " - " << F.getName() << "\n\n");

      bool Changes = MatchSMLAD(F);
      return Changes;
    }
  };
}

template<typename MemInst>
static bool AreSequentialAccesses(MemInst *MemOp0, MemInst *MemOp1,
                                  const DataLayout &DL, ScalarEvolution &SE) {
  if (isConsecutiveAccess(MemOp0, MemOp1, DL, SE))
    return true;
  return false;
}

bool ARMParallelDSP::AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1,
                                        MemInstList &VecMem) {
  if (!Ld0 || !Ld1)
    return false;

  if (!LoadPairs.count(Ld0) || LoadPairs[Ld0] != Ld1)
    return false;

  LLVM_DEBUG(dbgs() << "Loads are sequential and valid:\n";
             dbgs() << "Ld0:"; Ld0->dump();
             dbgs() << "Ld1:"; Ld1->dump());

  VecMem.clear();
  VecMem.push_back(Ld0);
  VecMem.push_back(Ld1);
  return true;
}

// MaxBitWidth: the maximum supported bitwidth of the elements in the DSP
// instructions, which is set to 16. So here we should collect all i8 and i16
// narrow operations.
// TODO: we currently only collect i16, and will support i8 later, so that's
// why we check that types are equal to MaxBitWidth, and not <= MaxBitWidth.
template<unsigned MaxBitWidth>
bool ARMParallelDSP::IsNarrowSequence(Value *V) {
  if (auto *SExt = dyn_cast<SExtInst>(V)) {
    if (SExt->getSrcTy()->getIntegerBitWidth() != MaxBitWidth)
      return false;

    if (auto *Ld = dyn_cast<LoadInst>(SExt->getOperand(0))) {
      // Check that this load could be paired.
      return LoadPairs.count(Ld) || OffsetLoads.count(Ld);
    }
  }
  return false;
}

/// Iterate through the block and record base, offset pairs of loads which can
/// be widened into a single load.
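/// For example (an illustrative sketch, where %a.next is assumed to address
/// the i16 immediately after %a):
///   %ld0 = load i16, i16* %a
///   %ld1 = load i16, i16* %a.next
/// Here %ld0 would be recorded as the base and %ld1 as its offset load, so
/// the pair can later be widened into a single i32 load.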
bool ARMParallelDSP::RecordMemoryOps(BasicBlock *BB) {
  SmallVector<LoadInst*, 8> Loads;
  SmallVector<Instruction*, 8> Writes;
  LoadPairs.clear();
  WideLoads.clear();
  OrderedBasicBlock OrderedBB(BB);

  // Collect loads and any instructions that may write to memory. For now we
  // only record loads which are simple, sign-extended and have a single user.
  // TODO: Allow zero-extended loads.
  for (auto &I : *BB) {
    if (I.mayWriteToMemory())
      Writes.push_back(&I);
    auto *Ld = dyn_cast<LoadInst>(&I);
    if (!Ld || !Ld->isSimple() ||
        !Ld->hasOneUse() || !isa<SExtInst>(Ld->user_back()))
      continue;
    Loads.push_back(Ld);
  }

  if (Loads.empty() || Loads.size() > NumLoadLimit)
    return false;

  using InstSet = std::set<Instruction*>;
  using DepMap = std::map<Instruction*, InstSet>;
  DepMap RAWDeps;

  // Record any writes that may alias a load.
  const auto Size = LocationSize::unknown();
  for (auto Write : Writes) {
    for (auto Read : Loads) {
      MemoryLocation ReadLoc =
        MemoryLocation(Read->getPointerOperand(), Size);

      if (!isModOrRefSet(intersectModRef(AA->getModRefInfo(Write, ReadLoc),
                                         ModRefInfo::ModRef)))
        continue;
      if (OrderedBB.dominates(Write, Read))
        RAWDeps[Read].insert(Write);
    }
  }

  // Check that there isn't a write between the two loads which would prevent
  // them from being safely merged.
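  // For example (illustrative): if a store that aliases the second load sits
  // between the two loads, widening would hoist the second load above the
  // store and read a stale value.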
  auto SafeToPair = [&](LoadInst *Base, LoadInst *Offset) {
    LoadInst *Dominator = OrderedBB.dominates(Base, Offset) ? Base : Offset;
    LoadInst *Dominated = OrderedBB.dominates(Base, Offset) ? Offset : Base;

    if (RAWDeps.count(Dominated)) {
      InstSet &WritesBefore = RAWDeps[Dominated];

      for (auto Before : WritesBefore) {
        // We can't move the second load backward, past a write, to merge
        // with the first load.
        if (OrderedBB.dominates(Dominator, Before))
          return false;
      }
    }
    return true;
  };

  // Record base, offset load pairs.
  for (auto *Base : Loads) {
    for (auto *Offset : Loads) {
      if (Base == Offset || OffsetLoads.count(Offset))
        continue;

      if (AreSequentialAccesses<LoadInst>(Base, Offset, *DL, *SE) &&
          SafeToPair(Base, Offset)) {
        LoadPairs[Base] = Offset;
        OffsetLoads.insert(Offset);
        break;
      }
    }
  }

  LLVM_DEBUG(if (!LoadPairs.empty()) {
               dbgs() << "Consecutive load pairs:\n";
               for (auto &MapIt : LoadPairs)
                 dbgs() << *MapIt.first << ", " << *MapIt.second << "\n";
             });
  return LoadPairs.size() > 1;
}

// Search recursively back through the operands to find a tree of values that
// form a multiply-accumulate chain. The search records the Add and Mul
// instructions that form the reduction and allows us to find a single value
// to be used as the initial input to the accumulator.
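// As an illustrative shape, for the chain:
//   add0 = add(mul0, acc0); acc1 = add(add0, mul1)
// the search records the adds {acc1, add0}, the muls {mul0, mul1}, and
// acc0 as the incoming accumulator.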
bool ARMParallelDSP::Search(Value *V, BasicBlock *BB, Reduction &R) {
  // If we find a non-instruction, try to use it as the initial accumulator
  // value. This may have already been found during the search in which case
  // this function will return false, signaling a search fail.
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return R.InsertAcc(V);

  if (I->getParent() != BB)
    return false;

  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::PHI:
    // Could be the accumulator value.
    return R.InsertAcc(V);
  case Instruction::Add: {
    // Adds should be adding together two muls, or another add and a mul to
    // be within the mac chain. One of the operands may also be the
    // accumulator value at which point we should stop searching.
    R.InsertAdd(I);
    Value *LHS = I->getOperand(0);
    Value *RHS = I->getOperand(1);
    bool ValidLHS = Search(LHS, BB, R);
    bool ValidRHS = Search(RHS, BB, R);

    if (ValidLHS && ValidRHS)
      return true;

    return R.InsertAcc(I);
  }
  case Instruction::Mul: {
    Value *MulOp0 = I->getOperand(0);
    Value *MulOp1 = I->getOperand(1);
    return IsNarrowSequence<16>(MulOp0) && IsNarrowSequence<16>(MulOp1);
  }
  case Instruction::SExt:
    return Search(I->getOperand(0), BB, R);
  }
  return false;
}

// The pass needs to identify integer add/sub reductions of 16-bit vector
// multiplications.
// To use SMLAD:
// 1) we first need to find integer add then look for this pattern:
//
// acc0 = ...
// ld0 = load i16
// sext0 = sext i16 %ld0 to i32
// ld1 = load i16
// sext1 = sext i16 %ld1 to i32
// mul0 = mul %sext0, %sext1
// ld2 = load i16
// sext2 = sext i16 %ld2 to i32
// ld3 = load i16
// sext3 = sext i16 %ld3 to i32
// mul1 = mul i32 %sext2, %sext3
// add0 = add i32 %mul0, %acc0
// acc1 = add i32 %add0, %mul1
//
// Which can be selected to:
//
// ldr r0
// ldr r1
// smlad r2, r0, r1, r2
//
// If constants are used instead of loads, these will need to be hoisted
// out and into a register.
//
// If loop invariants are used instead of loads, these need to be packed
// before the loop begins.
//
bool ARMParallelDSP::MatchSMLAD(Function &F) {
  bool Changed = false;

  for (auto &BB : F) {
    SmallPtrSet<Instruction*, 4> AllAdds;
    if (!RecordMemoryOps(&BB))
      continue;

    for (Instruction &I : reverse(BB)) {
      if (I.getOpcode() != Instruction::Add)
        continue;

      if (AllAdds.count(&I))
        continue;

      const auto *Ty = I.getType();
      if (!Ty->isIntegerTy(32) && !Ty->isIntegerTy(64))
        continue;

      Reduction R(&I);
      if (!Search(&I, &BB, R))
        continue;

      R.InsertMuls();
      LLVM_DEBUG(dbgs() << "After search, Reduction:\n"; R.dump());

      if (!CreateParallelPairs(R))
        continue;

      InsertParallelMACs(R);
      Changed = true;
      AllAdds.insert(R.getAdds().begin(), R.getAdds().end());
    }
  }

  return Changed;
}

bool ARMParallelDSP::CreateParallelPairs(Reduction &R) {

  // Not enough mul operations to make a pair.
  if (R.getMuls().size() < 2)
    return false;

  // Check that the muls operate directly upon sign extended loads.
  for (auto &MulCand : R.getMuls()) {
    if (!MulCand->HasTwoLoadInputs())
      return false;
  }

  auto CanPair = [&](Reduction &R, MulCandidate *PMul0, MulCandidate *PMul1) {
    // The first elements of each vector should be loads with sexts. If we
    // find that it's two pairs of consecutive loads, then these can be
    // transformed into two wider loads and the users can be replaced with
    // DSP intrinsics.
    auto Ld0 = static_cast<LoadInst*>(PMul0->LHS);
    auto Ld1 = static_cast<LoadInst*>(PMul1->LHS);
    auto Ld2 = static_cast<LoadInst*>(PMul0->RHS);
    auto Ld3 = static_cast<LoadInst*>(PMul1->RHS);

    if (AreSequentialLoads(Ld0, Ld1, PMul0->VecLd)) {
      if (AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
        R.AddMulPair(PMul0, PMul1);
        return true;
      } else if (AreSequentialLoads(Ld3, Ld2, PMul1->VecLd)) {
        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
        LLVM_DEBUG(dbgs() << "    exchanging Ld2 and Ld3\n");
        R.AddMulPair(PMul0, PMul1, true);
        return true;
      }
    } else if (AreSequentialLoads(Ld1, Ld0, PMul0->VecLd) &&
               AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
      LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
      LLVM_DEBUG(dbgs() << "    exchanging Ld0 and Ld1\n");
      LLVM_DEBUG(dbgs() << "    and swapping muls\n");
      // Only the second operand can be exchanged, so swap the muls.
      R.AddMulPair(PMul1, PMul0, true);
      return true;
    }
    return false;
  };

  MulCandList &Muls = R.getMuls();
  const unsigned Elems = Muls.size();
  for (unsigned i = 0; i < Elems; ++i) {
    MulCandidate *PMul0 = static_cast<MulCandidate*>(Muls[i].get());
    if (PMul0->Paired)
      continue;

    for (unsigned j = 0; j < Elems; ++j) {
      if (i == j)
        continue;

      MulCandidate *PMul1 = static_cast<MulCandidate*>(Muls[j].get());
      if (PMul1->Paired)
        continue;

      const Instruction *Mul0 = PMul0->Root;
      const Instruction *Mul1 = PMul1->Root;
      if (Mul0 == Mul1)
        continue;

      assert(PMul0 != PMul1 && "expected different chains");

      if (CanPair(R, PMul0, PMul1))
        break;
    }
  }
  return !R.getMulPairs().empty();
}

void ARMParallelDSP::InsertParallelMACs(Reduction &R) {

  auto CreateSMLAD = [&](LoadInst* WideLd0, LoadInst *WideLd1,
                         Value *Acc, bool Exchange,
                         Instruction *InsertAfter) {
    // Replace the reduction chain with an intrinsic call.
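    // For the 32-bit accumulator this produces, roughly:
    //   %mac = call i32 @llvm.arm.smlad(i32 %wide0, i32 %wide1, i32 %acc)
    // where %wide0/%wide1 are the widened loads; the 64-bit and exchanging
    // variants use smlald/smladx/smlaldx instead.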

    Value* Args[] = { WideLd0, WideLd1, Acc };
    Function *SMLAD = nullptr;
    if (Exchange)
      SMLAD = Acc->getType()->isIntegerTy(32) ?
        Intrinsic::getDeclaration(M, Intrinsic::arm_smladx) :
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlaldx);
    else
      SMLAD = Acc->getType()->isIntegerTy(32) ?
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
        Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);

    IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
                                BasicBlock::iterator(InsertAfter));
    Instruction *Call = Builder.CreateCall(SMLAD, Args);
    NumSMLAD++;
    return Call;
  };

  // Return the instruction after the dominated instruction.
  auto GetInsertPoint = [this](Value *A, Value *B) {
    assert((isa<Instruction>(A) || isa<Instruction>(B)) &&
           "expected at least one instruction");

    Value *V = nullptr;
    if (!isa<Instruction>(A))
      V = B;
    else if (!isa<Instruction>(B))
      V = A;
    else
      V = DT->dominates(cast<Instruction>(A), cast<Instruction>(B)) ? B : A;

    return &*++BasicBlock::iterator(cast<Instruction>(V));
  };

  Value *Acc = R.getAccumulator();

  // For any muls that were discovered but not paired, accumulate their values
  // as before.
  IRBuilder<NoFolder> Builder(R.getRoot()->getParent());
  MulCandList &MulCands = R.getMuls();
  for (auto &MulCand : MulCands) {
    if (MulCand->Paired)
      continue;

    Instruction *Mul = cast<Instruction>(MulCand->Root);
    LLVM_DEBUG(dbgs() << "Accumulating unpaired mul: " << *Mul << "\n");

    if (R.getType() != Mul->getType()) {
      assert(R.is64Bit() && "expected 64-bit result");
      Builder.SetInsertPoint(&*++BasicBlock::iterator(Mul));
      Mul = cast<Instruction>(Builder.CreateSExt(Mul, R.getRoot()->getType()));
    }

    if (!Acc) {
      Acc = Mul;
      continue;
    }

    // If Acc is the original incoming value to the reduction, it could be a
    // phi. But the phi will dominate Mul, meaning that Mul will be the
    // insertion point.
    Builder.SetInsertPoint(GetInsertPoint(Mul, Acc));
    Acc = Builder.CreateAdd(Mul, Acc);
  }

  if (!Acc) {
    Acc = R.is64Bit() ?
      ConstantInt::get(IntegerType::get(M->getContext(), 64), 0) :
      ConstantInt::get(IntegerType::get(M->getContext(), 32), 0);
  } else if (Acc->getType() != R.getType()) {
    Builder.SetInsertPoint(R.getRoot());
    Acc = Builder.CreateSExt(Acc, R.getType());
  }

  // Roughly sort the mul pairs in their program order.
  OrderedBasicBlock OrderedBB(R.getRoot()->getParent());
  llvm::sort(R.getMulPairs(), [&OrderedBB](auto &PairA, auto &PairB) {
    const Instruction *A = PairA.first->Root;
    const Instruction *B = PairB.first->Root;
    return OrderedBB.dominates(A, B);
  });

  IntegerType *Ty = IntegerType::get(M->getContext(), 32);
  for (auto &Pair : R.getMulPairs()) {
    MulCandidate *LHSMul = Pair.first;
    MulCandidate *RHSMul = Pair.second;
    LoadInst *BaseLHS = LHSMul->getBaseLoad();
    LoadInst *BaseRHS = RHSMul->getBaseLoad();
    LoadInst *WideLHS = WideLoads.count(BaseLHS) ?
      WideLoads[BaseLHS]->getLoad() : CreateWideLoad(LHSMul->VecLd, Ty);
    LoadInst *WideRHS = WideLoads.count(BaseRHS) ?
      WideLoads[BaseRHS]->getLoad() : CreateWideLoad(RHSMul->VecLd, Ty);

    Instruction *InsertAfter = GetInsertPoint(WideLHS, WideRHS);
    InsertAfter = GetInsertPoint(InsertAfter, Acc);
    Acc = CreateSMLAD(WideLHS, WideRHS, Acc, RHSMul->Exchange, InsertAfter);
  }
  R.UpdateRoot(cast<Instruction>(Acc));
}

LoadInst* ARMParallelDSP::CreateWideLoad(MemInstList &Loads,
                                         IntegerType *LoadTy) {
  assert(Loads.size() == 2 && "currently only support widening two loads");

  LoadInst *Base = Loads[0];
  LoadInst *Offset = Loads[1];

  Instruction *BaseSExt = dyn_cast<SExtInst>(Base->user_back());
  Instruction *OffsetSExt = dyn_cast<SExtInst>(Offset->user_back());

  assert((BaseSExt && OffsetSExt)
         && "Loads should have a single, extending, user");

  std::function<void(Value*, Value*)> MoveBefore =
    [&](Value *A, Value *B) -> void {
      if (!isa<Instruction>(A) || !isa<Instruction>(B))
        return;

      auto *Source = cast<Instruction>(A);
      auto *Sink = cast<Instruction>(B);

      // Nothing to do if the source already dominates the sink; we also
      // don't move across blocks or past phis.
      if (DT->dominates(Source, Sink) ||
          Source->getParent() != Sink->getParent() ||
          isa<PHINode>(Source) || isa<PHINode>(Sink))
        return;

      Source->moveBefore(Sink);
      for (auto &Op : Source->operands())
        MoveBefore(Op, Source);
    };

  // Insert the load at the point of the original dominating load.
  LoadInst *DomLoad = DT->dominates(Base, Offset) ? Base : Offset;
  IRBuilder<NoFolder> IRB(DomLoad->getParent(),
                          ++BasicBlock::iterator(DomLoad));

  // Bitcast the pointer to a wider type and create the wide load, while making
  // sure to maintain the original alignment as this prevents ldrd from being
  // generated when it could be illegal due to memory alignment.
  const unsigned AddrSpace = DomLoad->getPointerAddressSpace();
  Value *VecPtr = IRB.CreateBitCast(Base->getPointerOperand(),
                                    LoadTy->getPointerTo(AddrSpace));
  LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr,
                                             Base->getAlignment());

  // Make sure everything is in the correct order in the basic block.
  MoveBefore(Base->getPointerOperand(), VecPtr);
  MoveBefore(VecPtr, WideLoad);

  // From the wide load, create two values that equal the original two loads.
  // Loads[0] needs trunc while Loads[1] needs a lshr and trunc.
  // TODO: Support big-endian as well.
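  // For a little-endian i32 wide load W of two i16 halves, roughly:
  //   Bottom = trunc(W)            ; bits [15:0],  replaces Loads[0]
  //   Top    = trunc(lshr(W, 16))  ; bits [31:16], replaces Loads[1]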
  Value *Bottom = IRB.CreateTrunc(WideLoad, Base->getType());
  Value *NewBaseSExt = IRB.CreateSExt(Bottom, BaseSExt->getType());
  BaseSExt->replaceAllUsesWith(NewBaseSExt);

  IntegerType *OffsetTy = cast<IntegerType>(Offset->getType());
  Value *ShiftVal = ConstantInt::get(LoadTy, OffsetTy->getBitWidth());
  Value *Top = IRB.CreateLShr(WideLoad, ShiftVal);
  Value *Trunc = IRB.CreateTrunc(Top, OffsetTy);
  Value *NewOffsetSExt = IRB.CreateSExt(Trunc, OffsetSExt->getType());
  OffsetSExt->replaceAllUsesWith(NewOffsetSExt);
796 LLVM_DEBUG(dbgs() << "From Base and Offset:\n"
797 << *Base << "\n" << *Offset << "\n"
798 << "Created Wide Load:\n"
801 << *NewBaseSExt << "\n"
804 << *NewOffsetSExt << "\n");
805 WideLoads.emplace(std::make_pair(Base,
806 std::make_unique<WidenedLoad>(Loads, WideLoad)));

Pass *llvm::createARMParallelDSPPass() {
  return new ARMParallelDSP();
}

char ARMParallelDSP::ID = 0;

INITIALIZE_PASS_BEGIN(ARMParallelDSP, "arm-parallel-dsp",
                      "Transform functions to use DSP intrinsics", false, false)
INITIALIZE_PASS_END(ARMParallelDSP, "arm-parallel-dsp",
                    "Transform functions to use DSP intrinsics", false, false)