//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) { }

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have a debug loc line of 0 because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            MI.getDebugLoc().getLine() == 0) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

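// Return the virtual registers holding \p Val, creating (and caching) them on
// first use. Non-constant values get one fresh generic vreg per split LLT;
// constants are translated eagerly, so their defining instructions land in the
// entry block and dominate every use.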
ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
  return FI;
}

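// Return the alignment of a memory-access instruction. cmpxchg and atomicrmw
// carry no alignment in the IR here, so (per PR27168) natural alignment of the
// accessed type is assumed instead of the DataLayout-preferred one.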
Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    return LI->getAlign();
  }
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    return Align(DL.getTypeStoreSize(AI->getCompareOperand()->getType()));
  }
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    return Align(DL.getTypeStoreSize(AI->getValOperand()->getType()));
  }
  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *TPC, *ORE, R);
  return Align(1);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

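// Translate a two-operand IR operation to its generic-MIR counterpart, e.g.
// `%d = add i32 %a, %b` becomes `%d:_(s32) = G_ADD %a, %b`, preserving any
// IR-level fast-math/wrap flags.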
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    Register Op1 = getOrCreateVReg(*U.getOperand(1));
    Register Res = getOrCreateVReg(U);
    uint16_t Flags = 0;
    if (isa<Instruction>(U)) {
      const Instruction &I = cast<Instruction>(U);
      Flags = MachineInstr::copyFlagsFromInstruction(I);
    }
    // Negate the last operand of the FSUB
    MIRBuilder.buildFNeg(Res, Op1, Flags);
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildFNeg(Res, Op0, Flags);
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    assert(CI && "Instruction should be CmpInst");
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1,
                         MachineInstr::copyFlagsFromInstruction(*CI));
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may mess up with the insertion point, but
  // this is not important as a return is the last instruction
  // of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg);
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    Register Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}

void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N
    // is the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}

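// Switch lowering reuses SelectionDAG's SwitchCG machinery: cases are turned
// into clusters, jump tables are discovered by the shared SwitchLoweringUtils
// code (SL), and each resulting work item is lowered by the helpers below.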
bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  // FIXME: At the moment we don't do any splitting optimizations here like
  // SelectionDAG does, so this worklist only has one entry.
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}

void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}

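// The header block computes Sub = SwitchValue - FirstCase and, unless the
// range check was proven redundant, branches to the default block when
// Sub ugt (LastCase - FirstCase). Biasing the value this way lets a single
// unsigned compare test the whole case range, since values below FirstCase
// wrap around to large unsigned numbers.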
bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncation.
  Type *PtrIRTy = SValue.getType()->getPointerTo();
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.OmitRangeCheck) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}

void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
    Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt &Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt &High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  //  if (SwitchBB->getBasicBlock() != CB.FalseBB->getBasicBlock())
  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == CB.ThisBB->getNextNode()) {
    std::swap(CB.TrueBB, CB.FalseBB);
    auto True = MIB.buildConstant(i1Ty, 1);
    Cond = MIB.buildXor(i1Ty, Cond, True).getReg(0);
  }

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  // Skip the range check if the fallthrough block is unreachable.
  if (FallthroughUnreachable)
    JTH->OmitRangeCheck = true;

  if (!JTH->OmitRangeCheck)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}

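// A CC_Range cluster is lowered to a single equality test when it covers one
// value, or to the signed-less-or-equal / subtract-and-unsigned-compare range
// test emitted by emitSwitchCase when it covers several.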
bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            const Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       const Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  if (EnableOpts) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      LLVM_DEBUG(dbgs() << "Switch to bit test optimization unimplemented");
      return false; // Bit tests currently unimplemented.
    }
    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    // It's legal for indirectbr instructions to have duplicate blocks in the
    // destination list. We don't allow this in MIR. Skip anything that's
    // already a successor.
    if (!AddedSuccessors.insert(Succ).second)
      continue;
    CurBB.addSuccessor(&getMBB(*Succ));
  }

  return true;
}

static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}

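// Aggregate loads are split into one G_LOAD per leaf type computed by
// computeValueLLTs; each part reads at Base + Offsets[i]/8 and carries its own
// MachineMemOperand.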
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);
  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
                                                    LI.getPointerOperand());
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL);

  const MDNode *Ranges =
      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(LI);
    AAMDNodes AAMetadata;
    LI.getAAMetadata(AAMetadata);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Regs[i]).getSizeInBytes(),
        commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(SI);
    AAMDNodes AAMetadata;
    SI.getAAMetadata(AAMetadata);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Vals[i]).getSizeInBytes(),
        commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }

  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

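// For example, `%x = extractvalue {i32, i64} %agg, 1` yields (with a typical
// DataLayout that aligns i64 to 8 bytes) a bit offset of 64, so the
// destination registers below are copied from the source registers starting
// at the part covering that offset.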
bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  uint16_t Flags = 0;
  if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*SI);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
  }

  return true;
}

bool IRTranslator::translateCopy(const User &U, const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
  Register Src = getOrCreateVReg(V);
  auto &Regs = *VMap.getVRegs(U);
  if (Regs.empty()) {
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);
  } else {
    // If we already assigned a vreg for this instruction, we can't change
    // that. Emit a copy to satisfy the users we already emitted.
    MIRBuilder.buildCopy(Regs[0], Src);
  }
  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL))
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}

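// GEPs become chains of G_PTR_ADD. Constant indices are folded into a running
// byte offset that is materialized lazily; variable indices are sign-extended
// or truncated to the pointer-width offset type and scaled by the element
// size with G_MUL before being added. Vector GEPs first splat any scalar
// operands so every operand has the same vector width.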
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  // Normalize Vector GEP - all scalar operands should be converted to the
  // splat vector.
  unsigned VectorWidth = 0;
  if (auto *VT = dyn_cast<VectorType>(U.getType()))
    VectorWidth = cast<FixedVectorType>(VT)->getNumElements();

  // We might need to splat the base pointer into a vector if the offsets
  // are vectors.
  if (VectorWidth && !PtrTy.isVector()) {
    BaseReg =
        MIRBuilder.buildSplatVector(LLT::vector(VectorWidth, PtrTy), BaseReg)
            .getReg(0);
    PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
    PtrTy = getLLTForType(*PtrIRTy, *DL);
    OffsetIRTy = DL->getIntPtrType(PtrIRTy);
    OffsetTy = getLLTForType(*OffsetIRTy, *DL);
  }

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
                      .getReg(0);
        Offset = 0;
      }

      Register IdxReg = getOrCreateVReg(*Idx);
      LLT IdxTy = MRI->getType(IdxReg);
      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && VectorWidth) {
          IdxReg = MIRBuilder.buildSplatVector(
              OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
        }

        IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      Register GepOffsetReg;
      if (ElementSize != 1) {
        auto ElementSizeMIB = MIRBuilder.buildConstant(
            getLLTForType(*OffsetIRTy, *DL), ElementSize);
        GepOffsetReg =
            MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
      } else
        GepOffsetReg = IdxReg;

      BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
    }
  }

  if (Offset != 0) {
    auto OffsetMIB =
        MIRBuilder.buildConstant(OffsetTy, Offset);
    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

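// memcpy/memmove/memset calls keep their intrinsic form here, but the builder
// attaches explicit MachineMemOperands (a store on operand 0 and, for the
// copying variants, a load on operand 1) plus a trailing immediate carrying
// the IR tail-call flag.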
bool IRTranslator::translateMemFunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    Intrinsic::ID ID) {

  // If the source is undef, then just emit a nop.
  if (isa<UndefValue>(CI.getArgOperand(1)))
    return true;

  ArrayRef<Register> Res;
  auto ICall = MIRBuilder.buildIntrinsic(ID, Res, true);
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI)
    ICall.addUse(getOrCreateVReg(**AI));

  Align DstAlign;
  Align SrcAlign;
  unsigned IsVol =
      cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
          ->getZExtValue();

  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
  } else {
    auto *MSI = cast<MemSetInst>(&CI);
    DstAlign = MSI->getDestAlign().valueOrOne();
  }

  // We need to propagate the tail call flag from the IR inst as an argument.
  // Otherwise, we have to pessimize and assume later that we cannot tail call
  // any memory intrinsics.
  ICall.addImm(CI.isTailCall() ? 1 : 0);

  // Create mem operands to store the alignment and volatile info.
  auto VolFlag =
      IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  ICall.addMemOperand(MF->getMachineMemOperand(
      MachinePointerInfo(CI.getArgOperand(0)),
      MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
  if (ID != Intrinsic::memset)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(CI.getArgOperand(1)),
        MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));

  return true;
}

void IRTranslator::getStackGuard(Register DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB =
      MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(
      Op, {ResRegs[0], ResRegs[1]},
      {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});

  return true;
}

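// "Simple" intrinsics are those that map 1:1 onto a generic opcode: one def
// for the call result and one use per argument, with no side effects beyond
// the opcode itself.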
unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  }
  return Intrinsic::not_intrinsic;
}

bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (auto &Arg : CI.arg_operands())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}

// TODO: Include ConstrainedOps.def when all strict instructions are defined.
static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  default:
    return 0;
  }
}

bool IRTranslator::translateConstrainedFPIntrinsic(
  const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
  fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();

  unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
  if (!Opcode)
    return false;

  unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI);
  if (EB == fp::ExceptionBehavior::ebIgnore)
    Flags |= MachineInstr::NoFPExcept;

  SmallVector<llvm::SrcOp, 4> VRegs;
  VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
  if (!FPI.isUnaryOp())
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
  if (FPI.isTernaryOp())
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));

  MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {

  // If this is a simple intrinsic (that is, we just need to add a def of
  // a vreg, and uses for each arg operand), then translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    // Get the underlying objects for the location passed on the lifetime
    // marker.
    SmallVector<const Value *, 4> Allocas;
    GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);

    // Iterate over each underlying object, creating lifetime markers for each
    // static alloca. Quit if we find a non-static alloca.
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
      if (!AI)
        continue;

      if (!AI->isStaticAlloca())
        return true;

      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    }
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    // FIXME: Get alignment
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
                                                MachineMemOperand::MOStore,
                                                ListSize, Align(1)));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      for (Register Reg : getOrCreateVRegs(*V)) {
        // FIXME: This does not handle register-indirect values at offset 0.
        // The direct/indirect thing shouldn't really be handled by something
        // as implicit as reg+noreg vs reg+imm in the first place, but it
        // seems pretty baked in right now.
        MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(),
                                       DI.getExpression());
      }
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    Register Dst = getOrCreateVReg(CI);
    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(*MF,
                                       TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
                          MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildFMul(
          Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildFAdd(Dst, FMul, Op2,
                           MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    Register Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy.getSizeInBits() / 8, Align(8)));
    return true;
  }
  case Intrinsic::stacksave: {
    // Save the stack pointer to the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(CI);
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(Reg, StackPtr);
    return true;
  }
  case Intrinsic::stackrestore: {
    // Restore the stack pointer from the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(StackPtr, Reg);
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
                          {getOrCreateVReg(*CI.getArgOperand(0))});
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::assume:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // Discard annotate attributes, assumptions, and artificial side-effects.
    return true;
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
    return true;
  }
  case Intrinsic::write_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  }
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
                                           MIRBuilder);
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallBase &CB,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();

  if (!ALI) {
    LLVM_DEBUG(
        dbgs() << "Inline asm lowering is not supported for this target yet\n");
    return false;
  }

  return ALI->lowerInlineAsm(
      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
}

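// Lower a call-like instruction (call or invoke). swifterror arguments are
// routed through the SwiftError value tracking: uses are copied from the
// current swifterror vreg, and a fresh vreg is defined for the value after
// the call returns.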
bool IRTranslator::translateCallBase(const CallBase &CB,
                                     MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> Res = getOrCreateVRegs(CB);

  SmallVector<ArrayRef<Register>, 8> Args;
  Register SwiftInVReg = 0;
  Register SwiftErrorVReg = 0;
  for (auto &Arg : CB.args()) {
    if (CLI->supportSwiftError() && isSwiftError(Arg)) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      LLT Ty = getLLTForType(*Arg->getType(), *DL);
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &CB, &MIRBuilder.getMBB(), Arg));
      Args.emplace_back(makeArrayRef(SwiftInVReg));
      SwiftErrorVReg =
          SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
      continue;
    }
    Args.push_back(getOrCreateVRegs(*Arg));
  }

  // We don't set HasCalls on MFI here yet because call lowering may decide to
  // optimize into tail calls. Instead, we defer that to selection where a final
  // scan is done to check if any instructions are calls.
  bool Success =
      CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
                     [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });

  // Check if we just inserted a tail call.
  if (Success) {
    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
    HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
  }

  return Success;
}

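// Calls are dispatched in order: inline asm, intrinsics with dedicated
// translation in translateKnownIntrinsic, plain calls via the target's
// CallLowering, and finally any remaining intrinsic as a generic
// G_INTRINSIC / G_INTRINSIC_W_SIDE_EFFECTS instruction.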
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && (F->hasDLLImportStorageClass() ||
            (MF->getTarget().getTargetTriple().isOSWindows() &&
             F->hasExternalWeakLinkage())))
    return false;

  // FIXME: support control flow guard targets.
  if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
    return translateCallBase(CI, MIRBuilder);

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

1713 ArrayRef<Register> ResultRegs;
1714 if (!CI.getType()->isVoidTy())
1715 ResultRegs = getOrCreateVRegs(CI);
1717 // Ignore the callsite attributes. Backend code is most likely not expecting
1718 // an intrinsic to sometimes have side effects and sometimes not.
1719 MachineInstrBuilder MIB =
1720 MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
1721 if (isa<FPMathOperator>(CI))
1722 MIB->copyIRFlags(CI);
1724 for (auto &Arg : enumerate(CI.arg_operands())) {
1725 // Some intrinsics take metadata parameters. Reject them.
1726 if (isa<MetadataAsValue>(Arg.value()))
1729 // If this is required to be an immediate, don't materialize it in a
1731 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
1732 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
1733 // imm arguments are more convenient than cimm (and realistically
1734 // probably sufficient), so use them.
1735 assert(CI->getBitWidth() <= 64 &&
1736 "large intrinsic immediates not handled");
1737 MIB.addImm(CI->getSExtValue());
1739 MIB.addFPImm(cast<ConstantFP>(Arg.value()));
1742 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
1743 if (VRegs.size() > 1)
1745 MIB.addUse(VRegs[0]);
1749 // Add a MachineMemOperand if it is a target mem intrinsic.
1750 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1751 TargetLowering::IntrinsicInfo Info;
1752 // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
1753 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
1754 Align Alignment = Info.align.getValueOr(
1755 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
1757 uint64_t Size = Info.memVT.getStoreSize();
1758 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
1759 Info.flags, Size, Alignment));
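// Illustrative example for the ImmArg path above: an intrinsic such as
// llvm.prefetch declares several operands 'immarg', so for a call like
//   call void @llvm.prefetch.p0i8(i8* %p, i32 0, i32 3, i32 1)
// the 0/3/1 operands must stay immediates on the G_INTRINSIC_W_SIDE_EFFECTS
// instruction (addImm) rather than being materialized into vregs.
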
bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Function *Fn = I.getCalledFunction();
  if (I.isInlineAsm())
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support control flow guard targets.
  if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  if (!translateCallBase(I, MIRBuilder))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

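// The MIR produced for a translatable invoke looks roughly like:
//   EH_LABEL <begin>
//   ... lowered call ...
//   EH_LABEL <end>
//   G_BR %return-bb
// with both the normal successor and the EH pad added as successors, and the
// label range registered through MF->addInvoke() so exception table emission
// can cover the call site.
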
bool IRTranslator::translateCallBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // FIXME: Implement this.
  return false;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If the landingpad's return type is token type, we don't create
  // instructions for its exception pointer and selector value. The extraction
  // of the exception pointer or selector value from a token type landingpad is
  // not currently supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  Register Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark the exception register as live in.
  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}

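// For a typical landingpad of type { i8*, i32 }, ResRegs[0] receives the
// exception pointer copied out of the target's exception register, and
// ResRegs[1] the selector: it is first copied into a pointer-typed vreg
// (Tys[0]) and then cast down to the i32 selector type computed above.
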
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return true;

  if (AI.isStaticAlloca()) {
    Register Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Register NumElts = getOrCreateVReg(*AI.getArraySize());
  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  Type *Ty = AI.getAllocatedType();

  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  Register TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  // Round the size of the allocation up to the stack alignment size by adding
  // SA-1 to the size. This doesn't overflow because we're computing an address
  // inside an alloca.
  Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
  auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
                                      MachineInstr::NoUWrap);
  auto AlignCst =
      MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

  Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
  if (Alignment <= StackAlign)
    Alignment = Align(1);
  MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);

  MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}

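// Worked example for the rounding above: with a 16-byte stack alignment, a
// computed byte size of 20 becomes (20 + 15) & ~15 == 32, so the dynamic
// allocation stays stack-aligned without a runtime divide or remainder.
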
bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),
                         DL->getABITypeAlign(U.getType()).value()});
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar directly, as <1 x Ty> is not a
  // legal vector type in LLT.
  if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  Register Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar directly, as <1 x Ty> is not a
  // legal vector type in LLT.
  if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
  Register Idx;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0);
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

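// The index normalization in translateExtractElement above means that, e.g.,
// extracting with an i32 index on a target whose preferred vector index type
// is i64 either rewrites a constant index to i64 or emits a G_SEXT of the
// index vreg to s64, so later stages see a single, predictable index width.
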
bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  ArrayRef<int> Mask;
  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
    Mask = SVI->getShuffleMask();
  else
    Mask = cast<ConstantExpr>(U).getShuffleMask();
  ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
  MIRBuilder
      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

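// PHI operands may reference values in blocks that haven't been translated
// yet (e.g. loop back-edges), so only the G_PHI defs are created here. The
// (value, predecessor) operand pairs are filled in by finishPendingPhis()
// once every block has been translated and all incoming vregs exist.
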
bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);

  Type *ResType = I.getType();
  Type *ValType = ResType->getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  Register OldValRes = Res[0];
  Register SuccessRes = Res[1];
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

  AAMDNodes AAMetadata;
  I.getAAMetadata(AAMetadata);

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(
          MachinePointerInfo(I.getPointerOperand()), Flags,
          DL->getTypeStoreSize(ValType), getMemOpAlign(I), AAMetadata, nullptr,
          I.getSyncScopeID(), I.getSuccessOrdering(), I.getFailureOrdering()));
  return true;
}

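// An IR cmpxchg yields a struct { T oldval, i1 success }; getOrCreateVRegs
// split it into the two result registers used above, and
// G_ATOMIC_CMPXCHG_WITH_SUCCESS defines both in one generic instruction
// (targets without a native "with success" form can have the legalizer expand
// it later).
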
bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);

  Type *ResType = I.getType();

  Register Res = getOrCreateVReg(I);
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  case AtomicRMWInst::FAdd:
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    break;
  case AtomicRMWInst::FSub:
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    break;
  }

  AAMDNodes AAMetadata;
  I.getAAMetadata(AAMetadata);

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ResType),
                                getMemOpAlign(I), AAMetadata, nullptr,
                                I.getSyncScopeID(), I.getOrdering()));
  return true;
}

bool IRTranslator::translateFence(const User &U,
                                  MachineIRBuilder &MIRBuilder) {
  const FenceInst &Fence = cast<FenceInst>(U);
  MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
                        Fence.getSyncScopeID());
  return true;
}

bool IRTranslator::translateFreeze(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
  const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));

  assert(DstRegs.size() == SrcRegs.size() &&
         "Freeze with different source and destination type?");

  for (unsigned I = 0; I < DstRegs.size(); ++I) {
    MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
  }

  return true;
}

void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    SmallSet<const MachineBasicBlock *, 16> SeenPreds;
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
          continue;
        SeenPreds.insert(Pred);
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  if (Offsets && !Offsets->empty())
    Offsets->clear();
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}

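// For example, a value of IR type {i32, i64} yields two LLTs (s32 and s64),
// so valueIsSplit returns true; with a typical data layout the Offsets
// written back (in bits) would be 0 and 64.
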
bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  // We only emit constants into the entry block from here. To prevent jumpy
  // debug behaviour, set the line to 0.
  if (const DebugLoc &DL = Inst.getDebugLoc())
    EntryBuilder->setDebugLoc(
        DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));
  else
    EntryBuilder->setDebugLoc(DebugLoc());

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  if (TLI.fallBackToDAGISel(Inst))
    return false;

  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}

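// The HANDLE_INST X-macro above expands to one case per IR opcode, e.g.
//   case Instruction::Add: return translateAdd(Inst, *CurBuilder.get());
// where translateAdd and friends are thin inline wrappers (declared in
// IRTranslator.h) around workers such as translateBinaryOp.
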
bool IRTranslator::translate(const Constant &C, Register Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder->buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get());
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translateCopy(C, *CV->getElementAsConstant(0),
                           *EntryBuilder.get());
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get());
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}

void IRTranslator::finalizeBasicBlock() {
  for (auto &JTCase : SL->JTCases) {
    // Emit the header first, if it wasn't already emitted.
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  }
  SL->JTCases.clear();
}

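// Jump table emission happens in this per-block finalization because
// emitJumpTableHeader/emitJumpTable append new compare-and-branch code and
// machine basic blocks, which shouldn't be interleaved with the straight-line
// translation of the current IR block.
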
void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder.reset();
  CurBuilder.reset();
  FuncInfo.clear();
}

/// Returns true if a BasicBlock \p BB within a variadic function contains a
/// variadic musttail call.
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
  if (!IsVarArg)
    return false;

  // Walk the block backwards, because tail calls usually only appear at the
  // end of a block.
  return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
    const auto *CI = dyn_cast<CallInst>(&I);
    return CI && CI->isMustTailCall();
  });
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();

  if (EnableCSE) {
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
  FuncInfo.MF = MF;
  FuncInfo.BPI = nullptr;
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  const TargetMachine &TM = MF->getTarget();
  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(TLI, TM, *DL);

  EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }

  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  if (CLI->fallBackToDAGISel(F)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Lower the actual args into this basic block.
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
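    // In a reverse post-order walk every block appears after its dominators,
    // and SSA defs dominate their uses, so each value is translated before
    // any non-PHI use of it. PHIs may name back-edge values and are completed
    // later in finishPendingPhis().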
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // If we translated a tail call in the last step, then we know
        // everything after the call is either a return, or something that is
        // handled by the call itself. (E.g. a lifetime marker or assume
        // intrinsic.) In this case, we should stop translating the block and
        // move on.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }

      finalizeBasicBlock();
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  SwiftError.propagateVRegs();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize the stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}