//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    assert(CurrInst->getDebugLoc() == MI.getDebugLoc() &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

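/// Split \p Ty into its leaf scalar/vector pieces, appending one LLT per leaf
/// to \p ValueTys and, when \p Offsets is non-null, the bit offset of each
/// leaf within the value. For example, on a typical 64-bit DataLayout, the
/// struct {i32, [2 x i16]} yields s32, s16, s16 at bit offsets 0, 32 and 48
/// (struct padding included).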
static void computeValueLLTs(const DataLayout &DL, Type &Ty,
                             SmallVectorImpl<LLT> &ValueTys,
                             SmallVectorImpl<uint64_t> *Offsets = nullptr,
                             uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

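// Binary operators map one-to-one onto generic opcodes; e.g. translating
// "%res = fadd float %a, %b" emits, roughly,
//   %res:_(s32) = G_FADD %a, %b
// with the IR instruction's flags copied onto the new generic instruction
// when the source User is an Instruction.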
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  auto FBinOp =
      MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  if (isa<Instruction>(U)) {
    MachineInstr *FBinOpMI = FBinOp.getInstr();
    const Instruction &I = cast<Instruction>(U);
    FBinOpMI->copyIRFlags(I);
  }
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FNeg is a unary operator: its single input is operand 0.
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)));
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  // Use U.getType() below rather than CI->getType(): U may be a ConstantExpr,
  // in which case CI is null.
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    auto FCmp = MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);
    if (CI)
      FCmp->copyIRFlags(*CI);
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<unsigned> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  // The target may mess up with the insertion point, but
  // this is not important as a return is the last instruction
  // of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs);
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}

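// Each case of a switch becomes its own compare-and-branch block; e.g.
//   switch i32 %v, label %def [ i32 1, label %a
//                               i32 2, label %b ]
// is emitted, roughly, as:
//   %t1 = G_ICMP eq, %v, 1; G_BRCOND %t1, %a; G_BR to the next compare
//   %t2 = G_ICMP eq, %v, 2; G_BRCOND %t2, %b; G_BR %def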
bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.
  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

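// Loads of split values (e.g. aggregates) emit one G_LOAD per leaf: for a
// load of {i32, i64} on a typical 64-bit DataLayout this is, roughly, a
// G_LOAD of s32 at byte offset 0 and a G_LOAD of s64 at byte offset 8, each
// carrying its own MachineMemOperand narrowed to the piece being loaded.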
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
                 Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition());
  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    auto Select =
        MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);
    if (Cmp && isa<FPMathOperator>(Cmp)) {
      Select->copyIRFlags(*Cmp);
    }
  }

  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

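// GEPs are decomposed into explicit pointer arithmetic: constant indices are
// folded into a running byte offset, while each variable index is widened or
// truncated to the pointer-sized offset type, scaled with G_MUL, and applied
// with G_GEP. E.g. "%p = getelementptr i32, i32* %base, i64 %i" becomes,
// roughly:
//   %off = G_MUL %i, 4
//   %p = G_GEP %base, %off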
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        unsigned ElementSizeReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

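// llvm.memcpy/memmove/memset in address space 0 are lowered here to plain
// libcalls: e.g. a call to llvm.memcpy.p0i8.p0i8.i64 becomes a call to the
// external symbol "memcpy" carrying the same dst/src/size arguments. Other
// address spaces and non-pointer-sized lengths are rejected and fall back.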
bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}

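// Overflow intrinsics define two results, the value and the overflow bit, so
// two vregs exist for the call; e.g. llvm.uadd.with.overflow.i32 becomes,
// roughly:
//   %val:_(s32), %ovf:_(s1) = G_UADDO %a, %b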
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(Op)
      .addDef(ResRegs[0])
      .addDef(ResRegs[1])
      .addUse(getOrCreateVReg(*CI.getOperand(0)))
      .addUse(getOrCreateVReg(*CI.getOperand(1)));

  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // opts though.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::pow: {
    auto Pow = MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    Pow->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::exp: {
    auto Exp = MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Exp->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::exp2: {
    auto Exp2 = MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
                    .addDef(getOrCreateVReg(CI))
                    .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Exp2->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log: {
    auto Log = MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log2: {
    auto Log2 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
                    .addDef(getOrCreateVReg(CI))
                    .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log2->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::log10: {
    auto Log10 = MIRBuilder.buildInstr(TargetOpcode::G_FLOG10)
                     .addDef(getOrCreateVReg(CI))
                     .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Log10->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::fabs: {
    auto Fabs = MIRBuilder.buildInstr(TargetOpcode::G_FABS)
                    .addDef(getOrCreateVReg(CI))
                    .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    Fabs->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::trunc:
    MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::round:
    MIRBuilder.buildInstr(TargetOpcode::G_INTRINSIC_ROUND)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fma: {
    auto FMA = MIRBuilder.buildInstr(TargetOpcode::G_FMA)
                   .addDef(getOrCreateVReg(CI))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
                   .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
    FMA->copyIRFlags(CI);
    return true;
  }
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      auto FMA =
          MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2});
      FMA->copyIRFlags(CI);
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1});
      FMul->copyIRFlags(CI);
      auto FAdd =
          MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2});
      FAdd->copyIRFlags(CI);
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::is_constant:
    // If this wasn't constant-folded away by now, then it's not a
    // constant.
    MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
    return true;
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::ctpop: {
    MIRBuilder.buildInstr(TargetOpcode::G_CTPOP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::ceil:
    MIRBuilder.buildInstr(TargetOpcode::G_FCEIL)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

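// Re-pack a split value into a single wide scalar for call lowering by
// chaining G_INSERTs into a G_IMPLICIT_DEF; e.g. a two-piece {s32, s32}
// value becomes, roughly:
//   %u = G_IMPLICIT_DEF
//   %t = G_INSERT %u, %lo, 0
//   %packed = G_INSERT %t, %hi, 32
// unpackRegs below performs the inverse with G_EXTRACTs.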
unsigned IRTranslator::packRegs(const Value &V,
                                MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
  LLT BigTy = getLLTForType(*V.getType(), *DL);

  if (Regs.size() == 1)
    return Regs[0];

  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
    MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
    Dst = NewDst;
  }
  return Dst;
}

void IRTranslator::unpackRegs(const Value &V, unsigned Src,
                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);

  for (unsigned i = 0; i < Regs.size(); ++i)
    MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  bool IsSplitType = valueIsSplit(CI);
  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
                                     getLLTForType(*CI.getType(), *DL))
                               : getOrCreateVReg(CI);

    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(packRegs(*Arg, MIRBuilder));

    MF->getFrameInfo().setHasCalls(true);
    bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });

    if (IsSplitType)
      unpackRegs(CI, Res, MIRBuilder);
    return Success;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = 0;
  if (!CI.getType()->isVoidTy()) {
    if (IsSplitType)
      Res =
          MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL));
    else
      Res = getOrCreateVReg(CI);
  }
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(packRegs(*Arg, MIRBuilder));
  }

  if (IsSplitType)
    unpackRegs(CI, Res, MIRBuilder);

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Info.align));
  }

  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res =
      MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
  SmallVector<unsigned, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.push_back(packRegs(*Arg, MIRBuilder));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  unpackRegs(I, Res, MIRBuilder);

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}

bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return false;

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by adding SA-1 to the size. This doesn't overflow because we're
    // computing an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
  unsigned Idx = 0;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}

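// PHIs are translated in two phases: here we only create one empty G_PHI per
// leaf vreg of the PHI's type, and finishPendingPhis() later fills in the
// (value, predecessor block) operand pairs once every block and every
// incoming value has been translated.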
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  if (I.isWeak())
    return false;

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();
  Type *ValType = ResType->getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  unsigned OldValRes = Res[0];
  unsigned SuccessRes = Res[1];
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ValType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getSuccessOrdering(),
                                I.getFailureOrdering()));
  return true;
}

bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();

  unsigned Res = getOrCreateVReg(I);
  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
  unsigned Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    llvm_unreachable("Unknown atomicrmw op");
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ResType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getOrdering()));
  return true;
}

void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  if (Offsets && !Offsets->empty())
    Offsets->clear();
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  EntryBuilder->setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C)) {
    // As we are trying to build a constant val of 0 into a pointer,
    // insert a cast to make them correct with respect to types.
    unsigned NullSize = DL->getTypeSizeInBits(C.getType());
    auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
    auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
    unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
    EntryBuilder->buildCast(Reg, ZeroReg);
  } else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder.reset();
  CurBuilder.reset();
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  bool IsO0 = TPC->getOptLevel() == CodeGenOpt::Level::None;
  // Disable CSE for O0.
  bool EnableCSE = !IsO0 && EnableCSEInIRTranslator;
  if (EnableCSE) {
    EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
    std::unique_ptr<CSEConfig> Config = make_unique<CSEConfig>();
    CSEInfo = &Wrapper.get(std::move(Config));
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = make_unique<MachineIRBuilder>();
    CurBuilder = make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic-block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fallthrough to the IR entry block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()) == 0)
      continue; // Don't handle zero sized types.
    VRegArgs.push_back(
        MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
  }

  // We don't currently support translating swifterror or swiftself functions.
  for (auto &Arg : F.args()) {
    if (Arg.hasSwiftErrorAttr() || Arg.hasSwiftSelfAttr()) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 F.getSubprogram(), &F.getEntryBlock());
      R << "unable to lower arguments due to swifterror/swiftself: "
        << ore::NV("Prototype", F.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return false;
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  auto ArgIt = F.arg_begin();
  for (auto &VArg : VRegArgs) {
    // If the argument is an unsplit scalar then don't use unpackRegs to avoid
    // creating redundant copies.
    if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
      auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
      assert(VRegs.empty() && "VRegs already populated?");
      VRegs.push_back(VArg);
    } else {
      unpackRegs(*ArgIt, VArg, *EntryBuilder.get());
    }
    ArgIt++;
  }

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);

      for (const Instruction &Inst : *BB) {
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic block!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}