//===- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended, and clear the local CSE map.
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

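// Lower the incoming arguments by delegating to the target's
// fastLowerArguments hook, then record the register assigned to each
// argument so that uses in non-entry blocks can find it.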
bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

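// Conservatively decide whether the last use of V in this block can be
// marked as a kill: anything that may be coalesced, re-materialized, or
// folded into more than one machine instruction disqualifies the value.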
bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const auto *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even if the value has only one use in the LLVM IR, it is possible that
  // FastISel might fold the use into another instruction and now there is
  // more than one use at the Machine Instruction level.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

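// Return the virtual register holding V, creating or materializing one if
// necessary. Only simple, legal (or easily promoted integer) types are
// handled; returning 0 signals failure so callers can bail out to
// SelectionDAG.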
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
  unsigned Reg = 0;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg = getRegForValue(
        Constant::getNullValue(DL.getIntPtrType(V->getContext())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        unsigned IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg != 0)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

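// Record Reg as the register(s) produced for I. If I was already assigned
// registers (because a use was selected before its def), arrange for the
// old registers to be rewritten to the new ones via RegFixups.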
void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;

    AssignedReg = Reg;
  }
}

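// Return the register holding a GEP index operand, sign-extended or
// truncated to pointer width, plus a flag indicating whether this use
// kills the register.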
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
                      IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN =
        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

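// Reset the insert point to just after the last local value instruction,
// or to the top of the block (past any PHIs and EH_LABELs) if there are
// no local values.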
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;
      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
                       CI->getZExtValue(), VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

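// Lower a GEP by accumulating constant offsets into TotalOffs and only
// emitting adds/muls for variable indices. For example, a GEP such as
// "getelementptr %p, i64 0, i32 2, i64 %i" needs at most one ADD for the
// folded constant offset plus a MUL/ADD pair for the variable index %i.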
bool FastISel::selectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

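// Encode the live variable operands of a stackmap/patchpoint: integer
// constants and null pointers get a StackMaps::ConstantOp marker, static
// allocas become frame indices, and anything else is materialized into a
// register.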
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      unsigned Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// \brief Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  ImmutableCallSite CS(CI);
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgIdx);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
      cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC)
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      unsigned Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
                                            /*IsImpl=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

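// Lower the llvm.xray.customevent intrinsic to a PATCHABLE_EVENT_CALL
// pseudo-instruction; this is currently only done for x86_64 Linux, and
// the intrinsic is (successfully) ignored everywhere else.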
bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*IsDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*IsDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, that gets lowered properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  ImmutableCallSite CS(CI);

  FunctionType *FTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CS.getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      unsigned FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    Flags.setOrigAlign(OriginalAlignment);

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CS)
    updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

  return true;
}

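// Build a CallLoweringInfo for a plain call instruction, including tail
// call eligibility, and hand it off to lowerCallTo above.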
bool FastISel::lowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);

  FunctionType *FuncTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CS.arg_size());

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(&CS, i - CS.arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(CS, TM))
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
        .addExternalSymbol(IA->getAsmString().c_str())
        .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  computeUsesVAFloatArgument(*Call, MMI);

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value,
  // make an unrelated function call and use the value, because
  // it tends to be spilled on the stack. So, we move the pointer
  // to the last local value to the beginning of the block, so that
  // all the values which have already been materialized,
  // appear after the call. It also makes sense to skip intrinsics
  // since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
  case Intrinsic::assume:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    // Byval arguments with frame indices were already handled after argument
    // lowering and before isel.
    const auto *Arg =
        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
      return true;

    Optional<MachineOperand> Op;
    if (unsigned Reg = lookUpRegForValue(Address))
      Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
      if (Op->isReg()) {
        Op->setIsDebug(true);
        // A dbg.declare describes the address of a source variable, so lower it
        // into an indirect DBG_VALUE.
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
                Op->getReg(), DI->getVariable(), DI->getExpression());
      } else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE))
            .add(*Op)
            .addImm(0)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
              DI->getVariable(), DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(0U)
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = false;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
              DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
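  // Lower llvm.objectsize to its conservative constant answer: -1 if the
  // "min" argument is false, 0 if it is true. Fast-isel never computes a
  // real object size.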
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(II->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::invariant_group_barrier:
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());
  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
{
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

bool FastISel::selectInstruction(const Instruction *I) {
  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I)) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (ImmutableCallSite CS = ImmutableCallSite(I))
    for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
      if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (isa<TerminatorInst>(I)) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information if this is the only instruction
    // in the block then emit it, otherwise we have the unconditional
    // fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
  // happen in degenerate IR, and MachineIR forbids having a block twice in
  // the successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, DbgLoc);
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (!OpReg)
    return false;
  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg, OpRegIsKill);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (!IntReg)
    return false;

  unsigned IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg, /*IsKill=*/true);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

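// Lower extractvalue by computing the linear register offset of the
// requested field inside the aggregate's register sequence; no machine
// instructions are emitted.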
bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return selectFNeg(I);
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() = default;

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
                              bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, unsigned /*Op1*/,
                               bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper of fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg)
    return ResultReg;
  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  bool IsImmKill = true;
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
    // FIXME: If the materialized register here has no uses yet then this
    // will be the first use and we should be able to mark it as killed.
    // However, the local value area for materialising constant expressions
    // grows down, not up, which means that any constant expressions we
    // generate later which also use 'Imm' could be after this instruction
    // and therefore after the kill.
    IsImmKill = false;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
}

unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

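/// Try to constrain Op so that it is usable by argument OpNum of the provided
/// MCInstrDesc. If this fails, create a new virtual register in the correct
/// class and COPY the value into it.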
unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                            unsigned OpNum) {
  if (TargetRegisterInfo::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

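// The fastEmitInst_* helpers below share a common pattern: create a result
// register, constrain each register operand to the class required by the
// MCInstrDesc, then build the MachineInstr. When the instruction has no
// explicit definition, the value materializes in an implicit def (e.g. a
// fixed physical register) and is copied into the result register instead.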
unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0,
                                  bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, unsigned Op1,
                                   bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, unsigned Op2,
                                    bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, uint64_t Imm1,
                                    uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

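/// Emit a COPY from subregister Idx of Op0 into a new virtual register of
/// the register class for RetVT.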
unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              bool Op0IsKill, uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const auto *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHIs.
      if (PN->use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN->getDebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}

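// We're checking to see if we can fold LI into FoldInst. Note that we could
// have a sequence where multiple LLVM IR instructions are folded into the
// same machine instruction, e.g. a load feeding a compare feeding a branch.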
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

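// An add can be folded into a GEP only if it really is an add, its type size
// matches the GEP's, it is in the same basic block as the GEP, and its
// second operand is a constant.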
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

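// Build a MachineMemOperand for a load or store, carrying over alignment,
// volatility, and the relevant metadata (nontemporal, invariant.load,
// dereferenceable, range, and AA info). Returns nullptr for any other
// instruction.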
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  unsigned Alignment;
  MachineMemOperand::Flags Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
  bool IsDereferenceable =
      I->getMetadata(LLVMContext::MD_dereferenceable) != nullptr;
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlignment(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    Flags |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           Alignment, AAInfo, Ranges);
}

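// When both operands of a compare are identical, the result no longer
// depends on their values: integer predicates fold to a constant, and
// floating-point predicates reduce to a constant or to an ordered/unordered
// (NaN) check.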
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;