//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended, and clear the local CSE map.
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const auto *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even if the value has only one use in the LLVM IR, it is possible that
  // FastISel might fold the use into another instruction and then there is
  // more than one use at the Machine Instruction level.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

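// For illustration, consider the (hypothetical) IR:
//
//   %y = add i32 %x, 1
//   store i32 %y, i32* %p
//
// %y is defined and used exactly once, in the same basic block, and is not one
// of the excluded cast opcodes, so hasTrivialKill returns true and the use of
// %y's register may safely be marked as a kill.
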
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
  unsigned Reg = 0;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg = getRegForValue(
        Constant::getNullValue(DL.getIntPtrType(V->getContext())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        unsigned IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg != 0)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
                           /*Kill=*/false);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

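// As an example of the ConstantFP path above: a 'double 4.0' operand converts
// to the integer 4 exactly (isExact is true), so it can be materialized by
// loading the integer constant 4 into a register and emitting a SINT_TO_FP of
// that register, instead of requiring a constant-pool load.
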
/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;

    AssignedReg = Reg;
  }
}

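// Illustrative scenario for the fixup path above (the vreg numbers are made
// up): if a cross-block value was first assigned %vreg1 when referenced, but
// re-selecting its defining instruction later produced the value in %vreg5,
// RegFixups records the mapping %vreg1 -> %vreg5 so stale uses of %vreg1 can
// be rewritten afterwards.
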
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
                      IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN =
        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

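// For example, on a target with 64-bit pointers an 'i32 %idx' GEP index is
// sign-extended to i64 here (GEP indices are signed), while an 'i128 %idx'
// would be truncated to i64 before the pointer arithmetic is emitted.
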
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;
      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
                       CI->getZExtValue(), VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

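// A sketch of the commutative case handled above, with hypothetical IR: for
//
//   %r = add i32 7, %x
//
// no pass at -O0 has canonicalized the constant to the right, so the code
// commutes the operands itself and emits the "ri" form with %x's register as
// the register operand and 7 as the immediate.
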
bool FastISel::selectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

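// Example of the offset coalescing performed above (hypothetical IR, assuming
// a target where the struct occupies 8 bytes with i32 fields at offsets 0 and
// 4):
//
//   %f = getelementptr {i32, i32}, {i32, i32}* %p, i64 2, i32 1
//
// contributes 2 * 8 + 4 = 20 to TotalOffs, so a single ADD of 20 is emitted
// instead of one ADD per index.
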
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      unsigned Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

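// The Ops vector assembled above yields a machine instruction of the form
//
//   STACKMAP <id>, <numShadowBytes>, [live variables...], [scratch regs...]
//
// bracketed by CALLSEQ_START/CALLSEQ_END, with no register mask attached
// because nothing is clobbered.
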
/// \brief Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  ImmutableCallSite CS(CI);
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgIdx);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
      cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      unsigned Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
                                            /*IsImpl=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*IsDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*IsDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, which gets lowered properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  ImmutableCallSite CS(CI);

  FunctionType *FTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CS.getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling in
      // the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
      // For ByVal, alignment should come from FE. BE will guess if this info is
      // not there, but there are cases it cannot get right.
      unsigned FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    Flags.setOrigAlign(OriginalAlignment);

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CS)
    updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);

  FunctionType *FuncTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CS.arg_size());

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(&CS, i - CS.arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(CS, TM))
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across it by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
        .addExternalSymbol(IA->getAsmString().c_str())
        .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  computeUsesVAFloatArgument(*Call, MMI);

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value,
  // make an unrelated function call and use the value, because
  // it tends to be spilled on the stack. So, we move the pointer
  // to the last local value to the beginning of the block, so that
  // all the values which have already been materialized
  // appear after the call. It also makes sense to skip intrinsics
  // since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
  case Intrinsic::assume:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    // Byval arguments with frame indices were already handled after argument
    // lowering and before isel.
    const auto *Arg =
        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
      return true;

    Optional<MachineOperand> Op;
    if (unsigned Reg = lookUpRegForValue(Address))
      Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
      if (Op->isReg()) {
        Op->setIsDebug(true);
        // A dbg.declare describes the address of a source variable, so lower it
        // into an indirect DBG_VALUE.
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
                Op->getReg(), 0, DI->getVariable(), DI->getExpression());
      } else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE))
            .add(*Op)
            .addImm(0)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addReg(0U)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = DI->getOffset() != 0;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
              DI->getOffset(), DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(II->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::invariant_group_barrier:
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

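// For example, an f32 -> i32 bitcast has distinct MVTs, so it is selected
// through the ISD::BITCAST path above, whereas a bitcast whose source and
// destination map to the same MVT and register class collapses to a plain
// COPY.
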
// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
{
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

bool FastISel::selectInstruction(const Instruction *I) {
  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I)) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (ImmutableCallSite CS = ImmutableCallSite(I))
    for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
      if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (isa<TerminatorInst>(I)) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information if this is the only instruction
    // in the block then emit it, otherwise we have the unconditional
    // fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
  // happen in degenerate IR, and MachineIR forbids having a block twice in the
  // successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, DbgLoc);
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (!OpReg)
    return false;
  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg, OpRegIsKill);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (!IntReg)
    return false;

  unsigned IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg, /*IsKill=*/true);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

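// Worked example of the fallback above: negating an f64 value on a target
// without a usable FNEG emits a bitcast to i64, an XOR with
// 0x8000000000000000 (UINT64_C(1) << 63, i.e. the sign bit), and a bitcast
// back to f64.
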
bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

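// For instance, with hypothetical IR '%v = extractvalue {i32, i64} %agg, 1',
// the linear index of the i64 element is 1, so the result register is the
// aggregate's base register plus the number of registers occupied by the
// leading i32 element.
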
bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return selectFNeg(I);
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() = default;

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
                              bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, unsigned /*Op1*/,
                               bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper of fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg)
    return ResultReg;
  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  bool IsImmKill = true;
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
    // FIXME: If the materialized register here has no uses yet then this
    // will be the first use and we should be able to mark it as killed.
    // However, the local value area for materialising constant expressions
    // grows down, not up, which means that any constant expressions we generate
    // later which also use 'Imm' could be after this instruction and therefore
    // after this kill.
    IsImmKill = false;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
}
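
// Worked example: a request for "mul i32 %x, 8" arrives here as
// fastEmit_ri_(MVT::i32, ISD::MUL, X, Kill, 8, MVT::i32) and is
// strength-reduced to "shl %x, 3" before anything is emitted. If the
// target's fastEmit_ri then rejects the immediate form, the constant is
// materialized into its own vreg and the register-register form is tried
// instead.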
unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}
unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                            unsigned OpNum) {
  if (TargetRegisterInfo::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}
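
// Note: OpNum is an MCInstrDesc operand index, which counts defs first, so
// the callers below pass II.getNumDefs() + N for the N-th use operand. When
// the vreg's class cannot be constrained in place, the cross-class COPY
// above supplies a fresh vreg of the required class.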
unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}
unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0,
                                  bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
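
// The same two-path shape recurs through the whole fastEmitInst_* family:
// with an explicit def in the MCInstrDesc, the result is written directly
// into ResultReg; otherwise the instruction produces its result in a fixed
// physical register (ImplicitDefs[0]) and an extra COPY moves it into
// ResultReg.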
unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, unsigned Op1,
                                   bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, unsigned Op2,
                                    bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, uint64_t Imm1,
                                    uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              bool Op0IsKill, uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}
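
// For illustration: extracting the low 32 bits of a 64-bit vreg with a
// hypothetical subreg index "sub_lo32" becomes a plain subregister COPY,
// "%res = COPY %op0.sub_lo32", rather than a real extract instruction; the
// actual index names are target-specific.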
/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
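
// For example, with VT = MVT::i32 this emits "and Op0, 1": bit 0 carries
// the boolean and all higher bits are cleared, which is exactly what
// zero-extending an i1 requires.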
/// Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBBs for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const auto *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHI nodes.
      if (PN->use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }
      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN->getDebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;
  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}
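
// Illustrative scenario: for
//   %v = load i32, i32* %p
//   %c = icmp eq i32 %v, 0
// where the compare is the load's only user in the same block, a target's
// tryToFoldLoadIntoMI may merge the pair into one memory-operand compare
// (e.g. "cmpl $0, (%rdi)" on x86-64). Whether the fold actually happens is
// entirely up to the target hook.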
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}
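
// Example: with
//   %sum = add i64 %x, 16
//   %gep = getelementptr i8, i8* %base, i64 %sum
// all of the checks above pass, so address computation for %gep can fold
// the add and use base + index + displacement addressing (%base + %x + 16)
// instead of materializing %sum first.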
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  unsigned Alignment;
  MachineMemOperand::Flags Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
  bool IsDereferenceable =
      I->getMetadata(LLVMContext::MD_dereferenceable) != nullptr;
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlignment(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    Flags |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           Alignment, AAInfo, Ranges);
}
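
// For example, "load i32, i32* %p, align 4, !nontemporal !0" produces an
// MMO with flags MOLoad | MONonTemporal, Size 4, and Alignment 4; a load or
// store written without an explicit alignment falls back to the type's ABI
// alignment, so later passes never observe alignment 0.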
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;
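
  // With identical operands, only NaNs can keep an FP comparison from being
  // constant: "fcmp oeq x, x" is true exactly when x is ordered (not NaN),
  // so it folds to FCMP_ORD; "fcmp ueq x, x" is always true; and integer
  // comparisons like "icmp eq x, x" fold outright to FCMP_TRUE/FCMP_FALSE.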
  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;