1 //===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file defines the X86-specific support for the FastISel class. Much
10 // of the target-specific code is generated by tablegen in the file
11 // X86GenFastISel.inc, which is #included here.
13 //===----------------------------------------------------------------------===//
16 #include "X86CallingConv.h"
17 #include "X86InstrBuilder.h"
18 #include "X86InstrInfo.h"
19 #include "X86MachineFunctionInfo.h"
20 #include "X86RegisterInfo.h"
21 #include "X86Subtarget.h"
22 #include "X86TargetMachine.h"
23 #include "llvm/Analysis/BranchProbabilityInfo.h"
24 #include "llvm/CodeGen/FastISel.h"
25 #include "llvm/CodeGen/FunctionLoweringInfo.h"
26 #include "llvm/CodeGen/MachineConstantPool.h"
27 #include "llvm/CodeGen/MachineFrameInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/IR/CallSite.h"
30 #include "llvm/IR/CallingConv.h"
31 #include "llvm/IR/DebugInfo.h"
32 #include "llvm/IR/DerivedTypes.h"
33 #include "llvm/IR/GetElementPtrTypeIterator.h"
34 #include "llvm/IR/GlobalAlias.h"
35 #include "llvm/IR/GlobalVariable.h"
36 #include "llvm/IR/Instructions.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/IntrinsicsX86.h"
39 #include "llvm/IR/Operator.h"
40 #include "llvm/MC/MCAsmInfo.h"
41 #include "llvm/MC/MCSymbol.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Target/TargetOptions.h"
48 class X86FastISel final : public FastISel {
49 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
50 /// make the right decision when generating code for different targets.
51 const X86Subtarget *Subtarget;
53 /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
54 /// floating point ops.
55 /// When SSE is available, use it for f32 operations.
56 /// When SSE2 is available, use it for f64 operations.
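// (If the required SSE level is missing, isTypeLegal() below simply rejects the
// f32/f64 type, so FastISel bails out and SelectionDAG handles the operation on
// the x87 stack instead.)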
61 explicit X86FastISel(FunctionLoweringInfo &funcInfo,
62 const TargetLibraryInfo *libInfo)
63 : FastISel(funcInfo, libInfo) {
64 Subtarget = &funcInfo.MF->getSubtarget<X86Subtarget>();
65 X86ScalarSSEf64 = Subtarget->hasSSE2();
66 X86ScalarSSEf32 = Subtarget->hasSSE1();
69 bool fastSelectInstruction(const Instruction *I) override;
71 /// The specified machine instr operand is a vreg, and that
72 /// vreg is being provided by the specified load instruction. If possible,
73 /// try to fold the load as an operand to the instruction, returning true if
75 bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
76 const LoadInst *LI) override;
78 bool fastLowerArguments() override;
79 bool fastLowerCall(CallLoweringInfo &CLI) override;
80 bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
82 #include "X86GenFastISel.inc"
85 bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT,
88 bool X86FastEmitLoad(MVT VT, X86AddressMode &AM, MachineMemOperand *MMO,
89 unsigned &ResultReg, unsigned Alignment = 1);
91 bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM,
92 MachineMemOperand *MMO = nullptr, bool Aligned = false);
93 bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
95 MachineMemOperand *MMO = nullptr, bool Aligned = false);
97 bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
100 bool X86SelectAddress(const Value *V, X86AddressMode &AM);
101 bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
103 bool X86SelectLoad(const Instruction *I);
105 bool X86SelectStore(const Instruction *I);
107 bool X86SelectRet(const Instruction *I);
109 bool X86SelectCmp(const Instruction *I);
111 bool X86SelectZExt(const Instruction *I);
113 bool X86SelectSExt(const Instruction *I);
115 bool X86SelectBranch(const Instruction *I);
117 bool X86SelectShift(const Instruction *I);
119 bool X86SelectDivRem(const Instruction *I);
121 bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I);
123 bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I);
125 bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I);
127 bool X86SelectSelect(const Instruction *I);
129 bool X86SelectTrunc(const Instruction *I);
131 bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc,
132 const TargetRegisterClass *RC);
134 bool X86SelectFPExt(const Instruction *I);
135 bool X86SelectFPTrunc(const Instruction *I);
136 bool X86SelectSIToFP(const Instruction *I);
137 bool X86SelectUIToFP(const Instruction *I);
138 bool X86SelectIntToFP(const Instruction *I, bool IsSigned);
140 const X86InstrInfo *getInstrInfo() const {
141 return Subtarget->getInstrInfo();
143 const X86TargetMachine *getTargetMachine() const {
144 return static_cast<const X86TargetMachine *>(&TM);
147 bool handleConstantAddresses(const Value *V, X86AddressMode &AM);
149 unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
150 unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
151 unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);
152 unsigned fastMaterializeConstant(const Constant *C) override;
154 unsigned fastMaterializeAlloca(const AllocaInst *C) override;
156 unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;
158 /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
159 /// computed in an SSE register, not on the X87 floating point stack.
160 bool isScalarFPTypeInSSEReg(EVT VT) const {
161 return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
162 (VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
165 bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);
167 bool IsMemcpySmall(uint64_t Len);
169 bool TryEmitSmallMemcpy(X86AddressMode DestAM,
170 X86AddressMode SrcAM, uint64_t Len);
172 bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
175 const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB,
178 unsigned fastEmitInst_rrrr(unsigned MachineInstOpcode,
179 const TargetRegisterClass *RC, unsigned Op0,
180 bool Op0IsKill, unsigned Op1, bool Op1IsKill,
181 unsigned Op2, bool Op2IsKill, unsigned Op3,
185 } // end anonymous namespace.
187 static std::pair<unsigned, bool>
188 getX86SSEConditionCode(CmpInst::Predicate Predicate) {
190 bool NeedSwap = false;
192 // SSE Condition code mapping:
202 default: llvm_unreachable("Unexpected predicate");
203 case CmpInst::FCMP_OEQ: CC = 0; break;
204 case CmpInst::FCMP_OGT: NeedSwap = true; LLVM_FALLTHROUGH;
205 case CmpInst::FCMP_OLT: CC = 1; break;
206 case CmpInst::FCMP_OGE: NeedSwap = true; LLVM_FALLTHROUGH;
207 case CmpInst::FCMP_OLE: CC = 2; break;
208 case CmpInst::FCMP_UNO: CC = 3; break;
209 case CmpInst::FCMP_UNE: CC = 4; break;
210 case CmpInst::FCMP_ULE: NeedSwap = true; LLVM_FALLTHROUGH;
211 case CmpInst::FCMP_UGE: CC = 5; break;
212 case CmpInst::FCMP_ULT: NeedSwap = true; LLVM_FALLTHROUGH;
213 case CmpInst::FCMP_UGT: CC = 6; break;
214 case CmpInst::FCMP_ORD: CC = 7; break;
215 case CmpInst::FCMP_UEQ: CC = 8; break;
216 case CmpInst::FCMP_ONE: CC = 12; break;
219 return std::make_pair(CC, NeedSwap);
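// For example, FCMP_OGT maps to condition code 1 (LT) with NeedSwap set: the
// caller is expected to swap the operands so that "x > y" is tested as "y < x".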
222 /// Adds a complex addressing mode to the given machine instr builder.
223 /// Note, this will constrain the index register. If it's not possible to
224 /// constrain the given index register, then a new one will be created. The
225 /// IndexReg field of the addressing mode will be updated to match in this case.
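// For reference, an x86 memory reference occupies five machine operands:
// {BaseReg, ScaleAmt, IndexReg, Disp, SegmentReg}. An addressing mode of
// {Base = %rax, Scale = 4, Index = %rcx, Disp = 8} therefore corresponds to the
// assembly form 8(%rax,%rcx,4).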
226 const MachineInstrBuilder &
227 X86FastISel::addFullAddress(const MachineInstrBuilder &MIB,
228 X86AddressMode &AM) {
229 // First constrain the index register. It needs to be a GR64_NOSP.
230 AM.IndexReg = constrainOperandRegClass(MIB->getDesc(), AM.IndexReg,
231 MIB->getNumOperands() +
233 return ::addFullAddress(MIB, AM);
236 /// Check if it is possible to fold the condition from the XALU intrinsic
237 /// into the user. The condition code will only be updated on success.
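// For example, given IR of the form (names illustrative)
//   %res  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %obit = extractvalue { i32, i1 } %res, 1
//   br i1 %obit, label %overflow, label %normal
// the branch can simply test the OF flag (X86::COND_O) produced by the add,
// instead of first materializing %obit with a SETcc.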
238 bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
240 if (!isa<ExtractValueInst>(Cond))
243 const auto *EV = cast<ExtractValueInst>(Cond);
244 if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
247 const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
249 const Function *Callee = II->getCalledFunction();
251 cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
252 if (!isTypeLegal(RetTy, RetVT))
255 if (RetVT != MVT::i32 && RetVT != MVT::i64)
259 switch (II->getIntrinsicID()) {
260 default: return false;
261 case Intrinsic::sadd_with_overflow:
262 case Intrinsic::ssub_with_overflow:
263 case Intrinsic::smul_with_overflow:
264 case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break;
265 case Intrinsic::uadd_with_overflow:
266 case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break;
269 // Check if both instructions are in the same basic block.
270 if (II->getParent() != I->getParent())
273 // Make sure nothing is in the way
274 BasicBlock::const_iterator Start(I);
275 BasicBlock::const_iterator End(II);
276 for (auto Itr = std::prev(Start); Itr != End; --Itr) {
277 // We only expect extractvalue instructions between the intrinsic and the
278 // instruction to be selected.
279 if (!isa<ExtractValueInst>(Itr))
282 // Check that the extractvalue operand comes from the intrinsic.
283 const auto *EVI = cast<ExtractValueInst>(Itr);
284 if (EVI->getAggregateOperand() != II)
292 bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
293 EVT evt = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
294 if (evt == MVT::Other || !evt.isSimple())
295 // Unhandled type. Halt "fast" selection and bail.
298 VT = evt.getSimpleVT();
299 // For now, require SSE/SSE2 for performing floating-point operations,
300 // since x87 requires additional work.
301 if (VT == MVT::f64 && !X86ScalarSSEf64)
303 if (VT == MVT::f32 && !X86ScalarSSEf32)
305 // Similarly, no f80 support yet.
308 // We only handle legal types. For example, on x86-32 the instruction
309 // selector contains all of the 64-bit instructions from x86-64,
310 // under the assumption that i64 won't be used if the target doesn't
312 return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
315 /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
316 /// The address is given by the pre-computed addressing mode AM.
317 /// Return true and the result register by reference if it is possible.
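// For example, a 16-byte-aligned v4f32 load marked nontemporal selects
// (V)MOVNTDQA when SSE4.1 is available, while an unaligned load of the same
// type falls back to (V)MOVUPS.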
318 bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM,
319 MachineMemOperand *MMO, unsigned &ResultReg,
320 unsigned Alignment) {
321 bool HasSSE41 = Subtarget->hasSSE41();
322 bool HasAVX = Subtarget->hasAVX();
323 bool HasAVX2 = Subtarget->hasAVX2();
324 bool HasAVX512 = Subtarget->hasAVX512();
325 bool HasVLX = Subtarget->hasVLX();
326 bool IsNonTemporal = MMO && MMO->isNonTemporal();
328 // Treat i1 loads the same as i8 loads. Masking will be done when storing.
332 // Get opcode and regclass of the output for the given load instruction.
334 switch (VT.SimpleTy) {
335 default: return false;
346 // Must be in x86-64 mode.
351 Opc = HasAVX512 ? X86::VMOVSSZrm_alt :
352 HasAVX ? X86::VMOVSSrm_alt :
359 Opc = HasAVX512 ? X86::VMOVSDZrm_alt :
360 HasAVX ? X86::VMOVSDrm_alt :
366 // No f80 support yet.
369 if (IsNonTemporal && Alignment >= 16 && HasSSE41)
370 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
371 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
372 else if (Alignment >= 16)
373 Opc = HasVLX ? X86::VMOVAPSZ128rm :
374 HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
376 Opc = HasVLX ? X86::VMOVUPSZ128rm :
377 HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm;
380 if (IsNonTemporal && Alignment >= 16 && HasSSE41)
381 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
382 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
383 else if (Alignment >= 16)
384 Opc = HasVLX ? X86::VMOVAPDZ128rm :
385 HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm;
387 Opc = HasVLX ? X86::VMOVUPDZ128rm :
388 HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm;
394 if (IsNonTemporal && Alignment >= 16 && HasSSE41)
395 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
396 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
397 else if (Alignment >= 16)
398 Opc = HasVLX ? X86::VMOVDQA64Z128rm :
399 HasAVX ? X86::VMOVDQArm : X86::MOVDQArm;
401 Opc = HasVLX ? X86::VMOVDQU64Z128rm :
402 HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm;
406 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
407 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
408 else if (IsNonTemporal && Alignment >= 16)
409 return false; // Force split for X86::VMOVNTDQArm
410 else if (Alignment >= 32)
411 Opc = HasVLX ? X86::VMOVAPSZ256rm : X86::VMOVAPSYrm;
413 Opc = HasVLX ? X86::VMOVUPSZ256rm : X86::VMOVUPSYrm;
417 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
418 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
419 else if (IsNonTemporal && Alignment >= 16)
420 return false; // Force split for X86::VMOVNTDQArm
421 else if (Alignment >= 32)
422 Opc = HasVLX ? X86::VMOVAPDZ256rm : X86::VMOVAPDYrm;
424 Opc = HasVLX ? X86::VMOVUPDZ256rm : X86::VMOVUPDYrm;
431 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
432 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
433 else if (IsNonTemporal && Alignment >= 16)
434 return false; // Force split for X86::VMOVNTDQArm
435 else if (Alignment >= 32)
436 Opc = HasVLX ? X86::VMOVDQA64Z256rm : X86::VMOVDQAYrm;
438 Opc = HasVLX ? X86::VMOVDQU64Z256rm : X86::VMOVDQUYrm;
442 if (IsNonTemporal && Alignment >= 64)
443 Opc = X86::VMOVNTDQAZrm;
445 Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm;
449 if (IsNonTemporal && Alignment >= 64)
450 Opc = X86::VMOVNTDQAZrm;
452 Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm;
459 // Note: There are a lot more choices based on type with AVX-512, but
460 // there's really no advantage when the load isn't masked.
461 if (IsNonTemporal && Alignment >= 64)
462 Opc = X86::VMOVNTDQAZrm;
464 Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm;
468 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
470 ResultReg = createResultReg(RC);
471 MachineInstrBuilder MIB =
472 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
473 addFullAddress(MIB, AM);
475 MIB->addMemOperand(*FuncInfo.MF, MMO);
479 /// X86FastEmitStore - Emit a machine instruction to store a value Val of
480 /// type VT to the address described by the pre-computed addressing mode AM
481 /// (a base pointer plus a displacement offset, or a GlobalAddress).
482 /// Return true if it is possible.
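// For example, storing an i1 value first emits AND8ri with immediate 1 to clear
// the upper bits of the byte and then stores it with MOV8mr, matching the
// i1-as-i8 convention used on the load path above.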
483 bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
485 MachineMemOperand *MMO, bool Aligned) {
486 bool HasSSE1 = Subtarget->hasSSE1();
487 bool HasSSE2 = Subtarget->hasSSE2();
488 bool HasSSE4A = Subtarget->hasSSE4A();
489 bool HasAVX = Subtarget->hasAVX();
490 bool HasAVX512 = Subtarget->hasAVX512();
491 bool HasVLX = Subtarget->hasVLX();
492 bool IsNonTemporal = MMO && MMO->isNonTemporal();
494 // Get opcode and regclass of the output for the given store instruction.
496 switch (VT.getSimpleVT().SimpleTy) {
497 case MVT::f80: // No f80 support yet.
498 default: return false;
500 // Mask out all but the lowest bit.
501 unsigned AndResult = createResultReg(&X86::GR8RegClass);
502 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
503 TII.get(X86::AND8ri), AndResult)
504 .addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);
506 LLVM_FALLTHROUGH; // handle i1 as i8.
508 case MVT::i8: Opc = X86::MOV8mr; break;
509 case MVT::i16: Opc = X86::MOV16mr; break;
511 Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTImr : X86::MOV32mr;
514 // Must be in x86-64 mode.
515 Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr;
518 if (X86ScalarSSEf32) {
519 if (IsNonTemporal && HasSSE4A)
522 Opc = HasAVX512 ? X86::VMOVSSZmr :
523 HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
528 if (X86ScalarSSEf32) {
529 if (IsNonTemporal && HasSSE4A)
532 Opc = HasAVX512 ? X86::VMOVSDZmr :
533 HasAVX ? X86::VMOVSDmr : X86::MOVSDmr;
538 Opc = (IsNonTemporal && HasSSE1) ? X86::MMX_MOVNTQmr : X86::MMX_MOVQ64mr;
543 Opc = HasVLX ? X86::VMOVNTPSZ128mr :
544 HasAVX ? X86::VMOVNTPSmr : X86::MOVNTPSmr;
546 Opc = HasVLX ? X86::VMOVAPSZ128mr :
547 HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr;
549 Opc = HasVLX ? X86::VMOVUPSZ128mr :
550 HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr;
555 Opc = HasVLX ? X86::VMOVNTPDZ128mr :
556 HasAVX ? X86::VMOVNTPDmr : X86::MOVNTPDmr;
558 Opc = HasVLX ? X86::VMOVAPDZ128mr :
559 HasAVX ? X86::VMOVAPDmr : X86::MOVAPDmr;
561 Opc = HasVLX ? X86::VMOVUPDZ128mr :
562 HasAVX ? X86::VMOVUPDmr : X86::MOVUPDmr;
570 Opc = HasVLX ? X86::VMOVNTDQZ128mr :
571 HasAVX ? X86::VMOVNTDQmr : X86::MOVNTDQmr;
573 Opc = HasVLX ? X86::VMOVDQA64Z128mr :
574 HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr;
576 Opc = HasVLX ? X86::VMOVDQU64Z128mr :
577 HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr;
583 Opc = HasVLX ? X86::VMOVNTPSZ256mr : X86::VMOVNTPSYmr;
585 Opc = HasVLX ? X86::VMOVAPSZ256mr : X86::VMOVAPSYmr;
587 Opc = HasVLX ? X86::VMOVUPSZ256mr : X86::VMOVUPSYmr;
593 Opc = HasVLX ? X86::VMOVNTPDZ256mr : X86::VMOVNTPDYmr;
595 Opc = HasVLX ? X86::VMOVAPDZ256mr : X86::VMOVAPDYmr;
597 Opc = HasVLX ? X86::VMOVUPDZ256mr : X86::VMOVUPDYmr;
606 Opc = HasVLX ? X86::VMOVNTDQZ256mr : X86::VMOVNTDQYmr;
608 Opc = HasVLX ? X86::VMOVDQA64Z256mr : X86::VMOVDQAYmr;
610 Opc = HasVLX ? X86::VMOVDQU64Z256mr : X86::VMOVDQUYmr;
615 Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr;
617 Opc = X86::VMOVUPSZmr;
622 Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr;
624 Opc = X86::VMOVUPDZmr;
631 // Note: There are a lot more choices based on type with AVX-512, but
632 // there's really no advantage when the store isn't masked.
634 Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr;
636 Opc = X86::VMOVDQU64Zmr;
640 const MCInstrDesc &Desc = TII.get(Opc);
641 // Some of the instructions in the previous switch use FR128 instead
642 // of FR32 for ValReg. Make sure the register we feed the instruction
643 // matches its register class constraints.
644 // Note: It is fine to copy from FR32 to FR128; these are the same registers
645 // behind the scenes, which is also why this never triggered any bugs before.
647 ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1);
648 MachineInstrBuilder MIB =
649 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc);
650 addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill));
652 MIB->addMemOperand(*FuncInfo.MF, MMO);
657 bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
659 MachineMemOperand *MMO, bool Aligned) {
660 // Handle 'null' like i32/i64 0.
661 if (isa<ConstantPointerNull>(Val))
662 Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext()));
664 // If this is a store of a simple constant, fold the constant into the store.
665 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
668 switch (VT.getSimpleVT().SimpleTy) {
672 LLVM_FALLTHROUGH; // Handle as i8.
673 case MVT::i8: Opc = X86::MOV8mi; break;
674 case MVT::i16: Opc = X86::MOV16mi; break;
675 case MVT::i32: Opc = X86::MOV32mi; break;
677 // Must be a 32-bit sign extended value.
678 if (isInt<32>(CI->getSExtValue()))
679 Opc = X86::MOV64mi32;
684 MachineInstrBuilder MIB =
685 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
686 addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue()
687 : CI->getZExtValue());
689 MIB->addMemOperand(*FuncInfo.MF, MMO);
694 unsigned ValReg = getRegForValue(Val);
698 bool ValKill = hasTrivialKill(Val);
699 return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned);
702 /// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
703 /// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
704 /// ISD::SIGN_EXTEND).
705 bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
706 unsigned Src, EVT SrcVT,
707 unsigned &ResultReg) {
708 unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
709 Src, /*TODO: Kill=*/false);
717 bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
718 // Handle constant address.
719 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
720 // Can't handle alternate code models yet.
721 if (TM.getCodeModel() != CodeModel::Small)
724 // Can't handle TLS yet.
725 if (GV->isThreadLocal())
728 // Can't handle !absolute_symbol references yet.
729 if (GV->isAbsoluteSymbolRef())
732 // RIP-relative addresses can't have additional register operands, so if
733 // we've already folded stuff into the addressing mode, just force the
734 // global value into its own register, which we can use as the basereg.
735 if (!Subtarget->isPICStyleRIPRel() ||
736 (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
737 // Okay, we've committed to selecting this global. Set up the address.
740 // Allow the subtarget to classify the global.
741 unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);
743 // If this reference is relative to the pic base, set it now.
744 if (isGlobalRelativeToPICBase(GVFlags)) {
745 // FIXME: How do we know Base.Reg is free??
746 AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
749 // Unless the ABI requires an extra load, return a direct reference to
751 if (!isGlobalStubReference(GVFlags)) {
752 if (Subtarget->isPICStyleRIPRel()) {
753 // Use rip-relative addressing if we can. Above we verified that the
754 // base and index registers are unused.
755 assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
756 AM.Base.Reg = X86::RIP;
758 AM.GVOpFlags = GVFlags;
762 // Ok, we need to do a load from a stub. If we've already loaded from
763 // this stub, reuse the loaded pointer, otherwise emit the load now.
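// For example, on x86-64 with PIC the stub load is roughly
//   movq sym@GOTPCREL(%rip), %reg
// and %reg then becomes the base register of the final address computed below.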
764 DenseMap<const Value *, unsigned>::iterator I = LocalValueMap.find(V);
766 if (I != LocalValueMap.end() && I->second != 0) {
769 // Issue load from stub.
771 const TargetRegisterClass *RC = nullptr;
772 X86AddressMode StubAM;
773 StubAM.Base.Reg = AM.Base.Reg;
775 StubAM.GVOpFlags = GVFlags;
777 // Prepare for inserting code in the local-value area.
778 SavePoint SaveInsertPt = enterLocalValueArea();
780 if (TLI.getPointerTy(DL) == MVT::i64) {
782 RC = &X86::GR64RegClass;
784 if (Subtarget->isPICStyleRIPRel())
785 StubAM.Base.Reg = X86::RIP;
788 RC = &X86::GR32RegClass;
791 LoadReg = createResultReg(RC);
792 MachineInstrBuilder LoadMI =
793 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), LoadReg);
794 addFullAddress(LoadMI, StubAM);
796 // Ok, back to normal mode.
797 leaveLocalValueArea(SaveInsertPt);
799 // Prevent loading GV stub multiple times in same MBB.
800 LocalValueMap[V] = LoadReg;
803 // Now construct the final address. Note that the Disp, Scale,
804 // and Index values may already be set here.
805 AM.Base.Reg = LoadReg;
811 // If all else fails, try to materialize the value in a register.
812 if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
813 if (AM.Base.Reg == 0) {
814 AM.Base.Reg = getRegForValue(V);
815 return AM.Base.Reg != 0;
817 if (AM.IndexReg == 0) {
818 assert(AM.Scale == 1 && "Scale with no index!");
819 AM.IndexReg = getRegForValue(V);
820 return AM.IndexReg != 0;
827 /// X86SelectAddress - Attempt to fill in an address from the given value.
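// For example (names illustrative), the address of
//   getelementptr inbounds [8 x i32], [8 x i32]* %buf, i64 0, i64 %i
// can be folded into the mode {Base = reg(%buf), Scale = 4, Index = reg(%i),
// Disp = 0}, i.e. the operand form 0(buf,i,4).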
829 bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
830 SmallVector<const Value *, 32> GEPs;
832 const User *U = nullptr;
833 unsigned Opcode = Instruction::UserOp1;
834 if (const Instruction *I = dyn_cast<Instruction>(V)) {
835 // Don't walk into other basic blocks; it's possible we haven't
836 // visited them yet, so the instructions may not yet be assigned
837 // virtual registers.
838 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
839 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
840 Opcode = I->getOpcode();
843 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
844 Opcode = C->getOpcode();
848 if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
849 if (Ty->getAddressSpace() > 255)
850 // Fast instruction selection doesn't support the special
856 case Instruction::BitCast:
857 // Look past bitcasts.
858 return X86SelectAddress(U->getOperand(0), AM);
860 case Instruction::IntToPtr:
861 // Look past no-op inttoptrs.
862 if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
863 TLI.getPointerTy(DL))
864 return X86SelectAddress(U->getOperand(0), AM);
867 case Instruction::PtrToInt:
868 // Look past no-op ptrtoints.
869 if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
870 return X86SelectAddress(U->getOperand(0), AM);
873 case Instruction::Alloca: {
874 // Do static allocas.
875 const AllocaInst *A = cast<AllocaInst>(V);
876 DenseMap<const AllocaInst *, int>::iterator SI =
877 FuncInfo.StaticAllocaMap.find(A);
878 if (SI != FuncInfo.StaticAllocaMap.end()) {
879 AM.BaseType = X86AddressMode::FrameIndexBase;
880 AM.Base.FrameIndex = SI->second;
886 case Instruction::Add: {
887 // Adds of constants are common and easy enough.
888 if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
889 uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
890 // They have to fit in the 32-bit signed displacement field though.
891 if (isInt<32>(Disp)) {
892 AM.Disp = (uint32_t)Disp;
893 return X86SelectAddress(U->getOperand(0), AM);
899 case Instruction::GetElementPtr: {
900 X86AddressMode SavedAM = AM;
902 // Pattern-match simple GEPs.
903 uint64_t Disp = (int32_t)AM.Disp;
904 unsigned IndexReg = AM.IndexReg;
905 unsigned Scale = AM.Scale;
906 gep_type_iterator GTI = gep_type_begin(U);
907 // Iterate through the indices, folding what we can. Constants can be
908 // folded, and one dynamic index can be handled, if the scale is supported.
909 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
910 i != e; ++i, ++GTI) {
911 const Value *Op = *i;
912 if (StructType *STy = GTI.getStructTypeOrNull()) {
913 const StructLayout *SL = DL.getStructLayout(STy);
914 Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
918 // An array/variable index is always of the form i*S where S is the
919 // constant scale size. See if we can push the scale into immediates.
920 uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
922 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
923 // Constant-offset addressing.
924 Disp += CI->getSExtValue() * S;
927 if (canFoldAddIntoGEP(U, Op)) {
928 // A compatible add with a constant operand. Fold the constant.
930 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
931 Disp += CI->getSExtValue() * S;
932 // Iterate on the other operand.
933 Op = cast<AddOperator>(Op)->getOperand(0);
937 (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
938 (S == 1 || S == 2 || S == 4 || S == 8)) {
939 // Scaled-index addressing.
941 IndexReg = getRegForGEPIndex(Op).first;
947 goto unsupported_gep;
951 // Check for displacement overflow.
952 if (!isInt<32>(Disp))
955 AM.IndexReg = IndexReg;
957 AM.Disp = (uint32_t)Disp;
960 if (const GetElementPtrInst *GEP =
961 dyn_cast<GetElementPtrInst>(U->getOperand(0))) {
962 // Ok, the GEP indices were covered by constant-offset and scaled-index
963 // addressing. Update the address state and move on to examining the base.
966 } else if (X86SelectAddress(U->getOperand(0), AM)) {
970 // If we couldn't merge the gep value into this addr mode, revert back to
971 // our address and just match the value instead of completely failing.
974 for (const Value *I : reverse(GEPs))
975 if (handleConstantAddresses(I, AM))
980 // Ok, the GEP indices weren't all covered.
985 return handleConstantAddresses(V, AM);
988 /// X86SelectCallAddress - Attempt to fill in an address from the given value.
990 bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
991 const User *U = nullptr;
992 unsigned Opcode = Instruction::UserOp1;
993 const Instruction *I = dyn_cast<Instruction>(V);
994 // Record if the value is defined in the same basic block.
996 // This information is crucial to know whether or not folding an
998 // Indeed, FastISel generates or reuses a virtual register for all
999 // operands of all instructions it selects. Obviously, the definition and
1000 // its uses must use the same virtual register otherwise the produced
1001 // code is incorrect.
1002 // Before instruction selection, FunctionLoweringInfo::set sets the virtual
1003 // registers for values that are alive across basic blocks. This ensures
1004 // that the values are consistently set across basic blocks, even
1005 // if different instruction selection mechanisms are used (e.g., a mix of
1006 // SDISel and FastISel).
1007 // For values local to a basic block, the instruction selection process
1008 // generates these virtual registers with whatever method is appropriate
1009 // for its needs. In particular, FastISel and SDISel do not share the way
1010 // local virtual registers are set.
1011 // Therefore, it is impossible (or at least unsafe) to share values
1012 // between basic blocks unless they use the same instruction selection
1013 // method, which is not guaranteed for X86.
1014 // Moreover, things like hasOneUse could not be used accurately if we
1015 // allowed referencing values across basic blocks when they are not
1016 // actually alive across basic blocks to begin with.
1019 Opcode = I->getOpcode();
1021 InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
1022 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
1023 Opcode = C->getOpcode();
1029 case Instruction::BitCast:
1030 // Look past bitcasts if its operand is in the same BB.
1032 return X86SelectCallAddress(U->getOperand(0), AM);
1035 case Instruction::IntToPtr:
1036 // Look past no-op inttoptrs if its operand is in the same BB.
1038 TLI.getValueType(DL, U->getOperand(0)->getType()) ==
1039 TLI.getPointerTy(DL))
1040 return X86SelectCallAddress(U->getOperand(0), AM);
1043 case Instruction::PtrToInt:
1044 // Look past no-op ptrtoints if its operand is in the same BB.
1045 if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
1046 return X86SelectCallAddress(U->getOperand(0), AM);
1050 // Handle constant address.
1051 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1052 // Can't handle alternate code models yet.
1053 if (TM.getCodeModel() != CodeModel::Small)
1056 // RIP-relative addresses can't have additional register operands.
1057 if (Subtarget->isPICStyleRIPRel() &&
1058 (AM.Base.Reg != 0 || AM.IndexReg != 0))
1061 // Can't handle TLS.
1062 if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
1063 if (GVar->isThreadLocal())
1066 // Okay, we've committed to selecting this global. Set up the basic address.
1069 // Return a direct reference to the global. FastISel can handle calls to
1070 // functions that require loads, such as dllimport and nonlazybind functions.
1072 if (Subtarget->isPICStyleRIPRel()) {
1073 // Use rip-relative addressing if we can. Above we verified that the
1074 // base and index registers are unused.
1075 assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
1076 AM.Base.Reg = X86::RIP;
1078 AM.GVOpFlags = Subtarget->classifyLocalReference(nullptr);
1084 // If all else fails, try to materialize the value in a register.
1085 if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
1086 if (AM.Base.Reg == 0) {
1087 AM.Base.Reg = getRegForValue(V);
1088 return AM.Base.Reg != 0;
1090 if (AM.IndexReg == 0) {
1091 assert(AM.Scale == 1 && "Scale with no index!");
1092 AM.IndexReg = getRegForValue(V);
1093 return AM.IndexReg != 0;
1101 /// X86SelectStore - Select and emit code to implement store instructions.
1102 bool X86FastISel::X86SelectStore(const Instruction *I) {
1103 // Atomic stores need special handling.
1104 const StoreInst *S = cast<StoreInst>(I);
1109 const Value *PtrV = I->getOperand(1);
1110 if (TLI.supportSwiftError()) {
1111 // Swifterror values can come from either a function parameter with
1112 // swifterror attribute or an alloca with swifterror attribute.
1113 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
1114 if (Arg->hasSwiftErrorAttr())
1118 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
1119 if (Alloca->isSwiftError())
1124 const Value *Val = S->getValueOperand();
1125 const Value *Ptr = S->getPointerOperand();
1128 if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
1131 unsigned Alignment = S->getAlignment();
1132 unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType());
1133 if (Alignment == 0) // Ensure that codegen never sees alignment 0
1134 Alignment = ABIAlignment;
1135 bool Aligned = Alignment >= ABIAlignment;
1138 if (!X86SelectAddress(Ptr, AM))
1141 return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);
1144 /// X86SelectRet - Select and emit code to implement ret instructions.
1145 bool X86FastISel::X86SelectRet(const Instruction *I) {
1146 const ReturnInst *Ret = cast<ReturnInst>(I);
1147 const Function &F = *I->getParent()->getParent();
1148 const X86MachineFunctionInfo *X86MFInfo =
1149 FuncInfo.MF->getInfo<X86MachineFunctionInfo>();
1151 if (!FuncInfo.CanLowerReturn)
1154 if (TLI.supportSwiftError() &&
1155 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
1158 if (TLI.supportSplitCSR(FuncInfo.MF))
1161 CallingConv::ID CC = F.getCallingConv();
1162 if (CC != CallingConv::C &&
1163 CC != CallingConv::Fast &&
1164 CC != CallingConv::Tail &&
1165 CC != CallingConv::X86_FastCall &&
1166 CC != CallingConv::X86_StdCall &&
1167 CC != CallingConv::X86_ThisCall &&
1168 CC != CallingConv::X86_64_SysV &&
1169 CC != CallingConv::Win64)
1172 // Don't handle popping bytes if they don't fit the ret's immediate.
1173 if (!isUInt<16>(X86MFInfo->getBytesToPopOnReturn()))
1176 // fastcc with -tailcallopt is intended to provide a guaranteed
1177 // tail call optimization. FastISel doesn't know how to do that.
1178 if ((CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) ||
1179 CC == CallingConv::Tail)
1182 // Let SDISel handle vararg functions.
1186 // Build a list of return value registers.
1187 SmallVector<unsigned, 4> RetRegs;
1189 if (Ret->getNumOperands() > 0) {
1190 SmallVector<ISD::OutputArg, 4> Outs;
1191 GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
1193 // Analyze operands of the call, assigning locations to each operand.
1194 SmallVector<CCValAssign, 16> ValLocs;
1195 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
1196 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
1198 const Value *RV = Ret->getOperand(0);
1199 unsigned Reg = getRegForValue(RV);
1203 // Only handle a single return value for now.
1204 if (ValLocs.size() != 1)
1207 CCValAssign &VA = ValLocs[0];
1209 // Don't bother handling odd stuff for now.
1210 if (VA.getLocInfo() != CCValAssign::Full)
1212 // Only handle register returns for now.
1216 // The calling-convention tables for x87 returns don't tell
1218 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
1221 unsigned SrcReg = Reg + VA.getValNo();
1222 EVT SrcVT = TLI.getValueType(DL, RV->getType());
1223 EVT DstVT = VA.getValVT();
1224 // Special handling for extended integers.
1225 if (SrcVT != DstVT) {
1226 if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)
1229 if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
1232 assert(DstVT == MVT::i32 && "X86 should always ext to i32");
1234 if (SrcVT == MVT::i1) {
1235 if (Outs[0].Flags.isSExt())
1237 SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false);
1240 unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
1242 SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
1243 SrcReg, /*TODO: Kill=*/false);
1247 Register DstReg = VA.getLocReg();
1248 const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
1249 // Avoid a cross-class copy. This is very unlikely.
1250 if (!SrcRC->contains(DstReg))
1252 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1253 TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
1255 // Add register to return instruction.
1256 RetRegs.push_back(VA.getLocReg());
1259 // Swift calling convention does not require we copy the sret argument
1260 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
1262 // All x86 ABIs require that for returning structs by value we copy
1263 // the sret argument into %rax/%eax (depending on ABI) for the return.
1264 // We saved the argument into a virtual register in the entry block,
1265 // so now we copy the value out and into %rax/%eax.
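// For example, for an sret function under the x86-64 SysV ABI the hidden
// pointer argument must come back in %rax, so a COPY from the saved virtual
// register into %rax (or %eax on 32-bit) is emitted and the register is added
// as an implicit use of the RET below.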
1266 if (F.hasStructRetAttr() && CC != CallingConv::Swift) {
1267 unsigned Reg = X86MFInfo->getSRetReturnReg();
1269 "SRetReturnReg should have been set in LowerFormalArguments()!");
1270 unsigned RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
1271 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1272 TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
1273 RetRegs.push_back(RetReg);
1276 // Now emit the RET.
1277 MachineInstrBuilder MIB;
1278 if (X86MFInfo->getBytesToPopOnReturn()) {
1279 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1280 TII.get(Subtarget->is64Bit() ? X86::RETIQ : X86::RETIL))
1281 .addImm(X86MFInfo->getBytesToPopOnReturn());
1283 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1284 TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
1286 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
1287 MIB.addReg(RetRegs[i], RegState::Implicit);
1291 /// X86SelectLoad - Select and emit code to implement load instructions.
1293 bool X86FastISel::X86SelectLoad(const Instruction *I) {
1294 const LoadInst *LI = cast<LoadInst>(I);
1296 // Atomic loads need special handling.
1300 const Value *SV = I->getOperand(0);
1301 if (TLI.supportSwiftError()) {
1302 // Swifterror values can come from either a function parameter with
1303 // swifterror attribute or an alloca with swifterror attribute.
1304 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
1305 if (Arg->hasSwiftErrorAttr())
1309 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
1310 if (Alloca->isSwiftError())
1316 if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
1319 const Value *Ptr = LI->getPointerOperand();
1322 if (!X86SelectAddress(Ptr, AM))
1325 unsigned Alignment = LI->getAlignment();
1326 unsigned ABIAlignment = DL.getABITypeAlignment(LI->getType());
1327 if (Alignment == 0) // Ensure that codegen never sees alignment 0
1328 Alignment = ABIAlignment;
1330 unsigned ResultReg = 0;
1331 if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
1335 updateValueMap(I, ResultReg);
1339 static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
1340 bool HasAVX512 = Subtarget->hasAVX512();
1341 bool HasAVX = Subtarget->hasAVX();
1342 bool X86ScalarSSEf32 = Subtarget->hasSSE1();
1343 bool X86ScalarSSEf64 = Subtarget->hasSSE2();
1345 switch (VT.getSimpleVT().SimpleTy) {
1347 case MVT::i8: return X86::CMP8rr;
1348 case MVT::i16: return X86::CMP16rr;
1349 case MVT::i32: return X86::CMP32rr;
1350 case MVT::i64: return X86::CMP64rr;
1352 return X86ScalarSSEf32
1353 ? (HasAVX512 ? X86::VUCOMISSZrr
1354 : HasAVX ? X86::VUCOMISSrr : X86::UCOMISSrr)
1357 return X86ScalarSSEf64
1358 ? (HasAVX512 ? X86::VUCOMISDZrr
1359 : HasAVX ? X86::VUCOMISDrr : X86::UCOMISDrr)
1364 /// If the right-hand side of a comparison is the constant RHSC, return an
1365 /// opcode that folds the immediate into the compare (e.g. CMP32ri); otherwise return 0.
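// For example, "icmp eq i32 %x, 7" can use CMP32ri8 because 7 fits in a
// sign-extended 8-bit immediate, whereas comparing against 100000 needs
// CMP32ri.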
1366 static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
1367 int64_t Val = RHSC->getSExtValue();
1368 switch (VT.getSimpleVT().SimpleTy) {
1369 // Otherwise, we can't fold the immediate into this comparison.
1376 return X86::CMP16ri8;
1377 return X86::CMP16ri;
1380 return X86::CMP32ri8;
1381 return X86::CMP32ri;
1384 return X86::CMP64ri8;
1385 // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
1388 return X86::CMP64ri32;
1393 bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
1394 const DebugLoc &CurDbgLoc) {
1395 unsigned Op0Reg = getRegForValue(Op0);
1396 if (Op0Reg == 0) return false;
1398 // Handle 'null' like i32/i64 0.
1399 if (isa<ConstantPointerNull>(Op1))
1400 Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext()));
1402 // We have two options: compare with register or immediate. If the RHS of
1403 // the compare is an immediate that we can fold into this compare, use
1404 // CMPri, otherwise use CMPrr.
1405 if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
1406 if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
1407 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareImmOpc))
1409 .addImm(Op1C->getSExtValue());
1414 unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
1415 if (CompareOpc == 0) return false;
1417 unsigned Op1Reg = getRegForValue(Op1);
1418 if (Op1Reg == 0) return false;
1419 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc))
1426 bool X86FastISel::X86SelectCmp(const Instruction *I) {
1427 const CmpInst *CI = cast<CmpInst>(I);
1430 if (!isTypeLegal(I->getOperand(0)->getType(), VT))
1433 // Try to optimize or fold the cmp.
1434 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1435 unsigned ResultReg = 0;
1436 switch (Predicate) {
1438 case CmpInst::FCMP_FALSE: {
1439 ResultReg = createResultReg(&X86::GR32RegClass);
1440 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
1442 ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
1448 case CmpInst::FCMP_TRUE: {
1449 ResultReg = createResultReg(&X86::GR8RegClass);
1450 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
1451 ResultReg).addImm(1);
1457 updateValueMap(I, ResultReg);
1461 const Value *LHS = CI->getOperand(0);
1462 const Value *RHS = CI->getOperand(1);
1464 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
1465 // We don't have to materialize a zero constant for this case and can just use
1466 // %x again on the RHS.
1467 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
1468 const auto *RHSC = dyn_cast<ConstantFP>(RHS);
1469 if (RHSC && RHSC->isNullValue())
1473 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
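// For example, "fcmp oeq" is emitted as UCOMISS/UCOMISD followed by SETE and
// SETNP combined with AND8rr: UCOMIS* raises both ZF and PF for unordered
// operands, so "ordered and equal" requires ZF=1 and PF=0.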
1474 static const uint16_t SETFOpcTable[2][3] = {
1475 { X86::COND_E, X86::COND_NP, X86::AND8rr },
1476 { X86::COND_NE, X86::COND_P, X86::OR8rr }
1478 const uint16_t *SETFOpc = nullptr;
1479 switch (Predicate) {
1481 case CmpInst::FCMP_OEQ: SETFOpc = &SETFOpcTable[0][0]; break;
1482 case CmpInst::FCMP_UNE: SETFOpc = &SETFOpcTable[1][0]; break;
1485 ResultReg = createResultReg(&X86::GR8RegClass);
1487 if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
1490 unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
1491 unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
1492 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
1493 FlagReg1).addImm(SETFOpc[0]);
1494 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
1495 FlagReg2).addImm(SETFOpc[1]);
1496 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]),
1497 ResultReg).addReg(FlagReg1).addReg(FlagReg2);
1498 updateValueMap(I, ResultReg);
1504 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
1505 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1508 std::swap(LHS, RHS);
1510 // Emit a compare of LHS/RHS.
1511 if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
1514 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
1515 ResultReg).addImm(CC);
1516 updateValueMap(I, ResultReg);
1520 bool X86FastISel::X86SelectZExt(const Instruction *I) {
1521 EVT DstVT = TLI.getValueType(DL, I->getType());
1522 if (!TLI.isTypeLegal(DstVT))
1525 unsigned ResultReg = getRegForValue(I->getOperand(0));
1529 // Handle zero-extension from i1 to i8, which is common.
1530 MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
1531 if (SrcVT == MVT::i1) {
1532 // Set the high bits to zero.
1533 ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
1540 if (DstVT == MVT::i64) {
1541 // Handle extension to 64-bits via sub-register shenanigans.
1544 switch (SrcVT.SimpleTy) {
1545 case MVT::i8: MovInst = X86::MOVZX32rr8; break;
1546 case MVT::i16: MovInst = X86::MOVZX32rr16; break;
1547 case MVT::i32: MovInst = X86::MOV32rr; break;
1548 default: llvm_unreachable("Unexpected zext to i64 source type");
1551 unsigned Result32 = createResultReg(&X86::GR32RegClass);
1552 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32)
1555 ResultReg = createResultReg(&X86::GR64RegClass);
1556 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::SUBREG_TO_REG),
1558 .addImm(0).addReg(Result32).addImm(X86::sub_32bit);
1559 } else if (DstVT == MVT::i16) {
1560 // i8->i16 doesn't exist in the autogenerated isel table. Need to zero
1561 // extend to 32-bits and then extract down to 16-bits.
1562 unsigned Result32 = createResultReg(&X86::GR32RegClass);
1563 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVZX32rr8),
1564 Result32).addReg(ResultReg);
1566 ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, /*Kill=*/true,
1568 } else if (DstVT != MVT::i8) {
1569 ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
1570 ResultReg, /*Kill=*/true);
1575 updateValueMap(I, ResultReg);
1579 bool X86FastISel::X86SelectSExt(const Instruction *I) {
1580 EVT DstVT = TLI.getValueType(DL, I->getType());
1581 if (!TLI.isTypeLegal(DstVT))
1584 unsigned ResultReg = getRegForValue(I->getOperand(0));
1588 // Handle sign-extension from i1 to i8.
1589 MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
1590 if (SrcVT == MVT::i1) {
1591 // Set the high bits to zero.
1592 unsigned ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg,
1593 /*TODO: Kill=*/false);
1597 // Negate the result to make an 8-bit sign extended value.
1598 ResultReg = createResultReg(&X86::GR8RegClass);
1599 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::NEG8r),
1600 ResultReg).addReg(ZExtReg);
1605 if (DstVT == MVT::i16) {
1606 // i8->i16 doesn't exist in the autogenerated isel table. Need to sign
1607 // extend to 32-bits and then extract down to 16-bits.
1608 unsigned Result32 = createResultReg(&X86::GR32RegClass);
1609 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVSX32rr8),
1610 Result32).addReg(ResultReg);
1612 ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, /*Kill=*/true,
1614 } else if (DstVT != MVT::i8) {
1615 ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND,
1616 ResultReg, /*Kill=*/true);
1621 updateValueMap(I, ResultReg);
1625 bool X86FastISel::X86SelectBranch(const Instruction *I) {
1626 // Unconditional branches are selected by tablegen-generated code.
1627 // Handle a conditional branch.
1628 const BranchInst *BI = cast<BranchInst>(I);
1629 MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
1630 MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
1632 // Fold the common case of a conditional branch with a comparison
1633 // in the same block (values defined on other blocks may not have
1634 // initialized registers).
1636 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
1637 if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
1638 EVT VT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1640 // Try to optimize or fold the cmp.
1641 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1642 switch (Predicate) {
1644 case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true;
1645 case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, DbgLoc); return true;
1648 const Value *CmpLHS = CI->getOperand(0);
1649 const Value *CmpRHS = CI->getOperand(1);
1651 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x,
1653 // We don't have to materialize a zero constant for this case and can just
1654 // use %x again on the RHS.
1655 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
1656 const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
1657 if (CmpRHSC && CmpRHSC->isNullValue())
1661 // Try to take advantage of fallthrough opportunities.
1662 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1663 std::swap(TrueMBB, FalseMBB);
1664 Predicate = CmpInst::getInversePredicate(Predicate);
1667 // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition
1668 // code check. Instead two branch instructions are required to check all
1669 // the flags. First we change the predicate to a supported condition code,
1670 // which will be the first branch. Later on we will emit the second branch.
1672 bool NeedExtraBranch = false;
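// For example, "br (fcmp une float %x, %y)" becomes a JNE to the true block
// followed by a JP to the true block, because unordered operands set PF and
// must also be treated as "not equal".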
1673 switch (Predicate) {
1675 case CmpInst::FCMP_OEQ:
1676 std::swap(TrueMBB, FalseMBB);
1678 case CmpInst::FCMP_UNE:
1679 NeedExtraBranch = true;
1680 Predicate = CmpInst::FCMP_ONE;
1685 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
1686 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1689 std::swap(CmpLHS, CmpRHS);
1691 // Emit a compare of the LHS and RHS, setting the flags.
1692 if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc()))
1695 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1))
1696 .addMBB(TrueMBB).addImm(CC);
1698 // X86 requires a second branch to handle UNE (and OEQ, which is mapped
1700 if (NeedExtraBranch) {
1701 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1))
1702 .addMBB(TrueMBB).addImm(X86::COND_P);
1705 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1708 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1709 // Handle things like "%cond = trunc i32 %X to i1 / br i1 %cond", which
1710 // typically happen for _Bool and C++ bools.
1712 if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
1713 isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
1714 unsigned TestOpc = 0;
1715 switch (SourceVT.SimpleTy) {
1717 case MVT::i8: TestOpc = X86::TEST8ri; break;
1718 case MVT::i16: TestOpc = X86::TEST16ri; break;
1719 case MVT::i32: TestOpc = X86::TEST32ri; break;
1720 case MVT::i64: TestOpc = X86::TEST64ri32; break;
1723 unsigned OpReg = getRegForValue(TI->getOperand(0));
1724 if (OpReg == 0) return false;
1726 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))
1727 .addReg(OpReg).addImm(1);
1729 unsigned JmpCond = X86::COND_NE;
1730 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1731 std::swap(TrueMBB, FalseMBB);
1732 JmpCond = X86::COND_E;
1735 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1))
1736 .addMBB(TrueMBB).addImm(JmpCond);
1738 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1742 } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) {
1743 // Fake request the condition, otherwise the intrinsic might be completely
1745 unsigned TmpReg = getRegForValue(BI->getCondition());
1749 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1))
1750 .addMBB(TrueMBB).addImm(CC);
1751 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1755 // Otherwise do a clumsy setcc and re-test it.
1756 // Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used
1757 // in an explicit cast, so make sure to handle that correctly.
1758 unsigned OpReg = getRegForValue(BI->getCondition());
1759 if (OpReg == 0) return false;
1761 // In case OpReg is a K register, COPY to a GPR
1762 if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) {
1763 unsigned KOpReg = OpReg;
1764 OpReg = createResultReg(&X86::GR32RegClass);
1765 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1766 TII.get(TargetOpcode::COPY), OpReg)
1768 OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, /*Kill=*/true,
1771 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
1774 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JCC_1))
1775 .addMBB(TrueMBB).addImm(X86::COND_NE);
1776 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1780 bool X86FastISel::X86SelectShift(const Instruction *I) {
1781 unsigned CReg = 0, OpReg = 0;
1782 const TargetRegisterClass *RC = nullptr;
1783 if (I->getType()->isIntegerTy(8)) {
1785 RC = &X86::GR8RegClass;
1786 switch (I->getOpcode()) {
1787 case Instruction::LShr: OpReg = X86::SHR8rCL; break;
1788 case Instruction::AShr: OpReg = X86::SAR8rCL; break;
1789 case Instruction::Shl: OpReg = X86::SHL8rCL; break;
1790 default: return false;
1792 } else if (I->getType()->isIntegerTy(16)) {
1794 RC = &X86::GR16RegClass;
1795 switch (I->getOpcode()) {
1796 default: llvm_unreachable("Unexpected shift opcode");
1797 case Instruction::LShr: OpReg = X86::SHR16rCL; break;
1798 case Instruction::AShr: OpReg = X86::SAR16rCL; break;
1799 case Instruction::Shl: OpReg = X86::SHL16rCL; break;
1801 } else if (I->getType()->isIntegerTy(32)) {
1803 RC = &X86::GR32RegClass;
1804 switch (I->getOpcode()) {
1805 default: llvm_unreachable("Unexpected shift opcode");
1806 case Instruction::LShr: OpReg = X86::SHR32rCL; break;
1807 case Instruction::AShr: OpReg = X86::SAR32rCL; break;
1808 case Instruction::Shl: OpReg = X86::SHL32rCL; break;
1810 } else if (I->getType()->isIntegerTy(64)) {
1812 RC = &X86::GR64RegClass;
1813 switch (I->getOpcode()) {
1814 default: llvm_unreachable("Unexpected shift opcode");
1815 case Instruction::LShr: OpReg = X86::SHR64rCL; break;
1816 case Instruction::AShr: OpReg = X86::SAR64rCL; break;
1817 case Instruction::Shl: OpReg = X86::SHL64rCL; break;
1824 if (!isTypeLegal(I->getType(), VT))
1827 unsigned Op0Reg = getRegForValue(I->getOperand(0));
1828 if (Op0Reg == 0) return false;
1830 unsigned Op1Reg = getRegForValue(I->getOperand(1));
1831 if (Op1Reg == 0) return false;
1832 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
1833 CReg).addReg(Op1Reg);
1835 // The shift instruction uses X86::CL. If we defined a super-register
1836 // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
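// For example, "shl i32 %x, %n" is selected as a COPY of %n into ECX (plus a
// KILL of CL's super-register to keep liveness precise) followed by SHL32rCL,
// which shifts by the amount held in CL.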
1837 if (CReg != X86::CL)
1838 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1839 TII.get(TargetOpcode::KILL), X86::CL)
1840 .addReg(CReg, RegState::Kill);
1842 unsigned ResultReg = createResultReg(RC);
1843 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg)
1845 updateValueMap(I, ResultReg);
1849 bool X86FastISel::X86SelectDivRem(const Instruction *I) {
1850 const static unsigned NumTypes = 4; // i8, i16, i32, i64
1851 const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem
1852 const static bool S = true; // IsSigned
1853 const static bool U = false; // !IsSigned
1854 const static unsigned Copy = TargetOpcode::COPY;
1855 // For the X86 DIV/IDIV instruction, in most cases the dividend
1856 // (numerator) must be in a specific register pair highreg:lowreg,
1857 // producing the quotient in lowreg and the remainder in highreg.
1858 // For most data types, to set up the instruction, the dividend is
1859 // copied into lowreg, and lowreg is sign-extended or zero-extended
1860 // into highreg. The exception is i8, where the dividend is defined
1861 // as a single register rather than a register pair, and we
1862 // therefore directly sign-extend or zero-extend the dividend into
1863 // lowreg, instead of copying, and ignore the highreg.
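// For example, a 32-bit sdiv copies the dividend into EAX, sign-extends it into
// EDX with CDQ, issues IDIV32r with the divisor register, and reads the
// quotient from EAX (an srem would instead read the remainder from EDX).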
1864 const static struct DivRemEntry {
1865 // The following portion depends only on the data type.
1866 const TargetRegisterClass *RC;
1867 unsigned LowInReg; // low part of the register pair
1868 unsigned HighInReg; // high part of the register pair
1869 // The following portion depends on both the data type and the operation.
1870 struct DivRemResult {
1871 unsigned OpDivRem; // The specific DIV/IDIV opcode to use.
1872 unsigned OpSignExtend; // Opcode for sign-extending lowreg into
1873 // highreg, or copying a zero into highreg.
1874 unsigned OpCopy; // Opcode for copying dividend into lowreg, or
1875 // zero/sign-extending into lowreg for i8.
1876 unsigned DivRemResultReg; // Register containing the desired result.
1877 bool IsOpSigned; // Whether to use signed or unsigned form.
1878 } ResultTable[NumOps];
1879 } OpTable[NumTypes] = {
1880 { &X86::GR8RegClass, X86::AX, 0, {
1881 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv
1882 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem
1883 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv
1884 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem
1887 { &X86::GR16RegClass, X86::AX, X86::DX, {
1888 { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv
1889 { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem
1890 { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv
1891 { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem
1894 { &X86::GR32RegClass, X86::EAX, X86::EDX, {
1895 { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv
1896 { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem
1897 { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv
1898 { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem
1901 { &X86::GR64RegClass, X86::RAX, X86::RDX, {
1902 { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv
1903 { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem
1904 { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv
1905 { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem
1911 if (!isTypeLegal(I->getType(), VT))
1914 unsigned TypeIndex, OpIndex;
1915 switch (VT.SimpleTy) {
1916 default: return false;
1917 case MVT::i8: TypeIndex = 0; break;
1918 case MVT::i16: TypeIndex = 1; break;
1919 case MVT::i32: TypeIndex = 2; break;
1920 case MVT::i64: TypeIndex = 3;
1921 if (!Subtarget->is64Bit())
1926 switch (I->getOpcode()) {
1927 default: llvm_unreachable("Unexpected div/rem opcode");
1928 case Instruction::SDiv: OpIndex = 0; break;
1929 case Instruction::SRem: OpIndex = 1; break;
1930 case Instruction::UDiv: OpIndex = 2; break;
1931 case Instruction::URem: OpIndex = 3; break;
1934 const DivRemEntry &TypeEntry = OpTable[TypeIndex];
1935 const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
1936 unsigned Op0Reg = getRegForValue(I->getOperand(0));
1939 unsigned Op1Reg = getRegForValue(I->getOperand(1));
1943 // Move op0 into low-order input register.
1944 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1945 TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
1946 // Zero-extend or sign-extend into high-order input register.
1947 if (OpEntry.OpSignExtend) {
1948 if (OpEntry.IsOpSigned)
1949 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1950 TII.get(OpEntry.OpSignExtend));
1952 unsigned Zero32 = createResultReg(&X86::GR32RegClass);
1953 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1954 TII.get(X86::MOV32r0), Zero32);
1956 // Copy the zero into the appropriate sub/super/identical physical
1957 // register. Unfortunately the operations needed are not uniform enough
1958 // to fit neatly into the table above.
1959 if (VT == MVT::i16) {
1960 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1961 TII.get(Copy), TypeEntry.HighInReg)
1962 .addReg(Zero32, 0, X86::sub_16bit);
1963 } else if (VT == MVT::i32) {
1964 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1965 TII.get(Copy), TypeEntry.HighInReg)
1967 } else if (VT == MVT::i64) {
1968 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1969 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
1970 .addImm(0).addReg(Zero32).addImm(X86::sub_32bit);
1974 // Generate the DIV/IDIV instruction.
1975 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1976 TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
1977 // For i8 remainder, we can't reference ah directly, as we'll end
1978 // up with bogus copies like %r9b = COPY %ah. Reference ax
1979 // instead to prevent ah references in a rex instruction.
1981 // The current assumption of the fast register allocator is that isel
1982 // won't generate explicit references to the GR8_NOREX registers. If
1983 // the allocator and/or the backend get enhanced to be more robust in
1984 // that regard, this can be, and should be, removed.
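// Illustrative sketch of the workaround below for an i8 remainder on x86-64:
//   %src16 = COPY $ax
//   %res16 = SHR16ri %src16, 8           ; move AH down into the low byte
//   %res8  = EXTRACT_SUBREG %res16, sub_8bit
// rather than a direct COPY from $ah.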
1985 unsigned ResultReg = 0;
1986 if ((I->getOpcode() == Instruction::SRem ||
1987 I->getOpcode() == Instruction::URem) &&
1988 OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
1989 unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass);
1990 unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass);
1991 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1992 TII.get(Copy), SourceSuperReg).addReg(X86::AX);
1994 // Shift AX right by 8 bits instead of using AH.
1995 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SHR16ri),
1996 ResultSuperReg).addReg(SourceSuperReg).addImm(8);
1998 // Now reference the 8-bit subreg of the result.
1999 ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
2000 /*Kill=*/true, X86::sub_8bit);
2002 // Copy the result out of the physreg if we haven't already.
2004 ResultReg = createResultReg(TypeEntry.RC);
2005 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg)
2006 .addReg(OpEntry.DivRemResultReg);
2008 updateValueMap(I, ResultReg);
2013 /// Emit a conditional move instruction (if they are supported) to lower the select.
2015 bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
2016 // Check if the subtarget supports these instructions.
2017 if (!Subtarget->hasCMov())
2020 // FIXME: Add support for i8.
2021 if (RetVT < MVT::i16 || RetVT > MVT::i64)
2024 const Value *Cond = I->getOperand(0);
2025 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2026 bool NeedTest = true;
2027 X86::CondCode CC = X86::COND_NE;
2029 // Optimize conditions coming from a compare if both instructions are in the
2030 // same basic block (values defined in other basic blocks may not have
2031 // initialized registers).
2032 const auto *CI = dyn_cast<CmpInst>(Cond);
2033 if (CI && (CI->getParent() == I->getParent())) {
2034 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2036 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
2037 static const uint16_t SETFOpcTable[2][3] = {
2038 { X86::COND_NP, X86::COND_E, X86::TEST8rr },
2039 { X86::COND_P, X86::COND_NE, X86::OR8rr }
2041 const uint16_t *SETFOpc = nullptr;
2042 switch (Predicate) {
2044 case CmpInst::FCMP_OEQ:
2045 SETFOpc = &SETFOpcTable[0][0];
2046 Predicate = CmpInst::ICMP_NE;
2048 case CmpInst::FCMP_UNE:
2049 SETFOpc = &SETFOpcTable[1][0];
2050 Predicate = CmpInst::ICMP_NE;
2055 std::tie(CC, NeedSwap) = X86::getX86ConditionCode(Predicate);
2056 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
2058 const Value *CmpLHS = CI->getOperand(0);
2059 const Value *CmpRHS = CI->getOperand(1);
2061 std::swap(CmpLHS, CmpRHS);
2063 EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
2064 // Emit a compare of the LHS and RHS, setting the flags.
2065 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
2069 unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
2070 unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
2071 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
2072 FlagReg1).addImm(SETFOpc[0]);
2073 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
2074 FlagReg2).addImm(SETFOpc[1]);
2075 auto const &II = TII.get(SETFOpc[2]);
2076 if (II.getNumDefs()) {
2077 unsigned TmpReg = createResultReg(&X86::GR8RegClass);
2078 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg)
2079 .addReg(FlagReg2).addReg(FlagReg1);
2081 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2082 .addReg(FlagReg2).addReg(FlagReg1);
2086 } else if (foldX86XALUIntrinsic(CC, I, Cond)) {
2087 // Fake request the condition, otherwise the intrinsic might be completely optimized away.
2089 unsigned TmpReg = getRegForValue(Cond);
2097 // Selects operate on i1; however, CondReg is 8 bits wide and may contain
2098 // garbage. Only the least significant bit is guaranteed to be accurate, so
2099 // reading more than the LSB may yield non-zero values even when the LSB is
2100 // zero. Therefore, truncate CondReg to i1 for the select; this is achieved
2101 // by performing a TEST against 1.
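// Illustrative sketch of the sequence this produces:
//   TEST8ri %cond, 1      ; ZF reflects only the low bit of the condition
//   CMOVcc ...            ; predicated on COND_NE below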
2102 unsigned CondReg = getRegForValue(Cond);
2105 bool CondIsKill = hasTrivialKill(Cond);
2107 // In case CondReg is a K register, COPY to a GPR
2108 if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
2109 unsigned KCondReg = CondReg;
2110 CondReg = createResultReg(&X86::GR32RegClass);
2111 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2112 TII.get(TargetOpcode::COPY), CondReg)
2113 .addReg(KCondReg, getKillRegState(CondIsKill));
2114 CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Kill=*/true,
2117 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
2118 .addReg(CondReg, getKillRegState(CondIsKill))
2122 const Value *LHS = I->getOperand(1);
2123 const Value *RHS = I->getOperand(2);
2125 unsigned RHSReg = getRegForValue(RHS);
2126 bool RHSIsKill = hasTrivialKill(RHS);
2128 unsigned LHSReg = getRegForValue(LHS);
2129 bool LHSIsKill = hasTrivialKill(LHS);
2131 if (!LHSReg || !RHSReg)
2134 const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo();
2135 unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC)/8);
2136 unsigned ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill,
2137 LHSReg, LHSIsKill, CC);
2138 updateValueMap(I, ResultReg);
2142 /// Emit SSE or AVX instructions to lower the select.
2144 /// Try to use SSE1/SSE2 instructions to simulate a select without branches.
2145 /// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary
2146 /// SSE instructions are available. If AVX is available, try to use a VBLENDV.
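/// For f32 without AVX the emitted sequence is roughly (illustrative only):
///   cmpss  $cc  -> mask     ; all-ones if the predicate holds, else all-zeros
///   andps  mask, true_val   -> t1
///   andnps mask, false_val  -> t2   ; ~mask & false_val
///   orps   t1, t2           -> result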
2147 bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
2148 // Optimize conditions coming from a compare if both instructions are in the
2149 // same basic block (values defined in other basic blocks may not have
2150 // initialized registers).
2151 const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
2152 if (!CI || (CI->getParent() != I->getParent()))
2155 if (I->getType() != CI->getOperand(0)->getType() ||
2156 !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
2157 (Subtarget->hasSSE2() && RetVT == MVT::f64)))
2160 const Value *CmpLHS = CI->getOperand(0);
2161 const Value *CmpRHS = CI->getOperand(1);
2162 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2164 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
2165 // We don't have to materialize a zero constant for this case and can just use
2166 // %x again on the RHS.
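// e.g. (illustrative IR):
//   %c = fcmp ord float %x, 0.0
//   %r = select i1 %c, float %a, float %b
// can be checked as "cmpordss %x, %x", with no 0.0 constant materialized.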
2167 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
2168 const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
2169 if (CmpRHSC && CmpRHSC->isNullValue())
2175 std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);
2176 if (CC > 7 && !Subtarget->hasAVX())
2180 std::swap(CmpLHS, CmpRHS);
2182 const Value *LHS = I->getOperand(1);
2183 const Value *RHS = I->getOperand(2);
2185 unsigned LHSReg = getRegForValue(LHS);
2186 bool LHSIsKill = hasTrivialKill(LHS);
2188 unsigned RHSReg = getRegForValue(RHS);
2189 bool RHSIsKill = hasTrivialKill(RHS);
2191 unsigned CmpLHSReg = getRegForValue(CmpLHS);
2192 bool CmpLHSIsKill = hasTrivialKill(CmpLHS);
2194 unsigned CmpRHSReg = getRegForValue(CmpRHS);
2195 bool CmpRHSIsKill = hasTrivialKill(CmpRHS);
2197   if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
2200 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2203 if (Subtarget->hasAVX512()) {
2204 // If we have AVX512 we can use a mask compare and masked movss/sd.
2205 const TargetRegisterClass *VR128X = &X86::VR128XRegClass;
2206 const TargetRegisterClass *VK1 = &X86::VK1RegClass;
2208 unsigned CmpOpcode =
2209 (RetVT == MVT::f32) ? X86::VCMPSSZrr : X86::VCMPSDZrr;
2210 unsigned CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpLHSIsKill,
2211 CmpRHSReg, CmpRHSIsKill, CC);
2213 // Need an IMPLICIT_DEF for the input that is used to generate the upper
2214 // bits of the result register since it's not based on any of the inputs.
2215 unsigned ImplicitDefReg = createResultReg(VR128X);
2216 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2217 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2219 // Place RHSReg in the passthru of the masked movss/sd operation and put
2220 // LHSReg in the input. The mask input comes from the compare.
2221 unsigned MovOpcode =
2222 (RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk;
2223 unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, RHSIsKill,
2224 CmpReg, true, ImplicitDefReg, true,
2227 ResultReg = createResultReg(RC);
2228 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2229 TII.get(TargetOpcode::COPY), ResultReg).addReg(MovReg);
2231 } else if (Subtarget->hasAVX()) {
2232 const TargetRegisterClass *VR128 = &X86::VR128RegClass;
2234 // If we have AVX, create 1 blendv instead of 3 logic instructions.
2235 // Blendv was introduced with SSE 4.1, but the 2 register form implicitly
2236 // uses XMM0 as the selection register. That may need just as many
2237 // instructions as the AND/ANDN/OR sequence due to register moves, so don't bother.
2239 unsigned CmpOpcode =
2240 (RetVT == MVT::f32) ? X86::VCMPSSrr : X86::VCMPSDrr;
2241 unsigned BlendOpcode =
2242 (RetVT == MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr;
2244 unsigned CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpLHSIsKill,
2245 CmpRHSReg, CmpRHSIsKill, CC);
2246 unsigned VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill,
2247 LHSReg, LHSIsKill, CmpReg, true);
2248 ResultReg = createResultReg(RC);
2249 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2250 TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg);
2252 // Choose the SSE instruction sequence based on data type (float or double).
2253 static const uint16_t OpcTable[2][4] = {
2254 { X86::CMPSSrr, X86::ANDPSrr, X86::ANDNPSrr, X86::ORPSrr },
2255 { X86::CMPSDrr, X86::ANDPDrr, X86::ANDNPDrr, X86::ORPDrr }
2258 const uint16_t *Opc = nullptr;
2259 switch (RetVT.SimpleTy) {
2260 default: return false;
2261 case MVT::f32: Opc = &OpcTable[0][0]; break;
2262 case MVT::f64: Opc = &OpcTable[1][0]; break;
2265 const TargetRegisterClass *VR128 = &X86::VR128RegClass;
2266 unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
2267 CmpRHSReg, CmpRHSIsKill, CC);
2268 unsigned AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, /*IsKill=*/false,
2270 unsigned AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, /*IsKill=*/true,
2272 unsigned OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, /*IsKill=*/true,
2273 AndReg, /*IsKill=*/true);
2274 ResultReg = createResultReg(RC);
2275 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2276 TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg);
2278 updateValueMap(I, ResultReg);
2282 bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
2283 // These are pseudo CMOV instructions and will be later expanded into control-flow patterns.
2286 switch (RetVT.SimpleTy) {
2287 default: return false;
2288 case MVT::i8: Opc = X86::CMOV_GR8; break;
2289 case MVT::i16: Opc = X86::CMOV_GR16; break;
2290 case MVT::i32: Opc = X86::CMOV_GR32; break;
2291 case MVT::f32: Opc = Subtarget->hasAVX512() ? X86::CMOV_FR32X
2292 : X86::CMOV_FR32; break;
2293 case MVT::f64: Opc = Subtarget->hasAVX512() ? X86::CMOV_FR64X
2294 : X86::CMOV_FR64; break;
2297 const Value *Cond = I->getOperand(0);
2298 X86::CondCode CC = X86::COND_NE;
2300 // Optimize conditions coming from a compare if both instructions are in the
2301 // same basic block (values defined in other basic blocks may not have
2302 // initialized registers).
2303 const auto *CI = dyn_cast<CmpInst>(Cond);
2304 if (CI && (CI->getParent() == I->getParent())) {
2306 std::tie(CC, NeedSwap) = X86::getX86ConditionCode(CI->getPredicate());
2307 if (CC > X86::LAST_VALID_COND)
2310 const Value *CmpLHS = CI->getOperand(0);
2311 const Value *CmpRHS = CI->getOperand(1);
2314 std::swap(CmpLHS, CmpRHS);
2316 EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
2317 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
2320 unsigned CondReg = getRegForValue(Cond);
2323 bool CondIsKill = hasTrivialKill(Cond);
2325 // In case CondReg is a K register, COPY to a GPR
2326 if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
2327 unsigned KCondReg = CondReg;
2328 CondReg = createResultReg(&X86::GR32RegClass);
2329 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2330 TII.get(TargetOpcode::COPY), CondReg)
2331 .addReg(KCondReg, getKillRegState(CondIsKill));
2332 CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Kill=*/true,
2335 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
2336 .addReg(CondReg, getKillRegState(CondIsKill))
2340 const Value *LHS = I->getOperand(1);
2341 const Value *RHS = I->getOperand(2);
2343 unsigned LHSReg = getRegForValue(LHS);
2344 bool LHSIsKill = hasTrivialKill(LHS);
2346 unsigned RHSReg = getRegForValue(RHS);
2347 bool RHSIsKill = hasTrivialKill(RHS);
2349 if (!LHSReg || !RHSReg)
2352 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2354 unsigned ResultReg =
2355 fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
2356 updateValueMap(I, ResultReg);
2360 bool X86FastISel::X86SelectSelect(const Instruction *I) {
2362 if (!isTypeLegal(I->getType(), RetVT))
2365 // Check if we can fold the select.
2366 if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {
2367 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2368 const Value *Opnd = nullptr;
2369 switch (Predicate) {
2371 case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break;
2372 case CmpInst::FCMP_TRUE: Opnd = I->getOperand(1); break;
2374 // No need for a select anymore - this is an unconditional move.
2376 unsigned OpReg = getRegForValue(Opnd);
2379 bool OpIsKill = hasTrivialKill(Opnd);
2380 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2381 unsigned ResultReg = createResultReg(RC);
2382 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2383 TII.get(TargetOpcode::COPY), ResultReg)
2384 .addReg(OpReg, getKillRegState(OpIsKill));
2385 updateValueMap(I, ResultReg);
2390 // First try to use real conditional move instructions.
2391 if (X86FastEmitCMoveSelect(RetVT, I))
2394 // Try to use a sequence of SSE instructions to simulate a conditional move.
2395 if (X86FastEmitSSESelect(RetVT, I))
2398 // Fall back to pseudo conditional move instructions, which will later be
2399 // converted to control flow.
2400 if (X86FastEmitPseudoSelect(RetVT, I))
2406 // Common code for X86SelectSIToFP and X86SelectUIToFP.
2407 bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {
2408 // The target-independent selection algorithm in FastISel already knows how
2409 // to select a SINT_TO_FP if the target is SSE but not AVX.
2410 // Early exit if the subtarget doesn't have AVX.
2411 // Unsigned conversion requires AVX512.
2412 bool HasAVX512 = Subtarget->hasAVX512();
2413 if (!Subtarget->hasAVX() || (!IsSigned && !HasAVX512))
2416 // TODO: We could sign extend narrower types.
2417 MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
2418 if (SrcVT != MVT::i32 && SrcVT != MVT::i64)
2421 // Select integer to float/double conversion.
2422 unsigned OpReg = getRegForValue(I->getOperand(0));
2428 static const uint16_t SCvtOpc[2][2][2] = {
2429 { { X86::VCVTSI2SSrr, X86::VCVTSI642SSrr },
2430 { X86::VCVTSI2SDrr, X86::VCVTSI642SDrr } },
2431 { { X86::VCVTSI2SSZrr, X86::VCVTSI642SSZrr },
2432 { X86::VCVTSI2SDZrr, X86::VCVTSI642SDZrr } },
2434 static const uint16_t UCvtOpc[2][2] = {
2435 { X86::VCVTUSI2SSZrr, X86::VCVTUSI642SSZrr },
2436 { X86::VCVTUSI2SDZrr, X86::VCVTUSI642SDZrr },
2438 bool Is64Bit = SrcVT == MVT::i64;
2440 if (I->getType()->isDoubleTy()) {
2441 // s/uitofp int -> double
2442 Opcode = IsSigned ? SCvtOpc[HasAVX512][1][Is64Bit] : UCvtOpc[1][Is64Bit];
2443 } else if (I->getType()->isFloatTy()) {
2444 // s/uitofp int -> float
2445 Opcode = IsSigned ? SCvtOpc[HasAVX512][0][Is64Bit] : UCvtOpc[0][Is64Bit];
2449 MVT DstVT = TLI.getValueType(DL, I->getType()).getSimpleVT();
2450 const TargetRegisterClass *RC = TLI.getRegClassFor(DstVT);
2451 unsigned ImplicitDefReg = createResultReg(RC);
2452 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2453 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2454 unsigned ResultReg =
2455 fastEmitInst_rr(Opcode, RC, ImplicitDefReg, true, OpReg, false);
2456 updateValueMap(I, ResultReg);
2460 bool X86FastISel::X86SelectSIToFP(const Instruction *I) {
2461 return X86SelectIntToFP(I, /*IsSigned*/true);
2464 bool X86FastISel::X86SelectUIToFP(const Instruction *I) {
2465 return X86SelectIntToFP(I, /*IsSigned*/false);
2468 // Helper method used by X86SelectFPExt and X86SelectFPTrunc.
2469 bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
2471 const TargetRegisterClass *RC) {
2472 assert((I->getOpcode() == Instruction::FPExt ||
2473 I->getOpcode() == Instruction::FPTrunc) &&
2474 "Instruction must be an FPExt or FPTrunc!");
2475 bool HasAVX = Subtarget->hasAVX();
2477 unsigned OpReg = getRegForValue(I->getOperand(0));
2481 unsigned ImplicitDefReg;
2483 ImplicitDefReg = createResultReg(RC);
2484 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2485 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2489 unsigned ResultReg = createResultReg(RC);
2490 MachineInstrBuilder MIB;
2491 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpc),
2495 MIB.addReg(ImplicitDefReg);
2498 updateValueMap(I, ResultReg);
2502 bool X86FastISel::X86SelectFPExt(const Instruction *I) {
2503 if (X86ScalarSSEf64 && I->getType()->isDoubleTy() &&
2504 I->getOperand(0)->getType()->isFloatTy()) {
2505 bool HasAVX512 = Subtarget->hasAVX512();
2506 // fpext from float to double.
2508 HasAVX512 ? X86::VCVTSS2SDZrr
2509 : Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
2510 return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f64));
2516 bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
2517 if (X86ScalarSSEf64 && I->getType()->isFloatTy() &&
2518 I->getOperand(0)->getType()->isDoubleTy()) {
2519 bool HasAVX512 = Subtarget->hasAVX512();
2520 // fptrunc from double to float.
2522 HasAVX512 ? X86::VCVTSD2SSZrr
2523 : Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
2524 return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f32));
2530 bool X86FastISel::X86SelectTrunc(const Instruction *I) {
2531 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
2532 EVT DstVT = TLI.getValueType(DL, I->getType());
2534 // This code only handles truncation to byte.
2535 if (DstVT != MVT::i8 && DstVT != MVT::i1)
2537 if (!TLI.isTypeLegal(SrcVT))
2540 unsigned InputReg = getRegForValue(I->getOperand(0));
2542 // Unhandled operand. Halt "fast" selection and bail.
2545 if (SrcVT == MVT::i8) {
2546 // Truncate from i8 to i1; no code needed.
2547 updateValueMap(I, InputReg);
2551 // Issue an extract_subreg.
2552 unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8,
2558 updateValueMap(I, ResultReg);
2562 bool X86FastISel::IsMemcpySmall(uint64_t Len) {
2563 return Len <= (Subtarget->is64Bit() ? 32 : 16);
2566 bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
2567 X86AddressMode SrcAM, uint64_t Len) {
2569 // Make sure we don't bloat code by inlining very large memcpy's.
2570 if (!IsMemcpySmall(Len))
2573 bool i64Legal = Subtarget->is64Bit();
2575 // We don't care about alignment here since we just emit integer accesses.
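// Illustrative sketch: on x86-64 an 11-byte copy would be emitted as an i64
// load/store pair, then an i16 pair, then an i8 pair, advancing the source
// and destination displacements by the chunk size after each step.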
2578 if (Len >= 8 && i64Legal)
2588 bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
2589 RV &= X86FastEmitStore(VT, Reg, /*Kill=*/true, DestAM);
2590 assert(RV && "Failed to emit load or store??");
2592 unsigned Size = VT.getSizeInBits()/8;
2594 DestAM.Disp += Size;
2601 bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
2602 // FIXME: Handle more intrinsics.
2603 switch (II->getIntrinsicID()) {
2604 default: return false;
2605 case Intrinsic::convert_from_fp16:
2606 case Intrinsic::convert_to_fp16: {
2607 if (Subtarget->useSoftFloat() || !Subtarget->hasF16C())
2610 const Value *Op = II->getArgOperand(0);
2611 unsigned InputReg = getRegForValue(Op);
2615 // F16C only allows converting from float to half and from half to float.
2616 bool IsFloatToHalf = II->getIntrinsicID() == Intrinsic::convert_to_fp16;
2617 if (IsFloatToHalf) {
2618 if (!Op->getType()->isFloatTy())
2621 if (!II->getType()->isFloatTy())
2625 unsigned ResultReg = 0;
2626 const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::v8i16);
2627 if (IsFloatToHalf) {
2628 // 'InputReg' is implicitly promoted from register class FR32 to
2629 // register class VR128 by method 'constrainOperandRegClass' which is
2630 // directly called by 'fastEmitInst_ri'.
2631 // Instruction VCVTPS2PHrr takes an extra immediate operand which is
2632 // used to provide rounding control: use MXCSR.RC, encoded as 0b100.
2633 // It's consistent with the other FP instructions, which are usually
2634 // controlled by MXCSR.
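// Sketch of the float -> half path that follows (illustrative only):
//   vcvtps2ph $4, %xmm_in, %xmm_tmp    ; round according to MXCSR.RC
//   vmovd     %xmm_tmp, %r32           ; the VMOVPDI2DIrr below
//   use the low 16 bits of %r32        ; the sub_16bit extract below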
2635 InputReg = fastEmitInst_ri(X86::VCVTPS2PHrr, RC, InputReg, false, 4);
2637 // Move the lower 32 bits of the converted value into a register of class GR32.
2638 ResultReg = createResultReg(&X86::GR32RegClass);
2639 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2640 TII.get(X86::VMOVPDI2DIrr), ResultReg)
2641 .addReg(InputReg, RegState::Kill);
2643 // The result value is in the lower 16-bits of ResultReg.
2644 unsigned RegIdx = X86::sub_16bit;
2645 ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx);
2647 assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!");
2648 // Explicitly sign-extend the input to 32-bit.
2649 InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::SIGN_EXTEND, InputReg,
2652 // The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr.
2653 InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR,
2654 InputReg, /*Kill=*/true);
2656 InputReg = fastEmitInst_r(X86::VCVTPH2PSrr, RC, InputReg, /*Kill=*/true);
2658 // The result value is in the lower 32-bits of ResultReg.
2659 // Emit an explicit copy from register class VR128 to register class FR32.
2660 ResultReg = createResultReg(&X86::FR32RegClass);
2661 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2662 TII.get(TargetOpcode::COPY), ResultReg)
2663 .addReg(InputReg, RegState::Kill);
2666 updateValueMap(II, ResultReg);
2669 case Intrinsic::frameaddress: {
2670 MachineFunction *MF = FuncInfo.MF;
2671 if (MF->getTarget().getMCAsmInfo()->usesWindowsCFI())
2674 Type *RetTy = II->getCalledFunction()->getReturnType();
2677 if (!isTypeLegal(RetTy, VT))
2681 const TargetRegisterClass *RC = nullptr;
2683 switch (VT.SimpleTy) {
2684 default: llvm_unreachable("Invalid result type for frameaddress.");
2685 case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;
2686 case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
2689 // This needs to be set before we call getPtrSizedFrameRegister, otherwise
2690 // we get the wrong frame register.
2691 MachineFrameInfo &MFI = MF->getFrameInfo();
2692 MFI.setFrameAddressIsTaken(true);
2694 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2695 unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*MF);
2696 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
2697 (FrameReg == X86::EBP && VT == MVT::i32)) &&
2698 "Invalid Frame Register!");
2700 // Always make a copy of the frame register to a vreg first, so that we
2701 // never directly reference the frame register (the TwoAddressInstruction-
2702 // Pass doesn't like that).
2703 unsigned SrcReg = createResultReg(RC);
2704 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2705 TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);
2707 // Now recursively load from the frame address.
2708 // movq (%rbp), %rax
2709 // movq (%rax), %rax
2710 // movq (%rax), %rax
2713 unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
2715 DestReg = createResultReg(RC);
2716 addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2717 TII.get(Opc), DestReg), SrcReg);
2721 updateValueMap(II, SrcReg);
2724 case Intrinsic::memcpy: {
2725 const MemCpyInst *MCI = cast<MemCpyInst>(II);
2726 // Don't handle volatile or variable length memcpys.
2727 if (MCI->isVolatile())
2730 if (isa<ConstantInt>(MCI->getLength())) {
2731 // Small memcpy's are common enough that we want to do them
2732 // without a call if possible.
2733 uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();
2734 if (IsMemcpySmall(Len)) {
2735 X86AddressMode DestAM, SrcAM;
2736 if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||
2737 !X86SelectAddress(MCI->getRawSource(), SrcAM))
2739 TryEmitSmallMemcpy(DestAM, SrcAM, Len);
2744 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2745 if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))
2748 if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
2751 return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 1);
2753 case Intrinsic::memset: {
2754 const MemSetInst *MSI = cast<MemSetInst>(II);
2756 if (MSI->isVolatile())
2759 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2760 if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))
2763 if (MSI->getDestAddressSpace() > 255)
2766 return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
2768 case Intrinsic::stackprotector: {
2769 // Emit code to store the stack guard onto the stack.
2770 EVT PtrTy = TLI.getPointerTy(DL);
2772 const Value *Op1 = II->getArgOperand(0); // The guard's value.
2773 const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));
2775 MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);
2777 // Grab the frame index.
2779 if (!X86SelectAddress(Slot, AM)) return false;
2780 if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
2783 case Intrinsic::dbg_declare: {
2784 const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
2786 assert(DI->getAddress() && "Null address should be checked earlier!");
2787 if (!X86SelectAddress(DI->getAddress(), AM))
2789 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
2790 // FIXME may need to add RegState::Debug to any registers produced,
2791 // although ESP/EBP should be the only ones at the moment.
2792 assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
2793 "Expected inlined-at fields to agree");
2794 addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM)
2796 .addMetadata(DI->getVariable())
2797 .addMetadata(DI->getExpression());
2800 case Intrinsic::trap: {
2801 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TRAP));
2804 case Intrinsic::sqrt: {
2805 if (!Subtarget->hasSSE1())
2808 Type *RetTy = II->getCalledFunction()->getReturnType();
2811 if (!isTypeLegal(RetTy, VT))
2814 // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
2815 // is not generated by FastISel yet.
2816 // FIXME: Update this code once tablegen can handle it.
2817 static const uint16_t SqrtOpc[3][2] = {
2818 { X86::SQRTSSr, X86::SQRTSDr },
2819 { X86::VSQRTSSr, X86::VSQRTSDr },
2820 { X86::VSQRTSSZr, X86::VSQRTSDZr },
2822 unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
2823 Subtarget->hasAVX() ? 1 :
2826 switch (VT.SimpleTy) {
2827 default: return false;
2828 case MVT::f32: Opc = SqrtOpc[AVXLevel][0]; break;
2829 case MVT::f64: Opc = SqrtOpc[AVXLevel][1]; break;
2832 const Value *SrcVal = II->getArgOperand(0);
2833 unsigned SrcReg = getRegForValue(SrcVal);
2838 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
2839 unsigned ImplicitDefReg = 0;
2841 ImplicitDefReg = createResultReg(RC);
2842 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2843 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2846 unsigned ResultReg = createResultReg(RC);
2847 MachineInstrBuilder MIB;
2848 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
2852 MIB.addReg(ImplicitDefReg);
2856 updateValueMap(II, ResultReg);
2859 case Intrinsic::sadd_with_overflow:
2860 case Intrinsic::uadd_with_overflow:
2861 case Intrinsic::ssub_with_overflow:
2862 case Intrinsic::usub_with_overflow:
2863 case Intrinsic::smul_with_overflow:
2864 case Intrinsic::umul_with_overflow: {
2865 // This implements the basic lowering of the xalu with overflow intrinsics
2866 // into add/sub/mul followed by either seto or setb.
2867 const Function *Callee = II->getCalledFunction();
2868 auto *Ty = cast<StructType>(Callee->getReturnType());
2869 Type *RetTy = Ty->getTypeAtIndex(0U);
2870 assert(Ty->getTypeAtIndex(1)->isIntegerTy() &&
2871 Ty->getTypeAtIndex(1)->getScalarSizeInBits() == 1 &&
2872 "Overflow value expected to be an i1");
2875 if (!isTypeLegal(RetTy, VT))
2878 if (VT < MVT::i8 || VT > MVT::i64)
2881 const Value *LHS = II->getArgOperand(0);
2882 const Value *RHS = II->getArgOperand(1);
2884 // Canonicalize immediate to the RHS.
2885 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
2886 isCommutativeIntrinsic(II))
2887 std::swap(LHS, RHS);
2889 unsigned BaseOpc, CondCode;
2890 switch (II->getIntrinsicID()) {
2891 default: llvm_unreachable("Unexpected intrinsic!");
2892 case Intrinsic::sadd_with_overflow:
2893 BaseOpc = ISD::ADD; CondCode = X86::COND_O; break;
2894 case Intrinsic::uadd_with_overflow:
2895 BaseOpc = ISD::ADD; CondCode = X86::COND_B; break;
2896 case Intrinsic::ssub_with_overflow:
2897 BaseOpc = ISD::SUB; CondCode = X86::COND_O; break;
2898 case Intrinsic::usub_with_overflow:
2899 BaseOpc = ISD::SUB; CondCode = X86::COND_B; break;
2900 case Intrinsic::smul_with_overflow:
2901 BaseOpc = X86ISD::SMUL; CondCode = X86::COND_O; break;
2902 case Intrinsic::umul_with_overflow:
2903 BaseOpc = X86ISD::UMUL; CondCode = X86::COND_O; break;
2906 unsigned LHSReg = getRegForValue(LHS);
2909 bool LHSIsKill = hasTrivialKill(LHS);
2911 unsigned ResultReg = 0;
2912 // Check if we have an immediate version.
2913 if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
2914 static const uint16_t Opc[2][4] = {
2915 { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
2916 { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
2919 if (CI->isOne() && (BaseOpc == ISD::ADD || BaseOpc == ISD::SUB) &&
2920 CondCode == X86::COND_O) {
2921 // We can use INC/DEC.
2922 ResultReg = createResultReg(TLI.getRegClassFor(VT));
2923 bool IsDec = BaseOpc == ISD::SUB;
2924 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2925 TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
2926 .addReg(LHSReg, getKillRegState(LHSIsKill));
2928 ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
2929 CI->getZExtValue());
2935 RHSReg = getRegForValue(RHS);
2938 RHSIsKill = hasTrivialKill(RHS);
2939 ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
2943 // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit it manually.
2945 if (BaseOpc == X86ISD::UMUL && !ResultReg) {
2946 static const uint16_t MULOpc[] =
2947 { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
2948 static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
2949 // First copy the first operand into AL/AX/EAX/RAX (as appropriate for the
2950 // type), which is an implicit input to the X86::MUL*r instruction.
2951 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2952 TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
2953 .addReg(LHSReg, getKillRegState(LHSIsKill));
2954 ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
2955 TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
2956 } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
2957 static const uint16_t MULOpc[] =
2958 { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
2959 if (VT == MVT::i8) {
2960 // Copy the first operand into AL, which is an implicit input to the
2961 // X86::IMUL8r instruction.
2962 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2963 TII.get(TargetOpcode::COPY), X86::AL)
2964 .addReg(LHSReg, getKillRegState(LHSIsKill));
2965 ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
2968 ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
2969 TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
2976 // Assign to a GPR since the overflow return value is lowered to a SETcc.
2977 unsigned ResultReg2 = createResultReg(&X86::GR8RegClass);
2978 assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
2979 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
2980 ResultReg2).addImm(CondCode);
2982 updateValueMap(II, ResultReg, 2);
2985 case Intrinsic::x86_sse_cvttss2si:
2986 case Intrinsic::x86_sse_cvttss2si64:
2987 case Intrinsic::x86_sse2_cvttsd2si:
2988 case Intrinsic::x86_sse2_cvttsd2si64: {
2990 switch (II->getIntrinsicID()) {
2991 default: llvm_unreachable("Unexpected intrinsic.");
2992 case Intrinsic::x86_sse_cvttss2si:
2993 case Intrinsic::x86_sse_cvttss2si64:
2994 if (!Subtarget->hasSSE1())
2996 IsInputDouble = false;
2998 case Intrinsic::x86_sse2_cvttsd2si:
2999 case Intrinsic::x86_sse2_cvttsd2si64:
3000 if (!Subtarget->hasSSE2())
3002 IsInputDouble = true;
3006 Type *RetTy = II->getCalledFunction()->getReturnType();
3008 if (!isTypeLegal(RetTy, VT))
3011 static const uint16_t CvtOpc[3][2][2] = {
3012 { { X86::CVTTSS2SIrr, X86::CVTTSS2SI64rr },
3013 { X86::CVTTSD2SIrr, X86::CVTTSD2SI64rr } },
3014 { { X86::VCVTTSS2SIrr, X86::VCVTTSS2SI64rr },
3015 { X86::VCVTTSD2SIrr, X86::VCVTTSD2SI64rr } },
3016 { { X86::VCVTTSS2SIZrr, X86::VCVTTSS2SI64Zrr },
3017 { X86::VCVTTSD2SIZrr, X86::VCVTTSD2SI64Zrr } },
3019 unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
3020 Subtarget->hasAVX() ? 1 :
3023 switch (VT.SimpleTy) {
3024 default: llvm_unreachable("Unexpected result type.");
3025 case MVT::i32: Opc = CvtOpc[AVXLevel][IsInputDouble][0]; break;
3026 case MVT::i64: Opc = CvtOpc[AVXLevel][IsInputDouble][1]; break;
3029 // Check if we can fold insertelement instructions into the convert.
3030 const Value *Op = II->getArgOperand(0);
3031 while (auto *IE = dyn_cast<InsertElementInst>(Op)) {
3032 const Value *Index = IE->getOperand(2);
3033 if (!isa<ConstantInt>(Index))
3035 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
3038 Op = IE->getOperand(1);
3041 Op = IE->getOperand(0);
3044 unsigned Reg = getRegForValue(Op);
3048 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
3049 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
3052 updateValueMap(II, ResultReg);
3058 bool X86FastISel::fastLowerArguments() {
3059 if (!FuncInfo.CanLowerReturn)
3062 const Function *F = FuncInfo.Fn;
3066 CallingConv::ID CC = F->getCallingConv();
3067 if (CC != CallingConv::C)
3070 if (Subtarget->isCallingConvWin64(CC))
3073 if (!Subtarget->is64Bit())
3076 if (Subtarget->useSoftFloat())
3079 // Only handle simple cases, i.e. up to 6 i32/i64 scalar arguments in GPRs
     // and up to 8 f32/f64 arguments in XMM registers.
3080 unsigned GPRCnt = 0;
3081 unsigned FPRCnt = 0;
3082 for (auto const &Arg : F->args()) {
3083 if (Arg.hasAttribute(Attribute::ByVal) ||
3084 Arg.hasAttribute(Attribute::InReg) ||
3085 Arg.hasAttribute(Attribute::StructRet) ||
3086 Arg.hasAttribute(Attribute::SwiftSelf) ||
3087 Arg.hasAttribute(Attribute::SwiftError) ||
3088 Arg.hasAttribute(Attribute::Nest))
3091 Type *ArgTy = Arg.getType();
3092 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
3095 EVT ArgVT = TLI.getValueType(DL, ArgTy);
3096 if (!ArgVT.isSimple()) return false;
3097 switch (ArgVT.getSimpleVT().SimpleTy) {
3098 default: return false;
3105 if (!Subtarget->hasSSE1())
3118 static const MCPhysReg GPR32ArgRegs[] = {
3119 X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
3121 static const MCPhysReg GPR64ArgRegs[] = {
3122 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
3124 static const MCPhysReg XMMArgRegs[] = {
3125 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3126 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3129 unsigned GPRIdx = 0;
3130 unsigned FPRIdx = 0;
3131 for (auto const &Arg : F->args()) {
3132 MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
3133 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
3135 switch (VT.SimpleTy) {
3136 default: llvm_unreachable("Unexpected value type.");
3137 case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
3138 case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
3139 case MVT::f32: LLVM_FALLTHROUGH;
3140 case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
3142 unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3143 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
3144 // Without this, EmitLiveInCopies may eliminate the livein if its only
3145 // use is a bitcast (which isn't turned into an instruction).
3146 unsigned ResultReg = createResultReg(RC);
3147 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3148 TII.get(TargetOpcode::COPY), ResultReg)
3149 .addReg(DstReg, getKillRegState(true));
3150 updateValueMap(&Arg, ResultReg);
3155 static unsigned computeBytesPoppedByCalleeForSRet(const X86Subtarget *Subtarget,
3157 ImmutableCallSite *CS) {
3158 if (Subtarget->is64Bit())
3160 if (Subtarget->getTargetTriple().isOSMSVCRT())
3162 if (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3163 CC == CallingConv::HiPE || CC == CallingConv::Tail)
3167 if (CS->arg_empty() || !CS->paramHasAttr(0, Attribute::StructRet) ||
3168 CS->paramHasAttr(0, Attribute::InReg) || Subtarget->isTargetMCU())
3174 bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
3175 auto &OutVals = CLI.OutVals;
3176 auto &OutFlags = CLI.OutFlags;
3177 auto &OutRegs = CLI.OutRegs;
3178 auto &Ins = CLI.Ins;
3179 auto &InRegs = CLI.InRegs;
3180 CallingConv::ID CC = CLI.CallConv;
3181 bool &IsTailCall = CLI.IsTailCall;
3182 bool IsVarArg = CLI.IsVarArg;
3183 const Value *Callee = CLI.Callee;
3184 MCSymbol *Symbol = CLI.Symbol;
3186 bool Is64Bit = Subtarget->is64Bit();
3187 bool IsWin64 = Subtarget->isCallingConvWin64(CC);
3189 const CallInst *CI =
3190 CLI.CS ? dyn_cast<CallInst>(CLI.CS->getInstruction()) : nullptr;
3191 const Function *CalledFn = CI ? CI->getCalledFunction() : nullptr;
3193 // Call / invoke instructions with NoCfCheck attribute require special handling.
3196 CLI.CS ? dyn_cast<InvokeInst>(CLI.CS->getInstruction()) : nullptr;
3197 if ((CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck()))
3200 // Functions with no_caller_saved_registers also need special handling.
3201 if ((CI && CI->hasFnAttr("no_caller_saved_registers")) ||
3202 (CalledFn && CalledFn->hasFnAttribute("no_caller_saved_registers")))
3205 // Functions using retpoline for indirect calls need to use SDISel.
3206 if (Subtarget->useRetpolineIndirectCalls())
3209 // Handle only a limited set of calling conventions (those in the switch below) for now.
3211 default: return false;
3212 case CallingConv::C:
3213 case CallingConv::Fast:
3214 case CallingConv::Tail:
3215 case CallingConv::WebKit_JS:
3216 case CallingConv::Swift:
3217 case CallingConv::X86_FastCall:
3218 case CallingConv::X86_StdCall:
3219 case CallingConv::X86_ThisCall:
3220 case CallingConv::Win64:
3221 case CallingConv::X86_64_SysV:
3222 case CallingConv::CFGuard_Check:
3226 // Allow SelectionDAG isel to handle tail calls.
3230 // fastcc with -tailcallopt is intended to provide a guaranteed
3231 // tail call optimization. Fastisel doesn't know how to do that.
3232 if ((CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) ||
3233 CC == CallingConv::Tail)
3236 // Don't know how to handle Win64 varargs yet. Nothing special needed for
3237 // x86-32. Special handling for x86-64 is implemented.
3238 if (IsVarArg && IsWin64)
3241 // Don't know about inalloca yet.
3242 if (CLI.CS && CLI.CS->hasInAllocaArgument())
3245 for (auto Flag : CLI.OutFlags)
3246 if (Flag.isSwiftError())
3249 SmallVector<MVT, 16> OutVTs;
3250 SmallVector<unsigned, 16> ArgRegs;
3252 // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
3253 // instruction. This is safe because it is common to all FastISel supported
3254 // calling conventions on x86.
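// For example (sketch): a constant "i8 -1" argument marked signext is
// rewritten here to "i32 -1", while a zero-extended one becomes "i32 255",
// so no separate extend instruction is needed later.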
3255 for (int i = 0, e = OutVals.size(); i != e; ++i) {
3256 Value *&Val = OutVals[i];
3257 ISD::ArgFlagsTy Flags = OutFlags[i];
3258 if (auto *CI = dyn_cast<ConstantInt>(Val)) {
3259 if (CI->getBitWidth() < 32) {
3261 Val = ConstantExpr::getSExt(CI, Type::getInt32Ty(CI->getContext()));
3263 Val = ConstantExpr::getZExt(CI, Type::getInt32Ty(CI->getContext()));
3267 // Passing bools around ends up doing a trunc to i1 and passing it.
3268 // Codegen this as an argument + "and 1".
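// Illustrative IR for this case (names are made up):
//   %b = trunc i8 %x to i1
//   call void @use(i1 zeroext %b)
// Rather than materializing the trunc, the bit is produced as "%x & 1".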
3270 auto *TI = dyn_cast<TruncInst>(Val);
3272 if (TI && TI->getType()->isIntegerTy(1) && CLI.CS &&
3273 (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
3275 Value *PrevVal = TI->getOperand(0);
3276 ResultReg = getRegForValue(PrevVal);
3281 if (!isTypeLegal(PrevVal->getType(), VT))
3285 fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);
3287 if (!isTypeLegal(Val->getType(), VT))
3289 ResultReg = getRegForValue(Val);
3295 ArgRegs.push_back(ResultReg);
3296 OutVTs.push_back(VT);
3299 // Analyze operands of the call, assigning locations to each operand.
3300 SmallVector<CCValAssign, 16> ArgLocs;
3301 CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());
3303 // Allocate shadow area for Win64
3305 CCInfo.AllocateStack(32, 8);
3307 CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
3309 // Get a count of how many bytes are to be pushed on the stack.
3310 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3312 // Issue CALLSEQ_START
3313 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
3314 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
3315 .addImm(NumBytes).addImm(0).addImm(0);
3317 // Walk the register/memloc assignments, inserting copies/loads.
3318 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3319 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3320 CCValAssign const &VA = ArgLocs[i];
3321 const Value *ArgVal = OutVals[VA.getValNo()];
3322 MVT ArgVT = OutVTs[VA.getValNo()];
3324 if (ArgVT == MVT::x86mmx)
3327 unsigned ArgReg = ArgRegs[VA.getValNo()];
3329 // Promote the value if needed.
3330 switch (VA.getLocInfo()) {
3331 case CCValAssign::Full: break;
3332 case CCValAssign::SExt: {
3333 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
3334 "Unexpected extend");
3336 if (ArgVT == MVT::i1)
3339 bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
3341 assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
3342 ArgVT = VA.getLocVT();
3345 case CCValAssign::ZExt: {
3346 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
3347 "Unexpected extend");
3349 // Handle zero-extension from i1 to i8, which is common.
3350 if (ArgVT == MVT::i1) {
3351 // Set the high bits to zero.
3352 ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg, /*TODO: Kill=*/false);
3359 bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
3361 assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
3362 ArgVT = VA.getLocVT();
3365 case CCValAssign::AExt: {
3366 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
3367 "Unexpected extend");
3368 bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg,
3371 Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
3374 Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
3377 assert(Emitted && "Failed to emit a aext!"); (void)Emitted;
3378 ArgVT = VA.getLocVT();
3381 case CCValAssign::BCvt: {
3382 ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
3383 /*TODO: Kill=*/false);
3384 assert(ArgReg && "Failed to emit a bitcast!");
3385 ArgVT = VA.getLocVT();
3388 case CCValAssign::VExt:
3389 // VExt has not been implemented, so this should be impossible to reach
3390 // for now. However, fall back to SelectionDAG isel once it is implemented.
3392 case CCValAssign::AExtUpper:
3393 case CCValAssign::SExtUpper:
3394 case CCValAssign::ZExtUpper:
3395 case CCValAssign::FPExt:
3396 case CCValAssign::Trunc:
3397 llvm_unreachable("Unexpected loc info!");
3398 case CCValAssign::Indirect:
3399 // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully support this.
3404 if (VA.isRegLoc()) {
3405 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3406 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
3407 OutRegs.push_back(VA.getLocReg());
3409 assert(VA.isMemLoc());
3411 // Don't emit stores for undef values.
3412 if (isa<UndefValue>(ArgVal))
3415 unsigned LocMemOffset = VA.getLocMemOffset();
3417 AM.Base.Reg = RegInfo->getStackRegister();
3418 AM.Disp = LocMemOffset;
3419 ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()];
3420 unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
3421 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
3422 MachinePointerInfo::getStack(*FuncInfo.MF, LocMemOffset),
3423 MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
3424 if (Flags.isByVal()) {
3425 X86AddressMode SrcAM;
3426 SrcAM.Base.Reg = ArgReg;
3427 if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()))
3429 } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
3430 // If this is a really simple value, emit this with the Value* version
3431 // of X86FastEmitStore. If it isn't simple, we don't want to do this,
3432 // as it can cause us to reevaluate the argument.
3433 if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
3436 bool ValIsKill = hasTrivialKill(ArgVal);
3437 if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO))
3443 // ELF / PIC requires the GOT pointer in the EBX register before function calls via the PLT.
3445 if (Subtarget->isPICStyleGOT()) {
3446 unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3447 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3448 TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
3451 if (Is64Bit && IsVarArg && !IsWin64) {
3452 // From AMD64 ABI document:
3453 // For calls that may call functions that use varargs or stdargs
3454 // (prototype-less calls or calls to functions containing ellipsis (...) in
3455 // the declaration), %al is used as a hidden argument to specify the number
3456 // of SSE registers used. The contents of %al do not need to match exactly
3457 // the number of registers, but must be an upper bound on the number of SSE
3458 // registers used and is in the range 0 - 8 inclusive.
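// e.g. (illustrative): for a variadic call that passes two doubles in
// XMM0/XMM1 we emit "movb $2, %al" immediately before the call.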
3460 // Count the number of XMM registers allocated.
3461 static const MCPhysReg XMMArgRegs[] = {
3462 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3463 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3465 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
3466 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3467 && "SSE registers cannot be used when SSE is disabled");
3468 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
3469 X86::AL).addImm(NumXMMRegs);
3472 // Materialize callee address in a register. FIXME: GV address can be
3473 // handled with a CALLpcrel32 instead.
3474 X86AddressMode CalleeAM;
3475 if (!X86SelectCallAddress(Callee, CalleeAM))
3478 unsigned CalleeOp = 0;
3479 const GlobalValue *GV = nullptr;
3480 if (CalleeAM.GV != nullptr) {
3482 } else if (CalleeAM.Base.Reg != 0) {
3483 CalleeOp = CalleeAM.Base.Reg;
3488 MachineInstrBuilder MIB;
3490 // Register-indirect call.
3491 unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
3492 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc))
3496 assert(GV && "Not a direct call");
3497 // See if we need any target-specific flags on the GV operand.
3498 unsigned char OpFlags = Subtarget->classifyGlobalFunctionReference(GV);
3500 // This will be a direct call, or an indirect call through memory for
3501 // NonLazyBind calls or dllimport calls.
3502 bool NeedLoad = OpFlags == X86II::MO_DLLIMPORT ||
3503 OpFlags == X86II::MO_GOTPCREL ||
3504 OpFlags == X86II::MO_COFFSTUB;
3505 unsigned CallOpc = NeedLoad
3506 ? (Is64Bit ? X86::CALL64m : X86::CALL32m)
3507 : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
3509 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
3511 MIB.addReg(Is64Bit ? X86::RIP : 0).addImm(1).addReg(0);
3513 MIB.addSym(Symbol, OpFlags);
3515 MIB.addGlobalAddress(GV, 0, OpFlags);
3520 // Add a register mask operand representing the call-preserved registers.
3521 // Proper defs for return values will be added by setPhysRegsDeadExcept().
3522 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
3524 // Add an implicit use of the GOT pointer in EBX.
3525 if (Subtarget->isPICStyleGOT())
3526 MIB.addReg(X86::EBX, RegState::Implicit);
3528 if (Is64Bit && IsVarArg && !IsWin64)
3529 MIB.addReg(X86::AL, RegState::Implicit);
3531 // Add implicit physical register uses to the call.
3532 for (auto Reg : OutRegs)
3533 MIB.addReg(Reg, RegState::Implicit);
3535 // Issue CALLSEQ_END
3536 unsigned NumBytesForCalleeToPop =
3537 X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
3538 TM.Options.GuaranteedTailCallOpt)
3539 ? NumBytes // Callee pops everything.
3540 : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CS);
3541 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
3542 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
3543 .addImm(NumBytes).addImm(NumBytesForCalleeToPop);
3545 // Now handle call return values.
3546 SmallVector<CCValAssign, 16> RVLocs;
3547 CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
3548 CLI.RetTy->getContext());
3549 CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
3551 // Copy all of the result registers out of their specified physreg.
3552 unsigned ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
3553 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3554 CCValAssign &VA = RVLocs[i];
3555 EVT CopyVT = VA.getValVT();
3556 unsigned CopyReg = ResultReg + i;
3557 Register SrcReg = VA.getLocReg();
3559 // If this is x86-64, and we disabled SSE, we can't return FP values
3560 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
3561 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
3562 report_fatal_error("SSE register return with SSE disabled");
3565 // If we prefer to use the value in xmm registers, copy it out as f80 and
3566 // use a truncate to move it from fp stack reg to xmm reg.
3567 if ((SrcReg == X86::FP0 || SrcReg == X86::FP1) &&
3568 isScalarFPTypeInSSEReg(VA.getValVT())) {
3570 CopyReg = createResultReg(&X86::RFP80RegClass);
3573 // Copy out the result.
3574 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3575 TII.get(TargetOpcode::COPY), CopyReg).addReg(SrcReg);
3576 InRegs.push_back(VA.getLocReg());
3578 // Round the f80 to the right size, which also moves it to the appropriate
3579 // xmm register. This is accomplished by storing the f80 value in memory
3580 // and then loading it back.
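// Illustrative sketch for an f32 value returned in FP0:
//   COPY        %fp80 <- $fp0
//   ST_Fp80m32  [stack slot], %fp80    ; the store rounds f80 -> f32
//   MOVSSrm_alt %res, [stack slot]     ; reload into an XMM register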
3581 if (CopyVT != VA.getValVT()) {
3582 EVT ResVT = VA.getValVT();
3583 unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
3584 unsigned MemSize = ResVT.getSizeInBits()/8;
3585 int FI = MFI.CreateStackObject(MemSize, MemSize, false);
3586 addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3589 Opc = ResVT == MVT::f32 ? X86::MOVSSrm_alt : X86::MOVSDrm_alt;
3590 addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
3591 TII.get(Opc), ResultReg + i), FI);
3595 CLI.ResultReg = ResultReg;
3596 CLI.NumResultRegs = RVLocs.size();
3603 X86FastISel::fastSelectInstruction(const Instruction *I) {
3604 switch (I->getOpcode()) {
3606 case Instruction::Load:
3607 return X86SelectLoad(I);
3608 case Instruction::Store:
3609 return X86SelectStore(I);
3610 case Instruction::Ret:
3611 return X86SelectRet(I);
3612 case Instruction::ICmp:
3613 case Instruction::FCmp:
3614 return X86SelectCmp(I);
3615 case Instruction::ZExt:
3616 return X86SelectZExt(I);
3617 case Instruction::SExt:
3618 return X86SelectSExt(I);
3619 case Instruction::Br:
3620 return X86SelectBranch(I);
3621 case Instruction::LShr:
3622 case Instruction::AShr:
3623 case Instruction::Shl:
3624 return X86SelectShift(I);
3625 case Instruction::SDiv:
3626 case Instruction::UDiv:
3627 case Instruction::SRem:
3628 case Instruction::URem:
3629 return X86SelectDivRem(I);
3630 case Instruction::Select:
3631 return X86SelectSelect(I);
3632 case Instruction::Trunc:
3633 return X86SelectTrunc(I);
3634 case Instruction::FPExt:
3635 return X86SelectFPExt(I);
3636 case Instruction::FPTrunc:
3637 return X86SelectFPTrunc(I);
3638 case Instruction::SIToFP:
3639 return X86SelectSIToFP(I);
3640 case Instruction::UIToFP:
3641 return X86SelectUIToFP(I);
3642 case Instruction::IntToPtr: // Deliberate fall-through.
3643 case Instruction::PtrToInt: {
3644 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
3645 EVT DstVT = TLI.getValueType(DL, I->getType());
3646 if (DstVT.bitsGT(SrcVT))
3647 return X86SelectZExt(I);
3648 if (DstVT.bitsLT(SrcVT))
3649 return X86SelectTrunc(I);
3650 unsigned Reg = getRegForValue(I->getOperand(0));
3651 if (Reg == 0) return false;
3652 updateValueMap(I, Reg);
3655 case Instruction::BitCast: {
3656 // Select SSE2/AVX bitcasts between 128/256/512 bit vector types.
3657 if (!Subtarget->hasSSE2())
3661 if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT) ||
3662 !isTypeLegal(I->getType(), DstVT))
3665 // Only allow vectors that use xmm/ymm/zmm.
3666 if (!SrcVT.isVector() || !DstVT.isVector() ||
3667 SrcVT.getVectorElementType() == MVT::i1 ||
3668 DstVT.getVectorElementType() == MVT::i1)
3671 unsigned Reg = getRegForValue(I->getOperand(0));
3675 // No instruction is needed for conversion. Reuse the register used by
3676 // the first operand.
3677 updateValueMap(I, Reg);
unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
  if (VT > MVT::i64)
    return 0;

  uint64_t Imm = CI->getZExtValue();
  if (Imm == 0) {
    unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type");
    case MVT::i1:
    case MVT::i8:
      return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
                                        X86::sub_8bit);
    case MVT::i16:
      return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
                                        X86::sub_16bit);
    case MVT::i32:
      return SrcReg;
    case MVT::i64: {
      unsigned ResultReg = createResultReg(&X86::GR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
          .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
      return ResultReg;
    }
    }
  }

  unsigned Opc = 0;
  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected value type");
  case MVT::i1:
    VT = MVT::i8;
    LLVM_FALLTHROUGH;
  case MVT::i8:  Opc = X86::MOV8ri;  break;
  case MVT::i16: Opc = X86::MOV16ri; break;
  case MVT::i32: Opc = X86::MOV32ri; break;
  case MVT::i64: {
    if (isUInt<32>(Imm))
      Opc = X86::MOV32ri64;
    else if (isInt<32>(Imm))
      Opc = X86::MOV64ri32;
    else
      Opc = X86::MOV64ri;
    break;
  }
  }
  return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
}
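
// Materialize a floating-point constant with a load from the constant pool,
// honoring the PIC base register or RIP-relative addressing when required.
// +0.0 is handled separately by fastMaterializeFloatZero.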
unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
  if (CFP->isNullValue())
    return fastMaterializeFloatZero(CFP);

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return 0;

  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  bool HasAVX = Subtarget->hasAVX();
  bool HasAVX512 = Subtarget->hasAVX512();
  switch (VT.SimpleTy) {
  default: return 0;
  case MVT::f32:
    if (X86ScalarSSEf32)
      Opc = HasAVX512 ? X86::VMOVSSZrm_alt :
            HasAVX    ? X86::VMOVSSrm_alt :
                        X86::MOVSSrm_alt;
    else
      Opc = X86::LD_Fp32m;
    break;
  case MVT::f64:
    if (X86ScalarSSEf64)
      Opc = HasAVX512 ? X86::VMOVSDZrm_alt :
            HasAVX    ? X86::VMOVSDrm_alt :
                        X86::MOVSDrm_alt;
    else
      Opc = X86::LD_Fp64m;
    break;
  case MVT::f80:
    // No f80 support yet.
    return 0;
  }

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // Alignment of vector types. FIXME!
    Align = DL.getTypeAllocSize(CFP->getType());
  }

  // x86-32 PIC requires a PIC base register for constant pools.
  unsigned PICBase = 0;
  unsigned char OpFlag = Subtarget->classifyLocalReference(nullptr);
  if (OpFlag == X86II::MO_PIC_BASE_OFFSET)
    PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
  else if (OpFlag == X86II::MO_GOTOFF)
    PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
  else if (Subtarget->is64Bit() && TM.getCodeModel() == CodeModel::Small)
    PICBase = X86::RIP;

  // Create the load from the constant pool.
  unsigned CPI = MCP.getConstantPoolIndex(CFP, Align);
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));

  if (CM == CodeModel::Large) {
    unsigned AddrReg = createResultReg(&X86::GR64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
            AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);
    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                      TII.get(Opc), ResultReg);
    addDirectMem(MIB, AddrReg);
    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getConstantPool(*FuncInfo.MF),
        MachineMemOperand::MOLoad, DL.getPointerSize(), Align);
    MIB->addMemOperand(*FuncInfo.MF, MMO);
    return ResultReg;
  }

  addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                   TII.get(Opc), ResultReg),
                           CPI, PICBase, OpFlag);
  return ResultReg;
}
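
// Materialize the address of a GlobalValue, either by reusing the base
// register computed by X86SelectAddress, emitting an LEA, or emitting a
// 64-bit absolute MOV when static relocation with 64-bit pointers may place
// the symbol more than 32 bits away.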
unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return 0;

  // Materialize addresses with LEA/MOV instructions.
  X86AddressMode AM;
  if (X86SelectAddress(GV, AM)) {
    // If the expression is just a basereg, then we're done, otherwise we need
    // to emit an LEA.
    if (AM.BaseType == X86AddressMode::RegBase &&
        AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
      return AM.Base.Reg;

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
    if (TM.getRelocationModel() == Reloc::Static &&
        TLI.getPointerTy(DL) == MVT::i64) {
      // The displacement could be more than 32 bits away, so we need to use
      // an instruction with a 64-bit immediate.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
              ResultReg)
          .addGlobalAddress(GV);
    } else {
      unsigned Opc =
          TLI.getPointerTy(DL) == MVT::i32
              ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
              : X86::LEA64r;
      addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                             TII.get(Opc), ResultReg), AM);
    }
    return ResultReg;
  }
  return 0;
}
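
// Materialize an arbitrary constant by dispatching on its kind to the
// integer, floating-point, or global-value helper above.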
unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple())
    return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const auto *CI = dyn_cast<ConstantInt>(C))
    return X86MaterializeInt(CI, VT);
  else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return X86MaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return X86MaterializeGV(GV, VT);

  return 0;
}
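
// Materialize the address of a static alloca as an LEA of its frame index.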
unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
  // Fail on dynamic allocas. At this point, getRegForValue has already
  // checked its CSE maps, so if we're here trying to handle a dynamic
  // alloca, we're not going to succeed. X86SelectAddress has a
  // check for dynamic allocas, because it's called directly from
  // various places, but targetMaterializeAlloca also needs a check
  // in order to avoid recursion between getRegForValue,
  // X86SelectAddress, and targetMaterializeAlloca.
  if (!FuncInfo.StaticAllocaMap.count(C))
    return 0;
  assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");

  X86AddressMode AM;
  if (!X86SelectAddress(C, AM))
    return 0;
  unsigned Opc =
      TLI.getPointerTy(DL) == MVT::i32
          ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
          : X86::LEA64r;
  const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
  unsigned ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                         TII.get(Opc), ResultReg), AM);
  return ResultReg;
}
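
// Materialize a floating-point zero without a constant-pool load: use the
// FsFLD0SS/FsFLD0SD pseudos when SSE is available and an x87 fldz (LD_Fp0*)
// otherwise.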
unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
  MVT VT;
  if (!isTypeLegal(CF->getType(), VT))
    return 0;

  // Get opcode and regclass for the given zero.
  bool HasAVX512 = Subtarget->hasAVX512();
  unsigned Opc = 0;
  switch (VT.SimpleTy) {
  default: return 0;
  case MVT::f32:
    if (X86ScalarSSEf32)
      Opc = HasAVX512 ? X86::AVX512_FsFLD0SS : X86::FsFLD0SS;
    else
      Opc = X86::LD_Fp032;
    break;
  case MVT::f64:
    if (X86ScalarSSEf64)
      Opc = HasAVX512 ? X86::AVX512_FsFLD0SD : X86::FsFLD0SD;
    else
      Opc = X86::LD_Fp064;
    break;
  case MVT::f80:
    // No f80 support yet.
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
  return ResultReg;
}
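
// Fold the load described by LI directly into operand OpNo of MI via
// X86InstrInfo::foldMemoryOperandImpl, fix up the folded index register's
// class if needed, and erase the original instruction on success.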
bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  const Value *Ptr = LI->getPointerOperand();
  X86AddressMode AM;
  if (!X86SelectAddress(Ptr, AM))
    return false;

  const X86InstrInfo &XII = (const X86InstrInfo &)TII;

  unsigned Size = DL.getTypeAllocSize(LI->getType());
  unsigned Alignment = LI->getAlignment();

  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = DL.getABITypeAlignment(LI->getType());

  SmallVector<MachineOperand, 8> AddrOps;
  AM.getFullAddress(AddrOps);

  MachineInstr *Result = XII.foldMemoryOperandImpl(
      *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
      /*AllowCommute=*/true);
  if (!Result)
    return false;

  // The index register could be in the wrong register class. Unfortunately,
  // foldMemoryOperandImpl could have commuted the instruction, so it's not
  // enough to just look at OpNo + the offset to the index reg. We actually
  // need to scan the instruction to find the index reg and see if it's in
  // the correct reg class.
  unsigned OperandNo = 0;
  for (MachineInstr::mop_iterator I = Result->operands_begin(),
       E = Result->operands_end(); I != E; ++I, ++OperandNo) {
    MachineOperand &MO = *I;
    if (!MO.isReg() || MO.isDef() || MO.getReg() != AM.IndexReg)
      continue;
    // Found the index reg, now try to rewrite it.
    unsigned IndexReg = constrainOperandRegClass(Result->getDesc(),
                                                 MO.getReg(), OperandNo);
    if (IndexReg == MO.getReg())
      continue;
    MO.setReg(IndexReg);
  }

  Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
  Result->cloneInstrSymbols(*FuncInfo.MF, *MI);
  MachineBasicBlock::iterator I(MI);
  removeDeadCode(I, std::next(I));
  return true;
}
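
// Emit an instruction that takes four register operands, constraining each
// operand register to the class the MCInstrDesc expects. If the instruction
// has no explicit definition, copy the result out of its first implicit def.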
unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
                                        const TargetRegisterClass *RC,
                                        unsigned Op0, bool Op0IsKill,
                                        unsigned Op1, bool Op1IsKill,
                                        unsigned Op2, bool Op2IsKill,
                                        unsigned Op3, bool Op3IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
  Op3 = constrainOperandRegClass(II, Op3, II.getNumDefs() + 3);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill))
        .addReg(Op3, getKillRegState(Op3IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill))
        .addReg(Op3, getKillRegState(Op3IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
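
// Factory hook used by the X86 backend to create its FastISel instance.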
namespace llvm {
  FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {
    return new X86FastISel(funcInfo, libInfo);
  }
}