1 //===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file contains the X86 implementation of the TargetInstrInfo class.
11 //===----------------------------------------------------------------------===//
13 #include "X86InstrInfo.h"
15 #include "X86InstrBuilder.h"
16 #include "X86InstrFoldTables.h"
17 #include "X86MachineFunctionInfo.h"
18 #include "X86Subtarget.h"
19 #include "X86TargetMachine.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/Sequence.h"
22 #include "llvm/CodeGen/LivePhysRegs.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineConstantPool.h"
25 #include "llvm/CodeGen/MachineDominators.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineModuleInfo.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/StackMaps.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/DebugInfoMetadata.h"
34 #include "llvm/MC/MCAsmInfo.h"
35 #include "llvm/MC/MCExpr.h"
36 #include "llvm/MC/MCInst.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/Target/TargetOptions.h"
45 #define DEBUG_TYPE "x86-instr-info"
47 #define GET_INSTRINFO_CTOR_DTOR
48 #include "X86GenInstrInfo.inc"
51 NoFusing("disable-spill-fusing",
52 cl::desc("Disable fusing of spill code into instructions"),
55 PrintFailedFusing("print-failed-fuse-candidates",
56 cl::desc("Print instructions that the allocator wants to"
57 " fuse, but the X86 backend currently can't"),
60 ReMatPICStubLoad("remat-pic-stub-load",
61 cl::desc("Re-materialize load from stub in PIC mode"),
62 cl::init(false), cl::Hidden);
63 static cl::opt<unsigned>
64 PartialRegUpdateClearance("partial-reg-update-clearance",
65 cl::desc("Clearance between two register writes "
66 "for inserting XOR to avoid partial "
68 cl::init(64), cl::Hidden);
69 static cl::opt<unsigned>
70 UndefRegClearance("undef-reg-clearance",
71 cl::desc("How many idle instructions we would like before "
72 "certain undef register reads"),
73 cl::init(128), cl::Hidden);
76 // Pin the vtable to this file.
77 void X86InstrInfo::anchor() {}
79 X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
80 : X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
81 : X86::ADJCALLSTACKDOWN32),
82 (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
83 : X86::ADJCALLSTACKUP32),
85 (STI.is64Bit() ? X86::RETQ : X86::RETL)),
86 Subtarget(STI), RI(STI.getTargetTriple()) {
90 X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
91 unsigned &SrcReg, unsigned &DstReg,
92 unsigned &SubIdx) const {
93 switch (MI.getOpcode()) {
100 if (!Subtarget.is64Bit())
// It's not always legal to reference the low 8 bits of the larger
// register in 32-bit mode.
105 case X86::MOVSX32rr16:
106 case X86::MOVZX32rr16:
107 case X86::MOVSX64rr16:
108 case X86::MOVSX64rr32: {
109 if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
112 SrcReg = MI.getOperand(1).getReg();
113 DstReg = MI.getOperand(0).getReg();
114 switch (MI.getOpcode()) {
115 default: llvm_unreachable("Unreachable!");
116 case X86::MOVSX16rr8:
117 case X86::MOVZX16rr8:
118 case X86::MOVSX32rr8:
119 case X86::MOVZX32rr8:
120 case X86::MOVSX64rr8:
121 SubIdx = X86::sub_8bit;
123 case X86::MOVSX32rr16:
124 case X86::MOVZX32rr16:
125 case X86::MOVSX64rr16:
126 SubIdx = X86::sub_16bit;
128 case X86::MOVSX64rr32:
129 SubIdx = X86::sub_32bit;
138 int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
139 const MachineFunction *MF = MI.getParent()->getParent();
140 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
142 if (isFrameInstr(MI)) {
143 unsigned StackAlign = TFI->getStackAlignment();
144 int SPAdj = alignTo(getFrameSize(MI), StackAlign);
145 SPAdj -= getFrameAdjustment(MI);
146 if (!isFrameSetup(MI))
151 // To know whether a call adjusts the stack, we need information
152 // that is bound to the following ADJCALLSTACKUP pseudo.
153 // Look for the next ADJCALLSTACKUP that follows the call.
155 const MachineBasicBlock *MBB = MI.getParent();
156 auto I = ++MachineBasicBlock::const_iterator(MI);
157 for (auto E = MBB->end(); I != E; ++I) {
158 if (I->getOpcode() == getCallFrameDestroyOpcode() ||
163 // If we could not find a frame destroy opcode, then it has already
164 // been simplified, so we don't care.
165 if (I->getOpcode() != getCallFrameDestroyOpcode())
168 return -(I->getOperand(1).getImm());
// Currently we handle only the PUSHes we can reasonably expect to see
// in call sequences.
173 switch (MI.getOpcode()) {
/// Return true and the FrameIndex if the specified
/// operand and following operands form a reference to the stack frame.
193 bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
194 int &FrameIndex) const {
195 if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
196 MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
197 MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
198 MI.getOperand(Op + X86::AddrDisp).isImm() &&
199 MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
200 MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
201 MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
202 FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
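// Illustrative note (not from the original source): the pattern accepted
// above is the simple addressing mode [FrameIndex + 0], i.e. base = FI#n,
// scale = 1, no index register, displacement = 0 -- the form produced for a
// plain spill or reload of a stack slot before frame-index elimination.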
208 static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) {
222 case X86::MOVSSrm_alt:
224 case X86::VMOVSSrm_alt:
226 case X86::VMOVSSZrm_alt:
233 case X86::MOVSDrm_alt:
235 case X86::VMOVSDrm_alt:
237 case X86::VMOVSDZrm_alt:
238 case X86::MMX_MOVD64rm:
239 case X86::MMX_MOVQ64rm:
255 case X86::VMOVAPSZ128rm:
256 case X86::VMOVUPSZ128rm:
257 case X86::VMOVAPSZ128rm_NOVLX:
258 case X86::VMOVUPSZ128rm_NOVLX:
259 case X86::VMOVAPDZ128rm:
260 case X86::VMOVUPDZ128rm:
261 case X86::VMOVDQU8Z128rm:
262 case X86::VMOVDQU16Z128rm:
263 case X86::VMOVDQA32Z128rm:
264 case X86::VMOVDQU32Z128rm:
265 case X86::VMOVDQA64Z128rm:
266 case X86::VMOVDQU64Z128rm:
269 case X86::VMOVAPSYrm:
270 case X86::VMOVUPSYrm:
271 case X86::VMOVAPDYrm:
272 case X86::VMOVUPDYrm:
273 case X86::VMOVDQAYrm:
274 case X86::VMOVDQUYrm:
275 case X86::VMOVAPSZ256rm:
276 case X86::VMOVUPSZ256rm:
277 case X86::VMOVAPSZ256rm_NOVLX:
278 case X86::VMOVUPSZ256rm_NOVLX:
279 case X86::VMOVAPDZ256rm:
280 case X86::VMOVUPDZ256rm:
281 case X86::VMOVDQU8Z256rm:
282 case X86::VMOVDQU16Z256rm:
283 case X86::VMOVDQA32Z256rm:
284 case X86::VMOVDQU32Z256rm:
285 case X86::VMOVDQA64Z256rm:
286 case X86::VMOVDQU64Z256rm:
289 case X86::VMOVAPSZrm:
290 case X86::VMOVUPSZrm:
291 case X86::VMOVAPDZrm:
292 case X86::VMOVUPDZrm:
293 case X86::VMOVDQU8Zrm:
294 case X86::VMOVDQU16Zrm:
295 case X86::VMOVDQA32Zrm:
296 case X86::VMOVDQU32Zrm:
297 case X86::VMOVDQA64Zrm:
298 case X86::VMOVDQU64Zrm:
304 static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) {
328 case X86::MMX_MOVD64mr:
329 case X86::MMX_MOVQ64mr:
330 case X86::MMX_MOVNTQmr:
346 case X86::VMOVUPSZ128mr:
347 case X86::VMOVAPSZ128mr:
348 case X86::VMOVUPSZ128mr_NOVLX:
349 case X86::VMOVAPSZ128mr_NOVLX:
350 case X86::VMOVUPDZ128mr:
351 case X86::VMOVAPDZ128mr:
352 case X86::VMOVDQA32Z128mr:
353 case X86::VMOVDQU32Z128mr:
354 case X86::VMOVDQA64Z128mr:
355 case X86::VMOVDQU64Z128mr:
356 case X86::VMOVDQU8Z128mr:
357 case X86::VMOVDQU16Z128mr:
360 case X86::VMOVUPSYmr:
361 case X86::VMOVAPSYmr:
362 case X86::VMOVUPDYmr:
363 case X86::VMOVAPDYmr:
364 case X86::VMOVDQUYmr:
365 case X86::VMOVDQAYmr:
366 case X86::VMOVUPSZ256mr:
367 case X86::VMOVAPSZ256mr:
368 case X86::VMOVUPSZ256mr_NOVLX:
369 case X86::VMOVAPSZ256mr_NOVLX:
370 case X86::VMOVUPDZ256mr:
371 case X86::VMOVAPDZ256mr:
372 case X86::VMOVDQU8Z256mr:
373 case X86::VMOVDQU16Z256mr:
374 case X86::VMOVDQA32Z256mr:
375 case X86::VMOVDQU32Z256mr:
376 case X86::VMOVDQA64Z256mr:
377 case X86::VMOVDQU64Z256mr:
380 case X86::VMOVUPSZmr:
381 case X86::VMOVAPSZmr:
382 case X86::VMOVUPDZmr:
383 case X86::VMOVAPDZmr:
384 case X86::VMOVDQU8Zmr:
385 case X86::VMOVDQU16Zmr:
386 case X86::VMOVDQA32Zmr:
387 case X86::VMOVDQU32Zmr:
388 case X86::VMOVDQA64Zmr:
389 case X86::VMOVDQU64Zmr:
396 unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
397 int &FrameIndex) const {
399 return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
402 unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
404 unsigned &MemBytes) const {
405 if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
406 if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
407 return MI.getOperand(0).getReg();
411 unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
412 int &FrameIndex) const {
414 if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
416 if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
418 // Check for post-frame index elimination operations
419 SmallVector<const MachineMemOperand *, 1> Accesses;
420 if (hasLoadFromStackSlot(MI, Accesses)) {
422 cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
430 unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
431 int &FrameIndex) const {
433 return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
436 unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
438 unsigned &MemBytes) const {
439 if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
440 if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
441 isFrameOperand(MI, 0, FrameIndex))
442 return MI.getOperand(X86::AddrNumOperands).getReg();
446 unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
447 int &FrameIndex) const {
449 if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
451 if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
453 // Check for post-frame index elimination operations
454 SmallVector<const MachineMemOperand *, 1> Accesses;
455 if (hasStoreToStackSlot(MI, Accesses)) {
457 cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
/// Return true if the register is a PIC base, i.e., defined by X86::MOVPC32r.
466 static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
467 // Don't waste compile time scanning use-def chains of physregs.
468 if (!Register::isVirtualRegister(BaseReg))
470 bool isPICBase = false;
471 for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
472 E = MRI.def_instr_end(); I != E; ++I) {
473 MachineInstr *DefMI = &*I;
474 if (DefMI->getOpcode() != X86::MOVPC32r)
476 assert(!isPICBase && "More than one PIC base?");
482 bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
483 AAResults *AA) const {
484 switch (MI.getOpcode()) {
// This function should only be called for opcodes with the ReMaterializable
// flag set.
488 llvm_unreachable("Unknown rematerializable operation!");
491 case X86::LOAD_STACK_GUARD:
492 case X86::AVX1_SETALLONES:
493 case X86::AVX2_SETALLONES:
494 case X86::AVX512_128_SET0:
495 case X86::AVX512_256_SET0:
496 case X86::AVX512_512_SET0:
497 case X86::AVX512_512_SETALLONES:
498 case X86::AVX512_FsFLD0SD:
499 case X86::AVX512_FsFLD0SS:
500 case X86::AVX512_FsFLD0F128:
504 case X86::FsFLD0F128:
512 case X86::MOV32ImmSExti8:
517 case X86::MOV64ImmSExti8:
519 case X86::V_SETALLONES:
528 case X86::MOV8rm_NOREX:
533 case X86::MOVSSrm_alt:
535 case X86::MOVSDrm_alt:
543 case X86::VMOVSSrm_alt:
545 case X86::VMOVSDrm_alt:
552 case X86::VMOVAPSYrm:
553 case X86::VMOVUPSYrm:
554 case X86::VMOVAPDYrm:
555 case X86::VMOVUPDYrm:
556 case X86::VMOVDQAYrm:
557 case X86::VMOVDQUYrm:
558 case X86::MMX_MOVD64rm:
559 case X86::MMX_MOVQ64rm:
562 case X86::VMOVSSZrm_alt:
564 case X86::VMOVSDZrm_alt:
565 case X86::VMOVAPDZ128rm:
566 case X86::VMOVAPDZ256rm:
567 case X86::VMOVAPDZrm:
568 case X86::VMOVAPSZ128rm:
569 case X86::VMOVAPSZ256rm:
570 case X86::VMOVAPSZ128rm_NOVLX:
571 case X86::VMOVAPSZ256rm_NOVLX:
572 case X86::VMOVAPSZrm:
573 case X86::VMOVDQA32Z128rm:
574 case X86::VMOVDQA32Z256rm:
575 case X86::VMOVDQA32Zrm:
576 case X86::VMOVDQA64Z128rm:
577 case X86::VMOVDQA64Z256rm:
578 case X86::VMOVDQA64Zrm:
579 case X86::VMOVDQU16Z128rm:
580 case X86::VMOVDQU16Z256rm:
581 case X86::VMOVDQU16Zrm:
582 case X86::VMOVDQU32Z128rm:
583 case X86::VMOVDQU32Z256rm:
584 case X86::VMOVDQU32Zrm:
585 case X86::VMOVDQU64Z128rm:
586 case X86::VMOVDQU64Z256rm:
587 case X86::VMOVDQU64Zrm:
588 case X86::VMOVDQU8Z128rm:
589 case X86::VMOVDQU8Z256rm:
590 case X86::VMOVDQU8Zrm:
591 case X86::VMOVUPDZ128rm:
592 case X86::VMOVUPDZ256rm:
593 case X86::VMOVUPDZrm:
594 case X86::VMOVUPSZ128rm:
595 case X86::VMOVUPSZ256rm:
596 case X86::VMOVUPSZ128rm_NOVLX:
597 case X86::VMOVUPSZ256rm_NOVLX:
598 case X86::VMOVUPSZrm: {
599 // Loads from constant pools are trivially rematerializable.
600 if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
601 MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
602 MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
603 MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
604 MI.isDereferenceableInvariantLoad(AA)) {
605 Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
606 if (BaseReg == 0 || BaseReg == X86::RIP)
608 // Allow re-materialization of PIC load.
609 if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
611 const MachineFunction &MF = *MI.getParent()->getParent();
612 const MachineRegisterInfo &MRI = MF.getRegInfo();
613 return regIsPICBase(BaseReg, MRI);
620 if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
621 MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
622 MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
623 !MI.getOperand(1 + X86::AddrDisp).isReg()) {
624 // lea fi#, lea GV, etc. are all rematerializable.
625 if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
627 Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
630 // Allow re-materialization of lea PICBase + x.
631 const MachineFunction &MF = *MI.getParent()->getParent();
632 const MachineRegisterInfo &MRI = MF.getRegInfo();
633 return regIsPICBase(BaseReg, MRI);
640 void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
641 MachineBasicBlock::iterator I,
642 unsigned DestReg, unsigned SubIdx,
643 const MachineInstr &Orig,
644 const TargetRegisterInfo &TRI) const {
645 bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
646 if (ClobbersEFLAGS && !isSafeToClobberEFLAGS(MBB, I)) {
647 // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
650 switch (Orig.getOpcode()) {
651 case X86::MOV32r0: Value = 0; break;
652 case X86::MOV32r1: Value = 1; break;
653 case X86::MOV32r_1: Value = -1; break;
655 llvm_unreachable("Unexpected instruction!");
658 const DebugLoc &DL = Orig.getDebugLoc();
659 BuildMI(MBB, I, DL, get(X86::MOV32ri))
660 .add(Orig.getOperand(0))
663 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
667 MachineInstr &NewMI = *std::prev(I);
668 NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
671 /// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
672 bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
673 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
674 MachineOperand &MO = MI.getOperand(i);
675 if (MO.isReg() && MO.isDef() &&
676 MO.getReg() == X86::EFLAGS && !MO.isDead()) {
/// Return the shift count of a machine operand, truncated to the width the
/// hardware actually honors (5 bits, or 6 bits with a REX.W prefix).
684 inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
685 unsigned ShiftAmtOperandIdx) {
686 // The shift count is six bits with the REX.W prefix and five bits without.
687 unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
688 unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
689 return Imm & ShiftCountMask;
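// Worked example (illustrative): hardware masks shift counts to 5 bits
// (6 bits for 64-bit shifts with REX.W), so a SHL32ri with immediate 37
// really shifts by 37 & 31 == 5, and that truncated value is what gets
// returned here.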
/// Check whether the given shift count can be represented by an LEA
/// instruction's scale factor.
694 inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
695 // Left shift instructions can be transformed into load-effective-address
696 // instructions if we can encode them appropriately.
697 // A LEA instruction utilizes a SIB byte to encode its scale factor.
698 // The SIB.scale field is two bits wide which means that we can encode any
699 // shift amount less than 4.
700 return ShAmt < 4 && ShAmt > 0;
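// Worked example (illustrative): "shll $3, %eax" can become
// "leal (,%rax,8), %eax" because a shift of 1/2/3 maps to an LEA scale of
// 2/4/8, which fits the 2-bit SIB.scale field; a shift of 4 would need a
// scale of 16 and therefore cannot be expressed as an LEA.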
703 bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
704 unsigned Opc, bool AllowSP, Register &NewSrc,
705 bool &isKill, MachineOperand &ImplicitOp,
706 LiveVariables *LV) const {
707 MachineFunction &MF = *MI.getParent()->getParent();
708 const TargetRegisterClass *RC;
710 RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
712 RC = Opc != X86::LEA32r ?
713 &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
715 Register SrcReg = Src.getReg();
// For both LEA64 and LEA32 the register already has essentially the right
// type (32-bit or 64-bit); we may just need to forbid SP.
719 if (Opc != X86::LEA64_32r) {
721 isKill = Src.isKill();
722 assert(!Src.isUndef() && "Undef op doesn't need optimization");
724 if (Register::isVirtualRegister(NewSrc) &&
725 !MF.getRegInfo().constrainRegClass(NewSrc, RC))
731 // This is for an LEA64_32r and incoming registers are 32-bit. One way or
732 // another we need to add 64-bit registers to the final MI.
733 if (Register::isPhysicalRegister(SrcReg)) {
735 ImplicitOp.setImplicit();
737 NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
738 isKill = Src.isKill();
739 assert(!Src.isUndef() && "Undef op doesn't need optimization");
// This is a virtual register of the wrong class; we have to create a
// temporary 64-bit vreg to feed into the LEA.
743 NewSrc = MF.getRegInfo().createVirtualRegister(RC);
745 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
746 .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
749 // Which is obviously going to be dead after we're done with it.
753 LV->replaceKillInstruction(SrcReg, MI, *Copy);
756 // We've set all the parameters without issue.
760 MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
761 unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI,
762 LiveVariables *LV, bool Is8BitOp) const {
763 // We handle 8-bit adds and various 16-bit opcodes in the switch below.
764 MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
765 assert((Is8BitOp || RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
766 *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
767 "Unexpected type for LEA transform");
769 // TODO: For a 32-bit target, we need to adjust the LEA variables with
770 // something like this:
771 // Opcode = X86::LEA32r;
772 // InRegLEA = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
774 // Is8BitOp ? RegInfo.createVirtualRegister(&X86::GR32ABCD_RegClass)
775 // : RegInfo.createVirtualRegister(&X86::GR32RegClass);
776 if (!Subtarget.is64Bit())
779 unsigned Opcode = X86::LEA64_32r;
780 Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
781 Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);
// Build and insert into an implicit UNDEF value. This is OK because
// we will be shifting and then extracting the lower 8/16 bits.
// This has the potential to cause a partial register stall, e.g.:
786 // movw (%rbp,%rcx,2), %dx
787 // leal -65(%rdx), %esi
788 // But testing has shown this *does* help performance in 64-bit mode (at
789 // least on modern x86 machines).
790 MachineBasicBlock::iterator MBBI = MI.getIterator();
791 Register Dest = MI.getOperand(0).getReg();
792 Register Src = MI.getOperand(1).getReg();
793 bool IsDead = MI.getOperand(0).isDead();
794 bool IsKill = MI.getOperand(1).isKill();
795 unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
796 assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
797 BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
798 MachineInstr *InsMI =
799 BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
800 .addReg(InRegLEA, RegState::Define, SubReg)
801 .addReg(Src, getKillRegState(IsKill));
803 MachineInstrBuilder MIB =
804 BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
806 default: llvm_unreachable("Unreachable!");
809 unsigned ShAmt = MI.getOperand(2).getImm();
810 MIB.addReg(0).addImm(1ULL << ShAmt)
811 .addReg(InRegLEA, RegState::Kill).addImm(0).addReg(0);
816 addRegOffset(MIB, InRegLEA, true, 1);
820 addRegOffset(MIB, InRegLEA, true, -1);
826 case X86::ADD16ri_DB:
827 case X86::ADD16ri8_DB:
828 addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm());
833 case X86::ADD16rr_DB: {
834 Register Src2 = MI.getOperand(2).getReg();
835 bool IsKill2 = MI.getOperand(2).isKill();
836 assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
837 unsigned InRegLEA2 = 0;
838 MachineInstr *InsMI2 = nullptr;
// ADD8rr/ADD16rr killed %reg1028, %reg1028: both sources are the same
// register, so only a single insert_subreg copy is needed.
842 addRegReg(MIB, InRegLEA, true, InRegLEA, false);
844 if (Subtarget.is64Bit())
845 InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
847 InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
848 // Build and insert into an implicit UNDEF value. This is OK because
849 // we will be shifting and then extracting the lower 8/16-bits.
850 BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA2);
851 InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
852 .addReg(InRegLEA2, RegState::Define, SubReg)
853 .addReg(Src2, getKillRegState(IsKill2));
854 addRegReg(MIB, InRegLEA, true, InRegLEA2, true);
856 if (LV && IsKill2 && InsMI2)
857 LV->replaceKillInstruction(Src2, MI, *InsMI2);
862 MachineInstr *NewMI = MIB;
863 MachineInstr *ExtMI =
864 BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
865 .addReg(Dest, RegState::Define | getDeadRegState(IsDead))
866 .addReg(OutRegLEA, RegState::Kill, SubReg);
869 // Update live variables.
870 LV->getVarInfo(InRegLEA).Kills.push_back(NewMI);
871 LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI);
873 LV->replaceKillInstruction(Src, MI, *InsMI);
875 LV->replaceKillInstruction(Dest, MI, *ExtMI);
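// Summary sketch (illustrative, not from the original source): for a 16-bit
// add such as "%dst:gr16 = ADD16ri %src:gr16, 5", the code above emits
// roughly
//   %in:gr64_nosp  = IMPLICIT_DEF
//   %in.sub_16bit  = COPY %src
//   %out:gr32      = LEA64_32r %in, 1, $noreg, 5, $noreg
//   %dst           = COPY %out.sub_16bit
// so the arithmetic is done by an LEA on a 64-bit base register and the
// result is extracted back into the original 16-bit register.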
881 /// This method must be implemented by targets that
882 /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
883 /// may be able to convert a two-address instruction into a true
884 /// three-address instruction on demand. This allows the X86 target (for
885 /// example) to convert ADD and SHL instructions into LEA instructions if they
886 /// would require register copies due to two-addressness.
888 /// This method returns a null pointer if the transformation cannot be
889 /// performed, otherwise it returns the new instruction.
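/// Illustrative example (not from the original source): if %eax is still
/// needed after "addl %esi, %eax", the two-address form forces a copy first,
/// e.g. "movl %eax, %ecx; addl %esi, %ecx", whereas
/// "leal (%rax,%rsi), %ecx" produces the sum in one instruction without
/// clobbering either source.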
892 X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
893 MachineInstr &MI, LiveVariables *LV) const {
// The following opcodes also set the condition code register(s). Only
// convert them to an equivalent LEA if the condition code register defs are
// dead.
897 if (hasLiveCondCodeDef(MI))
900 MachineFunction &MF = *MI.getParent()->getParent();
// All instructions handled here are two-address instructions. Get the known
// operands.
902 const MachineOperand &Dest = MI.getOperand(0);
903 const MachineOperand &Src = MI.getOperand(1);
905 // Ideally, operations with undef should be folded before we get here, but we
906 // can't guarantee it. Bail out because optimizing undefs is a waste of time.
907 // Without this, we have to forward undef state to new register operands to
908 // avoid machine verifier errors.
911 if (MI.getNumOperands() > 2)
912 if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())
915 MachineInstr *NewMI = nullptr;
916 bool Is64Bit = Subtarget.is64Bit();
918 bool Is8BitOp = false;
919 unsigned MIOpc = MI.getOpcode();
921 default: llvm_unreachable("Unreachable!");
923 assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
924 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
925 if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
927 // LEA can't handle RSP.
928 if (Register::isVirtualRegister(Src.getReg()) &&
929 !MF.getRegInfo().constrainRegClass(Src.getReg(),
930 &X86::GR64_NOSPRegClass))
933 NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
936 .addImm(1ULL << ShAmt)
943 assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
944 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
945 if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
947 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
949 // LEA can't handle ESP.
952 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
953 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
954 SrcReg, isKill, ImplicitOp, LV))
957 MachineInstrBuilder MIB =
958 BuildMI(MF, MI.getDebugLoc(), get(Opc))
961 .addImm(1ULL << ShAmt)
962 .addReg(SrcReg, getKillRegState(isKill))
965 if (ImplicitOp.getReg() != 0)
975 assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
976 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
977 if (!isTruncatedShiftCountForLEA(ShAmt))
979 return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
983 assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
984 unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r :
985 (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
988 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
989 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
993 MachineInstrBuilder MIB =
994 BuildMI(MF, MI.getDebugLoc(), get(Opc))
996 .addReg(SrcReg, getKillRegState(isKill));
997 if (ImplicitOp.getReg() != 0)
1000 NewMI = addOffset(MIB, 1);
1005 assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
1006 unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
1007 : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1011 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1012 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false, SrcReg, isKill,
1016 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1018 .addReg(SrcReg, getKillRegState(isKill));
1019 if (ImplicitOp.getReg() != 0)
1020 MIB.add(ImplicitOp);
1022 NewMI = addOffset(MIB, -1);
1032 return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
1034 case X86::ADD64rr_DB:
1036 case X86::ADD32rr_DB: {
1037 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1039 if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
1042 Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1046 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1047 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
1048 SrcReg, isKill, ImplicitOp, LV))
1051 const MachineOperand &Src2 = MI.getOperand(2);
1054 MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
1055 if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
1056 SrcReg2, isKill2, ImplicitOp2, LV))
1059 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
1060 if (ImplicitOp.getReg() != 0)
1061 MIB.add(ImplicitOp);
1062 if (ImplicitOp2.getReg() != 0)
1063 MIB.add(ImplicitOp2);
1065 NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
1066 if (LV && Src2.isKill())
1067 LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
1071 case X86::ADD8rr_DB:
1075 case X86::ADD16rr_DB:
1076 return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
1077 case X86::ADD64ri32:
1079 case X86::ADD64ri32_DB:
1080 case X86::ADD64ri8_DB:
1081 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1083 BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
1088 case X86::ADD32ri_DB:
1089 case X86::ADD32ri8_DB: {
1090 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1091 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1095 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1096 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
1097 SrcReg, isKill, ImplicitOp, LV))
1100 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1102 .addReg(SrcReg, getKillRegState(isKill));
1103 if (ImplicitOp.getReg() != 0)
1104 MIB.add(ImplicitOp);
1106 NewMI = addOffset(MIB, MI.getOperand(2));
1110 case X86::ADD8ri_DB:
1115 case X86::ADD16ri_DB:
1116 case X86::ADD16ri8_DB:
1117 return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
1121 /// FIXME: Support these similar to ADD8ri/ADD16ri*.
1124 case X86::SUB32ri: {
1125 if (!MI.getOperand(2).isImm())
1127 int64_t Imm = MI.getOperand(2).getImm();
1128 if (!isInt<32>(-Imm))
1131 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1132 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1136 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1137 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
1138 SrcReg, isKill, ImplicitOp, LV))
1141 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1143 .addReg(SrcReg, getKillRegState(isKill));
1144 if (ImplicitOp.getReg() != 0)
1145 MIB.add(ImplicitOp);
1147 NewMI = addOffset(MIB, -Imm);
1152 case X86::SUB64ri32: {
1153 if (!MI.getOperand(2).isImm())
1155 int64_t Imm = MI.getOperand(2).getImm();
1156 if (!isInt<32>(-Imm))
1159 assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
1161 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(),
1162 get(X86::LEA64r)).add(Dest).add(Src);
1163 NewMI = addOffset(MIB, -Imm);
1167 case X86::VMOVDQU8Z128rmk:
1168 case X86::VMOVDQU8Z256rmk:
1169 case X86::VMOVDQU8Zrmk:
1170 case X86::VMOVDQU16Z128rmk:
1171 case X86::VMOVDQU16Z256rmk:
1172 case X86::VMOVDQU16Zrmk:
1173 case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk:
1174 case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk:
1175 case X86::VMOVDQU32Zrmk: case X86::VMOVDQA32Zrmk:
1176 case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk:
1177 case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk:
1178 case X86::VMOVDQU64Zrmk: case X86::VMOVDQA64Zrmk:
1179 case X86::VMOVUPDZ128rmk: case X86::VMOVAPDZ128rmk:
1180 case X86::VMOVUPDZ256rmk: case X86::VMOVAPDZ256rmk:
1181 case X86::VMOVUPDZrmk: case X86::VMOVAPDZrmk:
1182 case X86::VMOVUPSZ128rmk: case X86::VMOVAPSZ128rmk:
1183 case X86::VMOVUPSZ256rmk: case X86::VMOVAPSZ256rmk:
1184 case X86::VMOVUPSZrmk: case X86::VMOVAPSZrmk:
1185 case X86::VBROADCASTSDZ256mk:
1186 case X86::VBROADCASTSDZmk:
1187 case X86::VBROADCASTSSZ128mk:
1188 case X86::VBROADCASTSSZ256mk:
1189 case X86::VBROADCASTSSZmk:
1190 case X86::VPBROADCASTDZ128mk:
1191 case X86::VPBROADCASTDZ256mk:
1192 case X86::VPBROADCASTDZmk:
1193 case X86::VPBROADCASTQZ128mk:
1194 case X86::VPBROADCASTQZ256mk:
1195 case X86::VPBROADCASTQZmk: {
1198 default: llvm_unreachable("Unreachable!");
1199 case X86::VMOVDQU8Z128rmk: Opc = X86::VPBLENDMBZ128rmk; break;
1200 case X86::VMOVDQU8Z256rmk: Opc = X86::VPBLENDMBZ256rmk; break;
1201 case X86::VMOVDQU8Zrmk: Opc = X86::VPBLENDMBZrmk; break;
1202 case X86::VMOVDQU16Z128rmk: Opc = X86::VPBLENDMWZ128rmk; break;
1203 case X86::VMOVDQU16Z256rmk: Opc = X86::VPBLENDMWZ256rmk; break;
1204 case X86::VMOVDQU16Zrmk: Opc = X86::VPBLENDMWZrmk; break;
1205 case X86::VMOVDQU32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
1206 case X86::VMOVDQU32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
1207 case X86::VMOVDQU32Zrmk: Opc = X86::VPBLENDMDZrmk; break;
1208 case X86::VMOVDQU64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
1209 case X86::VMOVDQU64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
1210 case X86::VMOVDQU64Zrmk: Opc = X86::VPBLENDMQZrmk; break;
1211 case X86::VMOVUPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break;
1212 case X86::VMOVUPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break;
1213 case X86::VMOVUPDZrmk: Opc = X86::VBLENDMPDZrmk; break;
1214 case X86::VMOVUPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break;
1215 case X86::VMOVUPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break;
1216 case X86::VMOVUPSZrmk: Opc = X86::VBLENDMPSZrmk; break;
1217 case X86::VMOVDQA32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
1218 case X86::VMOVDQA32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
1219 case X86::VMOVDQA32Zrmk: Opc = X86::VPBLENDMDZrmk; break;
1220 case X86::VMOVDQA64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
1221 case X86::VMOVDQA64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
1222 case X86::VMOVDQA64Zrmk: Opc = X86::VPBLENDMQZrmk; break;
1223 case X86::VMOVAPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break;
1224 case X86::VMOVAPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break;
1225 case X86::VMOVAPDZrmk: Opc = X86::VBLENDMPDZrmk; break;
1226 case X86::VMOVAPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break;
1227 case X86::VMOVAPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break;
1228 case X86::VMOVAPSZrmk: Opc = X86::VBLENDMPSZrmk; break;
1229 case X86::VBROADCASTSDZ256mk: Opc = X86::VBLENDMPDZ256rmbk; break;
1230 case X86::VBROADCASTSDZmk: Opc = X86::VBLENDMPDZrmbk; break;
1231 case X86::VBROADCASTSSZ128mk: Opc = X86::VBLENDMPSZ128rmbk; break;
1232 case X86::VBROADCASTSSZ256mk: Opc = X86::VBLENDMPSZ256rmbk; break;
1233 case X86::VBROADCASTSSZmk: Opc = X86::VBLENDMPSZrmbk; break;
1234 case X86::VPBROADCASTDZ128mk: Opc = X86::VPBLENDMDZ128rmbk; break;
1235 case X86::VPBROADCASTDZ256mk: Opc = X86::VPBLENDMDZ256rmbk; break;
1236 case X86::VPBROADCASTDZmk: Opc = X86::VPBLENDMDZrmbk; break;
1237 case X86::VPBROADCASTQZ128mk: Opc = X86::VPBLENDMQZ128rmbk; break;
1238 case X86::VPBROADCASTQZ256mk: Opc = X86::VPBLENDMQZ256rmbk; break;
1239 case X86::VPBROADCASTQZmk: Opc = X86::VPBLENDMQZrmbk; break;
1242 NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1244 .add(MI.getOperand(2))
1246 .add(MI.getOperand(3))
1247 .add(MI.getOperand(4))
1248 .add(MI.getOperand(5))
1249 .add(MI.getOperand(6))
1250 .add(MI.getOperand(7));
1254 case X86::VMOVDQU8Z128rrk:
1255 case X86::VMOVDQU8Z256rrk:
1256 case X86::VMOVDQU8Zrrk:
1257 case X86::VMOVDQU16Z128rrk:
1258 case X86::VMOVDQU16Z256rrk:
1259 case X86::VMOVDQU16Zrrk:
1260 case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk:
1261 case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk:
1262 case X86::VMOVDQU32Zrrk: case X86::VMOVDQA32Zrrk:
1263 case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk:
1264 case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk:
1265 case X86::VMOVDQU64Zrrk: case X86::VMOVDQA64Zrrk:
1266 case X86::VMOVUPDZ128rrk: case X86::VMOVAPDZ128rrk:
1267 case X86::VMOVUPDZ256rrk: case X86::VMOVAPDZ256rrk:
1268 case X86::VMOVUPDZrrk: case X86::VMOVAPDZrrk:
1269 case X86::VMOVUPSZ128rrk: case X86::VMOVAPSZ128rrk:
1270 case X86::VMOVUPSZ256rrk: case X86::VMOVAPSZ256rrk:
1271 case X86::VMOVUPSZrrk: case X86::VMOVAPSZrrk: {
1274 default: llvm_unreachable("Unreachable!");
1275 case X86::VMOVDQU8Z128rrk: Opc = X86::VPBLENDMBZ128rrk; break;
1276 case X86::VMOVDQU8Z256rrk: Opc = X86::VPBLENDMBZ256rrk; break;
1277 case X86::VMOVDQU8Zrrk: Opc = X86::VPBLENDMBZrrk; break;
1278 case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break;
1279 case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break;
1280 case X86::VMOVDQU16Zrrk: Opc = X86::VPBLENDMWZrrk; break;
1281 case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
1282 case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
1283 case X86::VMOVDQU32Zrrk: Opc = X86::VPBLENDMDZrrk; break;
1284 case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
1285 case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
1286 case X86::VMOVDQU64Zrrk: Opc = X86::VPBLENDMQZrrk; break;
1287 case X86::VMOVUPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break;
1288 case X86::VMOVUPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break;
1289 case X86::VMOVUPDZrrk: Opc = X86::VBLENDMPDZrrk; break;
1290 case X86::VMOVUPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break;
1291 case X86::VMOVUPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break;
1292 case X86::VMOVUPSZrrk: Opc = X86::VBLENDMPSZrrk; break;
1293 case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
1294 case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
1295 case X86::VMOVDQA32Zrrk: Opc = X86::VPBLENDMDZrrk; break;
1296 case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
1297 case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
1298 case X86::VMOVDQA64Zrrk: Opc = X86::VPBLENDMQZrrk; break;
1299 case X86::VMOVAPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break;
1300 case X86::VMOVAPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break;
1301 case X86::VMOVAPDZrrk: Opc = X86::VBLENDMPDZrrk; break;
1302 case X86::VMOVAPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break;
1303 case X86::VMOVAPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break;
1304 case X86::VMOVAPSZrrk: Opc = X86::VBLENDMPSZrrk; break;
1307 NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1309 .add(MI.getOperand(2))
1311 .add(MI.getOperand(3));
1316 if (!NewMI) return nullptr;
1318 if (LV) { // Update live variables
1320 LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
1322 LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
1325 MFI->insert(MI.getIterator(), NewMI); // Insert the new inst
/// This determines which of the three possible cases of a three-source
/// commute the given source indexes correspond to, taking into account any
/// mask operands. Commuting a pass-through (merge-masked) operand is never
/// allowed. Returns -1 if the commute isn't possible.
1333 /// Case 0 - Possible to commute the first and second operands.
1334 /// Case 1 - Possible to commute the first and third operands.
1335 /// Case 2 - Possible to commute the second and third operands.
1336 static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
1337 unsigned SrcOpIdx2) {
// Put the lowest index in SrcOpIdx1 to simplify the checks below.
1339 if (SrcOpIdx1 > SrcOpIdx2)
1340 std::swap(SrcOpIdx1, SrcOpIdx2);
1342 unsigned Op1 = 1, Op2 = 2, Op3 = 3;
1343 if (X86II::isKMasked(TSFlags)) {
1348 if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
1350 if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
1352 if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
1354 llvm_unreachable("Unknown three src commute case.");
1357 unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
1358 const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
1359 const X86InstrFMA3Group &FMA3Group) const {
1361 unsigned Opc = MI.getOpcode();
1363 // TODO: Commuting the 1st operand of FMA*_Int requires some additional
1364 // analysis. The commute optimization is legal only if all users of FMA*_Int
// use only the lowest element of the FMA*_Int instruction. Such analysis is
// not implemented yet, so just return 0 in that case.
// When such analysis becomes available, this will be the right place to
// enable the optimization.
1369 assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
1370 "Intrinsic instructions can't commute operand 1");
1372 // Determine which case this commute is or if it can't be done.
1373 unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
1375 assert(Case < 3 && "Unexpected case number!");
1377 // Define the FMA forms mapping array that helps to map input FMA form
1378 // to output FMA form to preserve the operation semantics after
1379 // commuting the operands.
1380 const unsigned Form132Index = 0;
1381 const unsigned Form213Index = 1;
1382 const unsigned Form231Index = 2;
1383 static const unsigned FormMapping[][3] = {
1384 // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
1385 // FMA132 A, C, b; ==> FMA231 C, A, b;
1386 // FMA213 B, A, c; ==> FMA213 A, B, c;
1387 // FMA231 C, A, b; ==> FMA132 A, C, b;
1388 { Form231Index, Form213Index, Form132Index },
1389 // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
1390 // FMA132 A, c, B; ==> FMA132 B, c, A;
1391 // FMA213 B, a, C; ==> FMA231 C, a, B;
1392 // FMA231 C, a, B; ==> FMA213 B, a, C;
1393 { Form132Index, Form231Index, Form213Index },
1394 // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
1395 // FMA132 a, C, B; ==> FMA213 a, B, C;
1396 // FMA213 b, A, C; ==> FMA132 b, C, A;
1397 // FMA231 c, A, B; ==> FMA231 c, B, A;
1398 { Form213Index, Form132Index, Form231Index }
1401 unsigned FMAForms[3];
1402 FMAForms[0] = FMA3Group.get132Opcode();
1403 FMAForms[1] = FMA3Group.get213Opcode();
1404 FMAForms[2] = FMA3Group.get231Opcode();
1406 for (FormIndex = 0; FormIndex < 3; FormIndex++)
1407 if (Opc == FMAForms[FormIndex])
1410 // Everything is ready, just adjust the FMA opcode and return it.
1411 FormIndex = FormMapping[Case][FormIndex];
1412 return FMAForms[FormIndex];
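// Worked example (illustrative): VFMADD213PSr computes dst = src2*dst + src3.
// Commuting its second and third sources (Case 2) must therefore switch to
// the 132 form, since VFMADD132PSr computes dst = dst*src3 + src2, which is
// the same product and sum with the two remaining operands exchanged. That
// is exactly what FormMapping[2][Form213Index] == Form132Index encodes.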
1415 static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
1416 unsigned SrcOpIdx2) {
1417 // Determine which case this commute is or if it can't be done.
1418 unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
1420 assert(Case < 3 && "Unexpected case value!");
1422 // For each case we need to swap two pairs of bits in the final immediate.
1423 static const uint8_t SwapMasks[3][4] = {
1424 { 0x04, 0x10, 0x08, 0x20 }, // Swap bits 2/4 and 3/5.
1425 { 0x02, 0x10, 0x08, 0x40 }, // Swap bits 1/4 and 3/6.
1426 { 0x02, 0x04, 0x20, 0x40 }, // Swap bits 1/2 and 5/6.
1429 uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm();
1430 // Clear out the bits we are swapping.
1431 uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
1432 SwapMasks[Case][2] | SwapMasks[Case][3]);
1433 // If the immediate had a bit of the pair set, then set the opposite bit.
1434 if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1];
1435 if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0];
1436 if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3];
1437 if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2];
1438 MI.getOperand(MI.getNumOperands()-1).setImm(NewImm);
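// Worked example (illustrative): the VPTERNLOG immediate is a truth table
// indexed by (src1,src2,src3). Take imm 0xCA, i.e. "src1 ? src2 : src3".
// Swapping src1 and src2 (Case 0) exchanges table entries 2<->4 and 3<->5:
// bits 2 and 4 of 0xCA are both 0 (unchanged), bit 3 is 1 and bit 5 is 0,
// so they trade places and the new immediate is 0xE2, which is indeed
// "src2 ? src1 : src3".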
// Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
// commuted.
1443 static bool isCommutableVPERMV3Instruction(unsigned Opcode) {
1444 #define VPERM_CASES(Suffix) \
1445 case X86::VPERMI2##Suffix##128rr: case X86::VPERMT2##Suffix##128rr: \
1446 case X86::VPERMI2##Suffix##256rr: case X86::VPERMT2##Suffix##256rr: \
1447 case X86::VPERMI2##Suffix##rr: case X86::VPERMT2##Suffix##rr: \
1448 case X86::VPERMI2##Suffix##128rm: case X86::VPERMT2##Suffix##128rm: \
1449 case X86::VPERMI2##Suffix##256rm: case X86::VPERMT2##Suffix##256rm: \
1450 case X86::VPERMI2##Suffix##rm: case X86::VPERMT2##Suffix##rm: \
1451 case X86::VPERMI2##Suffix##128rrkz: case X86::VPERMT2##Suffix##128rrkz: \
1452 case X86::VPERMI2##Suffix##256rrkz: case X86::VPERMT2##Suffix##256rrkz: \
1453 case X86::VPERMI2##Suffix##rrkz: case X86::VPERMT2##Suffix##rrkz: \
1454 case X86::VPERMI2##Suffix##128rmkz: case X86::VPERMT2##Suffix##128rmkz: \
1455 case X86::VPERMI2##Suffix##256rmkz: case X86::VPERMT2##Suffix##256rmkz: \
1456 case X86::VPERMI2##Suffix##rmkz: case X86::VPERMT2##Suffix##rmkz:
1458 #define VPERM_CASES_BROADCAST(Suffix) \
1459 VPERM_CASES(Suffix) \
1460 case X86::VPERMI2##Suffix##128rmb: case X86::VPERMT2##Suffix##128rmb: \
1461 case X86::VPERMI2##Suffix##256rmb: case X86::VPERMT2##Suffix##256rmb: \
1462 case X86::VPERMI2##Suffix##rmb: case X86::VPERMT2##Suffix##rmb: \
1463 case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \
1464 case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \
1465 case X86::VPERMI2##Suffix##rmbkz: case X86::VPERMT2##Suffix##rmbkz:
1468 default: return false;
1470 VPERM_CASES_BROADCAST(D)
1471 VPERM_CASES_BROADCAST(PD)
1472 VPERM_CASES_BROADCAST(PS)
1473 VPERM_CASES_BROADCAST(Q)
1477 #undef VPERM_CASES_BROADCAST
1481 // Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
1482 // from the I opcode to the T opcode and vice versa.
1483 static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {
1484 #define VPERM_CASES(Orig, New) \
1485 case X86::Orig##128rr: return X86::New##128rr; \
1486 case X86::Orig##128rrkz: return X86::New##128rrkz; \
1487 case X86::Orig##128rm: return X86::New##128rm; \
1488 case X86::Orig##128rmkz: return X86::New##128rmkz; \
1489 case X86::Orig##256rr: return X86::New##256rr; \
1490 case X86::Orig##256rrkz: return X86::New##256rrkz; \
1491 case X86::Orig##256rm: return X86::New##256rm; \
1492 case X86::Orig##256rmkz: return X86::New##256rmkz; \
1493 case X86::Orig##rr: return X86::New##rr; \
1494 case X86::Orig##rrkz: return X86::New##rrkz; \
1495 case X86::Orig##rm: return X86::New##rm; \
1496 case X86::Orig##rmkz: return X86::New##rmkz;
1498 #define VPERM_CASES_BROADCAST(Orig, New) \
1499 VPERM_CASES(Orig, New) \
1500 case X86::Orig##128rmb: return X86::New##128rmb; \
1501 case X86::Orig##128rmbkz: return X86::New##128rmbkz; \
1502 case X86::Orig##256rmb: return X86::New##256rmb; \
1503 case X86::Orig##256rmbkz: return X86::New##256rmbkz; \
1504 case X86::Orig##rmb: return X86::New##rmb; \
1505 case X86::Orig##rmbkz: return X86::New##rmbkz;
1508 VPERM_CASES(VPERMI2B, VPERMT2B)
1509 VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D)
1510 VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD)
1511 VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS)
1512 VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q)
1513 VPERM_CASES(VPERMI2W, VPERMT2W)
1514 VPERM_CASES(VPERMT2B, VPERMI2B)
1515 VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D)
1516 VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD)
1517 VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS)
1518 VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q)
1519 VPERM_CASES(VPERMT2W, VPERMI2W)
1522 llvm_unreachable("Unreachable!");
1523 #undef VPERM_CASES_BROADCAST
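// Note (illustrative, not from the original source): the opcode swap works
// because VPERMI2* reads its permute indices from the destination-tied
// operand while VPERMT2* reads them from the second source; exchanging those
// two registers is therefore equivalent to switching between the I and T
// forms.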
1527 MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
1529 unsigned OpIdx2) const {
1530 auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
1532 return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
1536 switch (MI.getOpcode()) {
1537 case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
1538 case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
1539 case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
1540 case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
1541 case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
1542 case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
1545 switch (MI.getOpcode()) {
1546 default: llvm_unreachable("Unreachable!");
1547 case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
1548 case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
1549 case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
1550 case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
1551 case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
1552 case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
1554 unsigned Amt = MI.getOperand(3).getImm();
1555 auto &WorkingMI = cloneIfNew(MI);
1556 WorkingMI.setDesc(get(Opc));
1557 WorkingMI.getOperand(3).setImm(Size - Amt);
1558 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
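// Worked example (illustrative): with B = 0x1234, C = 0xABCD and I = 4,
// "A = SHRD16rri8 B, C, 4" extracts the low 16 bits of (C:B) >> 4, giving
// 0xD123. The commuted "A = SHLD16rri8 C, B, 12" computes
// (C << 12) | (B >> 4) truncated to 16 bits, which is also 0xD123.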
1562 case X86::PFSUBRrr: {
1563 // PFSUB x, y: x = x - y
1564 // PFSUBR x, y: x = y - x
1566 (X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr);
1567 auto &WorkingMI = cloneIfNew(MI);
1568 WorkingMI.setDesc(get(Opc));
1569 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1572 case X86::BLENDPDrri:
1573 case X86::BLENDPSrri:
1574 case X86::VBLENDPDrri:
1575 case X86::VBLENDPSrri:
1576 // If we're optimizing for size, try to use MOVSD/MOVSS.
1577 if (MI.getParent()->getParent()->getFunction().hasOptSize()) {
1579 switch (MI.getOpcode()) {
1580 default: llvm_unreachable("Unreachable!");
1581 case X86::BLENDPDrri: Opc = X86::MOVSDrr; Mask = 0x03; break;
1582 case X86::BLENDPSrri: Opc = X86::MOVSSrr; Mask = 0x0F; break;
1583 case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break;
1584 case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break;
1586 if ((MI.getOperand(3).getImm() ^ Mask) == 1) {
1587 auto &WorkingMI = cloneIfNew(MI);
1588 WorkingMI.setDesc(get(Opc));
1589 WorkingMI.RemoveOperand(3);
1590 return TargetInstrInfo::commuteInstructionImpl(WorkingMI,
1596 case X86::PBLENDWrri:
1597 case X86::VBLENDPDYrri:
1598 case X86::VBLENDPSYrri:
1599 case X86::VPBLENDDrri:
1600 case X86::VPBLENDWrri:
1601 case X86::VPBLENDDYrri:
1602 case X86::VPBLENDWYrri:{
1604 switch (MI.getOpcode()) {
1605 default: llvm_unreachable("Unreachable!");
1606 case X86::BLENDPDrri: Mask = (int8_t)0x03; break;
1607 case X86::BLENDPSrri: Mask = (int8_t)0x0F; break;
1608 case X86::PBLENDWrri: Mask = (int8_t)0xFF; break;
1609 case X86::VBLENDPDrri: Mask = (int8_t)0x03; break;
1610 case X86::VBLENDPSrri: Mask = (int8_t)0x0F; break;
1611 case X86::VBLENDPDYrri: Mask = (int8_t)0x0F; break;
1612 case X86::VBLENDPSYrri: Mask = (int8_t)0xFF; break;
1613 case X86::VPBLENDDrri: Mask = (int8_t)0x0F; break;
1614 case X86::VPBLENDWrri: Mask = (int8_t)0xFF; break;
1615 case X86::VPBLENDDYrri: Mask = (int8_t)0xFF; break;
1616 case X86::VPBLENDWYrri: Mask = (int8_t)0xFF; break;
1618 // Only the least significant bits of Imm are used.
1619 // Using int8_t to ensure it will be sign extended to the int64_t that
1620 // setImm takes in order to match isel behavior.
1621 int8_t Imm = MI.getOperand(3).getImm() & Mask;
1622 auto &WorkingMI = cloneIfNew(MI);
1623 WorkingMI.getOperand(3).setImm(Mask ^ Imm);
1624 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
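// Worked example (illustrative): "BLENDPSrri %a, %b, 0x0A" takes elements
// 1 and 3 from %b and elements 0 and 2 from %a. After the operands are
// swapped, the immediate must be complemented within the 4-bit element mask,
// 0x0F ^ 0x0A == 0x05, so the commuted instruction still selects the same
// elements from the same registers.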
1627 case X86::INSERTPSrr:
1628 case X86::VINSERTPSrr:
1629 case X86::VINSERTPSZrr: {
1630 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
1631 unsigned ZMask = Imm & 15;
1632 unsigned DstIdx = (Imm >> 4) & 3;
1633 unsigned SrcIdx = (Imm >> 6) & 3;
1635 // We can commute insertps if we zero 2 of the elements, the insertion is
1636 // "inline" and we don't override the insertion with a zero.
1637 if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
1638 countPopulation(ZMask) == 2) {
1639 unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15);
1640 assert(AltIdx < 4 && "Illegal insertion index");
1641 unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
1642 auto &WorkingMI = cloneIfNew(MI);
1643 WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm);
1644 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1652 case X86::VMOVSSrr:{
1653 // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD.
1654 if (Subtarget.hasSSE41()) {
1656 switch (MI.getOpcode()) {
1657 default: llvm_unreachable("Unreachable!");
1658 case X86::MOVSDrr: Opc = X86::BLENDPDrri; Mask = 0x02; break;
1659 case X86::MOVSSrr: Opc = X86::BLENDPSrri; Mask = 0x0E; break;
1660 case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break;
1661 case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break;
1664 auto &WorkingMI = cloneIfNew(MI);
1665 WorkingMI.setDesc(get(Opc));
1666 WorkingMI.addOperand(MachineOperand::CreateImm(Mask));
1667 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1671 // Convert to SHUFPD.
1672 assert(MI.getOpcode() == X86::MOVSDrr &&
1673 "Can only commute MOVSDrr without SSE4.1");
1675 auto &WorkingMI = cloneIfNew(MI);
1676 WorkingMI.setDesc(get(X86::SHUFPDrri));
1677 WorkingMI.addOperand(MachineOperand::CreateImm(0x02));
1678 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
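// Worked example (illustrative): "MOVSDrr %a, %b" produces {%b[0], %a[1]}.
// With the operands swapped, "SHUFPDrri %b, %a, 0x02" selects lane 0 from
// its first source (%b[0]) and lane 1 from its second source (%a[1]),
// yielding the same result without needing SSE4.1 blends.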
1681 case X86::SHUFPDrri: {
1682 // Commute to MOVSD.
1683 assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!");
1684 auto &WorkingMI = cloneIfNew(MI);
1685 WorkingMI.setDesc(get(X86::MOVSDrr));
1686 WorkingMI.RemoveOperand(3);
1687 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1690 case X86::PCLMULQDQrr:
1691 case X86::VPCLMULQDQrr:
1692 case X86::VPCLMULQDQYrr:
1693 case X86::VPCLMULQDQZrr:
1694 case X86::VPCLMULQDQZ128rr:
1695 case X86::VPCLMULQDQZ256rr: {
1696 // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
1697 // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
1698 unsigned Imm = MI.getOperand(3).getImm();
1699 unsigned Src1Hi = Imm & 0x01;
1700 unsigned Src2Hi = Imm & 0x10;
1701 auto &WorkingMI = cloneIfNew(MI);
1702 WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
1703 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
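// Worked example (illustrative): an immediate of 0x01 multiplies the high
// quadword of SRC1 with the low quadword of SRC2; after commuting the
// sources the immediate becomes (0x01 << 4) | (0x00 >> 4) == 0x10, which
// selects the high quadword of the new SRC2 and the low quadword of the new
// SRC1, i.e. the same two quadwords as before.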
1706 case X86::VPCMPBZ128rri: case X86::VPCMPUBZ128rri:
1707 case X86::VPCMPBZ256rri: case X86::VPCMPUBZ256rri:
1708 case X86::VPCMPBZrri: case X86::VPCMPUBZrri:
1709 case X86::VPCMPDZ128rri: case X86::VPCMPUDZ128rri:
1710 case X86::VPCMPDZ256rri: case X86::VPCMPUDZ256rri:
1711 case X86::VPCMPDZrri: case X86::VPCMPUDZrri:
1712 case X86::VPCMPQZ128rri: case X86::VPCMPUQZ128rri:
1713 case X86::VPCMPQZ256rri: case X86::VPCMPUQZ256rri:
1714 case X86::VPCMPQZrri: case X86::VPCMPUQZrri:
1715 case X86::VPCMPWZ128rri: case X86::VPCMPUWZ128rri:
1716 case X86::VPCMPWZ256rri: case X86::VPCMPUWZ256rri:
1717 case X86::VPCMPWZrri: case X86::VPCMPUWZrri:
1718 case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik:
1719 case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik:
1720 case X86::VPCMPBZrrik: case X86::VPCMPUBZrrik:
1721 case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik:
1722 case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik:
1723 case X86::VPCMPDZrrik: case X86::VPCMPUDZrrik:
1724 case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik:
1725 case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik:
1726 case X86::VPCMPQZrrik: case X86::VPCMPUQZrrik:
1727 case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik:
1728 case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik:
1729 case X86::VPCMPWZrrik: case X86::VPCMPUWZrrik: {
1730 // Flip comparison mode immediate (if necessary).
1731 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7;
1732 Imm = X86::getSwappedVPCMPImm(Imm);
1733 auto &WorkingMI = cloneIfNew(MI);
1734 WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm);
1735 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
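// Worked example (illustrative): the AVX-512 integer-compare predicate LT
// (0x1) becomes NLE/GT (0x6) after the operands are swapped, since
// "a < b" is equivalent to "b > a"; symmetric predicates such as EQ and NE
// are left unchanged by X86::getSwappedVPCMPImm.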
1738 case X86::VPCOMBri: case X86::VPCOMUBri:
1739 case X86::VPCOMDri: case X86::VPCOMUDri:
1740 case X86::VPCOMQri: case X86::VPCOMUQri:
1741 case X86::VPCOMWri: case X86::VPCOMUWri: {
1742 // Flip comparison mode immediate (if necessary).
1743 unsigned Imm = MI.getOperand(3).getImm() & 0x7;
1744 Imm = X86::getSwappedVPCOMImm(Imm);
1745 auto &WorkingMI = cloneIfNew(MI);
1746 WorkingMI.getOperand(3).setImm(Imm);
1747 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1750 case X86::VCMPSDZrr:
1751 case X86::VCMPSSZrr:
1752 case X86::VCMPPDZrri:
1753 case X86::VCMPPSZrri:
1754 case X86::VCMPPDZ128rri:
1755 case X86::VCMPPSZ128rri:
1756 case X86::VCMPPDZ256rri:
1757 case X86::VCMPPSZ256rri:
1758 case X86::VCMPPDZrrik:
1759 case X86::VCMPPSZrrik:
1760 case X86::VCMPPDZ128rrik:
1761 case X86::VCMPPSZ128rrik:
1762 case X86::VCMPPDZ256rrik:
1763 case X86::VCMPPSZ256rrik: {
1765 MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f;
1766 Imm = X86::getSwappedVCMPImm(Imm);
1767 auto &WorkingMI = cloneIfNew(MI);
1768 WorkingMI.getOperand(MI.getNumExplicitOperands() - 1).setImm(Imm);
1769 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1772 case X86::VPERM2F128rr:
1773 case X86::VPERM2I128rr: {
1774 // Flip permute source immediate.
1775 // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
1776 // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
1777 int8_t Imm = MI.getOperand(3).getImm() & 0xFF;
1778 auto &WorkingMI = cloneIfNew(MI);
1779 WorkingMI.getOperand(3).setImm(Imm ^ 0x22);
1780 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
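// Worked example (illustrative): with imm 0x20 the result is
// {Op0.lo, Op1.lo}. After swapping the two sources, XOR'ing with 0x22 gives
// 0x02, which again selects the low half of the original first operand for
// the low lane and the low half of the original second operand for the high
// lane, so the result is unchanged.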
1783 case X86::MOVHLPSrr:
1784 case X86::UNPCKHPDrr:
1785 case X86::VMOVHLPSrr:
1786 case X86::VUNPCKHPDrr:
1787 case X86::VMOVHLPSZrr:
1788 case X86::VUNPCKHPDZ128rr: {
1789 assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");
1791 unsigned Opc = MI.getOpcode();
1793 default: llvm_unreachable("Unreachable!");
1794 case X86::MOVHLPSrr: Opc = X86::UNPCKHPDrr; break;
1795 case X86::UNPCKHPDrr: Opc = X86::MOVHLPSrr; break;
1796 case X86::VMOVHLPSrr: Opc = X86::VUNPCKHPDrr; break;
1797 case X86::VUNPCKHPDrr: Opc = X86::VMOVHLPSrr; break;
1798 case X86::VMOVHLPSZrr: Opc = X86::VUNPCKHPDZ128rr; break;
1799 case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr; break;
1801 auto &WorkingMI = cloneIfNew(MI);
1802 WorkingMI.setDesc(get(Opc));
1803 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
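// Worked example (illustrative): "MOVHLPSrr %a, %b" produces {%b[1], %a[1]}
// (the high quadwords, with %b's in the low lane). With operands swapped,
// "UNPCKHPDrr %b, %a" interleaves the high quadwords as {%b[1], %a[1]},
// which is the same value, so the two opcodes commute into each other.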
1806 case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: {
1807 auto &WorkingMI = cloneIfNew(MI);
1808 unsigned OpNo = MI.getDesc().getNumOperands() - 1;
1809 X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm());
1810 WorkingMI.getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC));
1811 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
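// Worked example (illustrative): "%d = CMOV32rr %a, %b, COND_E" yields %b
// when ZF is set and %a otherwise; with the two sources swapped the
// condition must be inverted to COND_NE so that the same value is selected.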
1814 case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi:
1815 case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi:
1816 case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi:
1817 case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi:
1818 case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi:
1819 case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi:
1820 case X86::VPTERNLOGDZrrik:
1821 case X86::VPTERNLOGDZ128rrik:
1822 case X86::VPTERNLOGDZ256rrik:
1823 case X86::VPTERNLOGQZrrik:
1824 case X86::VPTERNLOGQZ128rrik:
1825 case X86::VPTERNLOGQZ256rrik:
1826 case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz:
1827 case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
1828 case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
1829 case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz:
1830 case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
1831 case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
1832 case X86::VPTERNLOGDZ128rmbi:
1833 case X86::VPTERNLOGDZ256rmbi:
1834 case X86::VPTERNLOGDZrmbi:
1835 case X86::VPTERNLOGQZ128rmbi:
1836 case X86::VPTERNLOGQZ256rmbi:
1837 case X86::VPTERNLOGQZrmbi:
1838 case X86::VPTERNLOGDZ128rmbikz:
1839 case X86::VPTERNLOGDZ256rmbikz:
1840 case X86::VPTERNLOGDZrmbikz:
1841 case X86::VPTERNLOGQZ128rmbikz:
1842 case X86::VPTERNLOGQZ256rmbikz:
1843 case X86::VPTERNLOGQZrmbikz: {
1844 auto &WorkingMI = cloneIfNew(MI);
1845 commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2);
1846 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1847 OpIdx1, OpIdx2);
1848 }
1849 default: {
1850 if (isCommutableVPERMV3Instruction(MI.getOpcode())) {
1851 unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode());
1852 auto &WorkingMI = cloneIfNew(MI);
1853 WorkingMI.setDesc(get(Opc));
1854 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1855 OpIdx1, OpIdx2);
1856 }
1858 const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
1859 MI.getDesc().TSFlags);
1860 if (FMA3Group) {
1861 unsigned Opc =
1862 getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group);
1863 auto &WorkingMI = cloneIfNew(MI);
1864 WorkingMI.setDesc(get(Opc));
1865 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
1866 OpIdx1, OpIdx2);
1867 }
1869 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
1870 }
1871 }
1872 }
1875 X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
1876 unsigned &SrcOpIdx1,
1877 unsigned &SrcOpIdx2,
1878 bool IsIntrinsic) const {
1879 uint64_t TSFlags = MI.getDesc().TSFlags;
1881 unsigned FirstCommutableVecOp = 1;
1882 unsigned LastCommutableVecOp = 3;
1883 unsigned KMaskOp = -1U;
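// For example, an unmasked 3-src FMA form is laid out as dst(0), src1(1),
// src2(2), src3(3), while the k-masked form is dst(0), src1(1), kmask(2),
// src2(3), src3(4): the mask sits at index 2 and pushes the last commutable
// operand up by one; for merge-masking src1 also becomes non-commutable.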
1884 if (X86II::isKMasked(TSFlags)) {
1885 // For k-zero-masked operations it is Ok to commute the first vector
1886 // operand.
1887 // For regular k-masked operations a conservative choice is done as the
1888 // elements of the first vector operand, for which the corresponding bit
1889 // in the k-mask operand is set to 0, are copied to the result of the
1890 // instruction.
1891 // TODO/FIXME: The commute still may be legal if it is known that the
1892 // k-mask operand is set to either all ones or all zeroes.
1893 // It is also Ok to commute the 1st operand if all users of MI use only
1894 // the elements enabled by the k-mask operand. For example,
1895 //   v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i]
1896 //                                      //         : v1[i];
1897 //   VMOVAPSZmrk <mem_addr>, k, v4; // this is the ONLY user of v4 ->
1898 //                                  // Ok, to commute v1 in FMADD213PSZrk.
1900 // The k-mask operand has index = 2 for masked and zero-masked operations.
1901 KMaskOp = 2;
1903 // The operand with index = 1 is used as a source for those elements for
1904 // which the corresponding bit in the k-mask is set to 0.
1905 if (X86II::isKMergeMasked(TSFlags))
1906 FirstCommutableVecOp = 3;
1908 LastCommutableVecOp++;
1909 } else if (IsIntrinsic) {
1910 // Commuting the first operand of an intrinsic instruction isn't possible
1911 // unless we can prove that only the lowest element of the result is used.
1912 FirstCommutableVecOp = 2;
1913 }
1915 if (isMem(MI, LastCommutableVecOp))
1916 LastCommutableVecOp--;
1918 // Only the first RegOpsNum operands are commutable.
1919 // Also, the value 'CommuteAnyOperandIndex' is valid here as it means
1920 // that the operand is not specified/fixed.
1921 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
1922 (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
1923 SrcOpIdx1 == KMaskOp))
1924 return false;
1925 if (SrcOpIdx2 != CommuteAnyOperandIndex &&
1926 (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
1927 SrcOpIdx2 == KMaskOp))
1928 return false;
1930 // Look for two different register operands assumed to be commutable
1931 // regardless of the FMA opcode. The FMA opcode is adjusted later.
1932 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
1933 SrcOpIdx2 == CommuteAnyOperandIndex) {
1934 unsigned CommutableOpIdx2 = SrcOpIdx2;
1936 // At least one of operands to be commuted is not specified and
1937 // this method is free to choose appropriate commutable operands.
1938 if (SrcOpIdx1 == SrcOpIdx2)
1939 // Both of operands are not fixed. By default set one of commutable
1940 // operands to the last register operand of the instruction.
1941 CommutableOpIdx2 = LastCommutableVecOp;
1942 else if (SrcOpIdx2 == CommuteAnyOperandIndex)
1943 // Only one of operands is not fixed.
1944 CommutableOpIdx2 = SrcOpIdx1;
1946 // CommutableOpIdx2 is well defined now. Let's choose another commutable
1947 // operand and assign its index to CommutableOpIdx1.
1948 Register Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();
1950 unsigned CommutableOpIdx1;
1951 for (CommutableOpIdx1 = LastCommutableVecOp;
1952 CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
1953 // Just ignore and skip the k-mask operand.
1954 if (CommutableOpIdx1 == KMaskOp)
1955 continue;
1957 // The commuted operands must have different registers.
1958 // Otherwise, the commute transformation does not change anything and
1959 // is useless then.
1960 if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
1961 break;
1962 }
1964 // No appropriate commutable operands were found.
1965 if (CommutableOpIdx1 < FirstCommutableVecOp)
1966 return false;
1968 // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpIdx2
1969 // to return those values.
1970 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
1971 CommutableOpIdx1, CommutableOpIdx2))
1972 return false;
1973 }
1975 return true;
1976 }
1978 bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
1979 unsigned &SrcOpIdx1,
1980 unsigned &SrcOpIdx2) const {
1981 const MCInstrDesc &Desc = MI.getDesc();
1982 if (!Desc.isCommutable())
1983 return false;
1985 switch (MI.getOpcode()) {
1986 case X86::CMPSDrr:
1987 case X86::CMPSSrr:
1988 case X86::CMPPDrri:
1989 case X86::CMPPSrri:
1990 case X86::VCMPSDrr:
1991 case X86::VCMPSSrr:
1992 case X86::VCMPPDrri:
1993 case X86::VCMPPSrri:
1994 case X86::VCMPPDYrri:
1995 case X86::VCMPPSYrri:
1996 case X86::VCMPSDZrr:
1997 case X86::VCMPSSZrr:
1998 case X86::VCMPPDZrri:
1999 case X86::VCMPPSZrri:
2000 case X86::VCMPPDZ128rri:
2001 case X86::VCMPPSZ128rri:
2002 case X86::VCMPPDZ256rri:
2003 case X86::VCMPPSZ256rri:
2004 case X86::VCMPPDZrrik:
2005 case X86::VCMPPSZrrik:
2006 case X86::VCMPPDZ128rrik:
2007 case X86::VCMPPSZ128rrik:
2008 case X86::VCMPPDZ256rrik:
2009 case X86::VCMPPSZ256rrik: {
2010 unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0;
2012 // Float comparison can be safely commuted for
2013 // Ordered/Unordered/Equal/NotEqual tests
2014 unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7;
2015 switch (Imm) {
2016 default:
2017 // EVEX versions can be commuted.
2018 if ((Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX)
2019 break;
2020 return false;
2021 case 0x00: // EQUAL
2022 case 0x03: // UNORDERED
2023 case 0x04: // NOT EQUAL
2024 case 0x07: // ORDERED
2025 break;
2026 }
2028 // The indices of the commutable operands are 1 and 2 (or 2 and 3
2029 // when masked).
2030 // Assign them to the returned operand indices here.
2031 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset,
2032 2 + OpOffset);
2033 }
2034 case X86::MOVSSrr:
2035 // X86::MOVSDrr is always commutable. MOVSS is only commutable if we can
2036 // form sse4.1 blend. We assume VMOVSSrr/VMOVSDrr is always commutable since
2037 // AVX implies sse4.1.
2038 if (Subtarget.hasSSE41())
2039 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2040 return false;
2041 case X86::SHUFPDrri:
2042 // We can commute this to MOVSD.
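// (With an immediate of 0x02 the result is { Src1[0], Src2[1] }, which is
// exactly what MOVSD produces once the two sources are exchanged.)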
2043 if (MI.getOperand(3).getImm() == 0x02)
2044 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2045 return false;
2046 case X86::MOVHLPSrr:
2047 case X86::UNPCKHPDrr:
2048 case X86::VMOVHLPSrr:
2049 case X86::VUNPCKHPDrr:
2050 case X86::VMOVHLPSZrr:
2051 case X86::VUNPCKHPDZ128rr:
2052 if (Subtarget.hasSSE2())
2053 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2054 return false;
2055 case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi:
2056 case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi:
2057 case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi:
2058 case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi:
2059 case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi:
2060 case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi:
2061 case X86::VPTERNLOGDZrrik:
2062 case X86::VPTERNLOGDZ128rrik:
2063 case X86::VPTERNLOGDZ256rrik:
2064 case X86::VPTERNLOGQZrrik:
2065 case X86::VPTERNLOGQZ128rrik:
2066 case X86::VPTERNLOGQZ256rrik:
2067 case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz:
2068 case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
2069 case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
2070 case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz:
2071 case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
2072 case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
2073 case X86::VPTERNLOGDZ128rmbi:
2074 case X86::VPTERNLOGDZ256rmbi:
2075 case X86::VPTERNLOGDZrmbi:
2076 case X86::VPTERNLOGQZ128rmbi:
2077 case X86::VPTERNLOGQZ256rmbi:
2078 case X86::VPTERNLOGQZrmbi:
2079 case X86::VPTERNLOGDZ128rmbikz:
2080 case X86::VPTERNLOGDZ256rmbikz:
2081 case X86::VPTERNLOGDZrmbikz:
2082 case X86::VPTERNLOGQZ128rmbikz:
2083 case X86::VPTERNLOGQZ256rmbikz:
2084 case X86::VPTERNLOGQZrmbikz:
2085 return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2086 case X86::VPDPWSSDZ128r:
2087 case X86::VPDPWSSDZ128rk:
2088 case X86::VPDPWSSDZ128rkz:
2089 case X86::VPDPWSSDZ256r:
2090 case X86::VPDPWSSDZ256rk:
2091 case X86::VPDPWSSDZ256rkz:
2092 case X86::VPDPWSSDZr:
2093 case X86::VPDPWSSDZrk:
2094 case X86::VPDPWSSDZrkz:
2095 case X86::VPDPWSSDSZ128r:
2096 case X86::VPDPWSSDSZ128rk:
2097 case X86::VPDPWSSDSZ128rkz:
2098 case X86::VPDPWSSDSZ256r:
2099 case X86::VPDPWSSDSZ256rk:
2100 case X86::VPDPWSSDSZ256rkz:
2101 case X86::VPDPWSSDSZr:
2102 case X86::VPDPWSSDSZrk:
2103 case X86::VPDPWSSDSZrkz:
2104 case X86::VPMADD52HUQZ128r:
2105 case X86::VPMADD52HUQZ128rk:
2106 case X86::VPMADD52HUQZ128rkz:
2107 case X86::VPMADD52HUQZ256r:
2108 case X86::VPMADD52HUQZ256rk:
2109 case X86::VPMADD52HUQZ256rkz:
2110 case X86::VPMADD52HUQZr:
2111 case X86::VPMADD52HUQZrk:
2112 case X86::VPMADD52HUQZrkz:
2113 case X86::VPMADD52LUQZ128r:
2114 case X86::VPMADD52LUQZ128rk:
2115 case X86::VPMADD52LUQZ128rkz:
2116 case X86::VPMADD52LUQZ256r:
2117 case X86::VPMADD52LUQZ256rk:
2118 case X86::VPMADD52LUQZ256rkz:
2119 case X86::VPMADD52LUQZr:
2120 case X86::VPMADD52LUQZrk:
2121 case X86::VPMADD52LUQZrkz: {
2122 unsigned CommutableOpIdx1 = 2;
2123 unsigned CommutableOpIdx2 = 3;
2124 if (X86II::isKMasked(Desc.TSFlags)) {
2125 // Skip the mask register.
2126 ++CommutableOpIdx1;
2127 ++CommutableOpIdx2;
2128 }
2129 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
2130 CommutableOpIdx1, CommutableOpIdx2))
2131 return false;
2132 if (!MI.getOperand(SrcOpIdx1).isReg() ||
2133 !MI.getOperand(SrcOpIdx2).isReg())
2134 // No idea.
2135 return false;
2136 return true;
2137 }
2139 default:
2140 const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
2141 MI.getDesc().TSFlags);
2142 if (FMA3Group)
2143 return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2,
2144 FMA3Group->isIntrinsic());
2146 // Handle masked instructions since we need to skip over the mask input
2147 // and the preserved input.
2148 if (X86II::isKMasked(Desc.TSFlags)) {
2149 // First assume that the first input is the mask operand and skip past it.
2150 unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1;
2151 unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2;
2152 // Check if the first input is tied. If there isn't one then we only
2153 // need to skip the mask operand which we did above.
2154 if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(),
2155 MCOI::TIED_TO) != -1)) {
2156 // If this is zero masking instruction with a tied operand, we need to
2157 // move the first index back to the first input since this must
2158 // be a 3 input instruction and we want the first two non-mask inputs.
2159 // Otherwise this is a 2 input instruction with a preserved input and
2160 // mask, so we need to move the indices to skip one more input.
2161 if (X86II::isKMergeMasked(Desc.TSFlags)) {
2162 ++CommutableOpIdx1;
2163 ++CommutableOpIdx2;
2164 } else {
2165 --CommutableOpIdx1;
2166 }
2167 }
2169 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
2170 CommutableOpIdx1, CommutableOpIdx2))
2171 return false;
2173 if (!MI.getOperand(SrcOpIdx1).isReg() ||
2174 !MI.getOperand(SrcOpIdx2).isReg())
2175 // No idea.
2176 return false;
2177 return true;
2178 }
2180 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2181 }
2185 X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) {
2186 switch (MI.getOpcode()) {
2187 default: return X86::COND_INVALID;
2188 case X86::JCC_1:
2189 return static_cast<X86::CondCode>(
2190 MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2191 }
2192 }
2194 /// Return condition code of a SETCC opcode.
2195 X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) {
2196 switch (MI.getOpcode()) {
2197 default: return X86::COND_INVALID;
2198 case X86::SETCCr: case X86::SETCCm:
2199 return static_cast<X86::CondCode>(
2200 MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2201 }
2202 }
2204 /// Return condition code of a CMov opcode.
2205 X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) {
2206 switch (MI.getOpcode()) {
2207 default: return X86::COND_INVALID;
2208 case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr:
2209 case X86::CMOV16rm: case X86::CMOV32rm: case X86::CMOV64rm:
2210 return static_cast<X86::CondCode>(
2211 MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
2212 }
2213 }
2215 /// Return the inverse of the specified condition,
2216 /// e.g. turning COND_E to COND_NE.
2217 X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
2218 switch (CC) {
2219 default: llvm_unreachable("Illegal condition code!");
2220 case X86::COND_E: return X86::COND_NE;
2221 case X86::COND_NE: return X86::COND_E;
2222 case X86::COND_L: return X86::COND_GE;
2223 case X86::COND_LE: return X86::COND_G;
2224 case X86::COND_G: return X86::COND_LE;
2225 case X86::COND_GE: return X86::COND_L;
2226 case X86::COND_B: return X86::COND_AE;
2227 case X86::COND_BE: return X86::COND_A;
2228 case X86::COND_A: return X86::COND_BE;
2229 case X86::COND_AE: return X86::COND_B;
2230 case X86::COND_S: return X86::COND_NS;
2231 case X86::COND_NS: return X86::COND_S;
2232 case X86::COND_P: return X86::COND_NP;
2233 case X86::COND_NP: return X86::COND_P;
2234 case X86::COND_O: return X86::COND_NO;
2235 case X86::COND_NO: return X86::COND_O;
2236 case X86::COND_NE_OR_P: return X86::COND_E_AND_NP;
2237 case X86::COND_E_AND_NP: return X86::COND_NE_OR_P;
2238 }
2239 }
2241 /// Assuming the flags are set by MI(a,b), return the condition code if we
2242 /// modify the instructions such that flags are set by MI(b,a).
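/// For example, a COND_L user of "CMP a, b" must become COND_G once the
/// compare is rewritten as "CMP b, a"; equality-style conditions such as
/// COND_E/COND_NE are unaffected.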
2243 static X86::CondCode getSwappedCondition(X86::CondCode CC) {
2244 switch (CC) {
2245 default: return X86::COND_INVALID;
2246 case X86::COND_E: return X86::COND_E;
2247 case X86::COND_NE: return X86::COND_NE;
2248 case X86::COND_L: return X86::COND_G;
2249 case X86::COND_LE: return X86::COND_GE;
2250 case X86::COND_G: return X86::COND_L;
2251 case X86::COND_GE: return X86::COND_LE;
2252 case X86::COND_B: return X86::COND_A;
2253 case X86::COND_BE: return X86::COND_AE;
2254 case X86::COND_A: return X86::COND_B;
2255 case X86::COND_AE: return X86::COND_BE;
2256 }
2257 }
2259 std::pair<X86::CondCode, bool>
2260 X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
2261 X86::CondCode CC = X86::COND_INVALID;
2262 bool NeedSwap = false;
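// When NeedSwap is set the caller is expected to swap the two compare
// operands before using CC; e.g. FCMP_OLT is reported below as COND_A (the
// code used for FCMP_OGT) together with NeedSwap = true.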
2263 switch (Predicate) {
2264 default: break;
2265 // Floating-point Predicates
2266 case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
2267 case CmpInst::FCMP_OLT: NeedSwap = true; LLVM_FALLTHROUGH;
2268 case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
2269 case CmpInst::FCMP_OLE: NeedSwap = true; LLVM_FALLTHROUGH;
2270 case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
2271 case CmpInst::FCMP_UGT: NeedSwap = true; LLVM_FALLTHROUGH;
2272 case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
2273 case CmpInst::FCMP_UGE: NeedSwap = true; LLVM_FALLTHROUGH;
2274 case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
2275 case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
2276 case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
2277 case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
2278 case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH;
2279 case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
2281 // Integer Predicates
2282 case CmpInst::ICMP_EQ: CC = X86::COND_E; break;
2283 case CmpInst::ICMP_NE: CC = X86::COND_NE; break;
2284 case CmpInst::ICMP_UGT: CC = X86::COND_A; break;
2285 case CmpInst::ICMP_UGE: CC = X86::COND_AE; break;
2286 case CmpInst::ICMP_ULT: CC = X86::COND_B; break;
2287 case CmpInst::ICMP_ULE: CC = X86::COND_BE; break;
2288 case CmpInst::ICMP_SGT: CC = X86::COND_G; break;
2289 case CmpInst::ICMP_SGE: CC = X86::COND_GE; break;
2290 case CmpInst::ICMP_SLT: CC = X86::COND_L; break;
2291 case CmpInst::ICMP_SLE: CC = X86::COND_LE; break;
2292 }
2294 return std::make_pair(CC, NeedSwap);
2295 }
2297 /// Return a setcc opcode based on whether it has memory operand.
2298 unsigned X86::getSETOpc(bool HasMemoryOperand) {
2299 return HasMemoryOperand ? X86::SETCCm : X86::SETCCr;
2300 }
2302 /// Return a cmov opcode for the given register size in bytes, and operand type.
2303 unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand) {
2304 switch (RegBytes) {
2305 default: llvm_unreachable("Illegal register size!");
2306 case 2: return HasMemoryOperand ? X86::CMOV16rm : X86::CMOV16rr;
2307 case 4: return HasMemoryOperand ? X86::CMOV32rm : X86::CMOV32rr;
2308 case 8: return HasMemoryOperand ? X86::CMOV64rm : X86::CMOV64rr;
2309 }
2310 }
2312 /// Get the VPCMP immediate for the given condition.
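/// The immediate uses the AVX-512 VPCMP predicate encoding:
/// 0=EQ, 1=LT, 2=LE, 3=FALSE, 4=NE, 5=NLT, 6=NLE, 7=TRUE.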
2313 unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) {
2314 switch (CC) {
2315 default: llvm_unreachable("Unexpected SETCC condition");
2316 case ISD::SETNE: return 4;
2317 case ISD::SETEQ: return 0;
2318 case ISD::SETULT:
2319 case ISD::SETLT: return 1;
2320 case ISD::SETUGT:
2321 case ISD::SETGT: return 6;
2322 case ISD::SETUGE:
2323 case ISD::SETGE: return 5;
2324 case ISD::SETULE:
2325 case ISD::SETLE: return 2;
2326 }
2327 }
2329 /// Get the VPCMP immediate if the operands are swapped.
2330 unsigned X86::getSwappedVPCMPImm(unsigned Imm) {
2332 default: llvm_unreachable("Unreachable!");
2333 case 0x01: Imm = 0x06; break; // LT -> NLE
2334 case 0x02: Imm = 0x05; break; // LE -> NLT
2335 case 0x05: Imm = 0x02; break; // NLT -> LE
2336 case 0x06: Imm = 0x01; break; // NLE -> LT
2337 case 0x00: // EQ
2338 case 0x03: // FALSE
2339 case 0x04: // NE
2340 case 0x07: // TRUE
2341 break;
2342 }
2344 return Imm;
2345 }
2347 /// Get the VPCOM immediate if the operands are swapped.
2348 unsigned X86::getSwappedVPCOMImm(unsigned Imm) {
2350 default: llvm_unreachable("Unreachable!");
2351 case 0x00: Imm = 0x02; break; // LT -> GT
2352 case 0x01: Imm = 0x03; break; // LE -> GE
2353 case 0x02: Imm = 0x00; break; // GT -> LT
2354 case 0x03: Imm = 0x01; break; // GE -> LE
2355 case 0x04: // EQ
2356 case 0x05: // NE
2357 case 0x06: // FALSE
2358 case 0x07: // TRUE
2359 break;
2360 }
2362 return Imm;
2363 }
2365 /// Get the VCMP immediate if the operands are swapped.
2366 unsigned X86::getSwappedVCMPImm(unsigned Imm) {
2367 // Only need the lower 2 bits to distinguish.
2368 switch (Imm & 0x3) {
2369 default: llvm_unreachable("Unreachable!");
2370 case 0x00: case 0x03:
2371 // EQ/NE/TRUE/FALSE/ORD/UNORD don't change immediate when commuted.
2372 break;
2373 case 0x01: case 0x02:
2374 // Need to toggle bits 3:0. Bit 4 stays the same.
2375 Imm ^= 0xf;
2376 break;
2377 }
2379 return Imm;
2380 }
2382 bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
2383 if (!MI.isTerminator()) return false;
2385 // Conditional branch is a special case.
2386 if (MI.isBranch() && !MI.isBarrier())
2387 return true;
2388 if (!MI.isPredicable())
2389 return true;
2390 return !isPredicated(MI);
2393 bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
2394 switch (MI.getOpcode()) {
2395 case X86::TCRETURNdi:
2396 case X86::TCRETURNri:
2397 case X86::TCRETURNmi:
2398 case X86::TCRETURNdi64:
2399 case X86::TCRETURNri64:
2400 case X86::TCRETURNmi64:
2401 return true;
2402 default:
2403 return false;
2404 }
2405 }
2407 bool X86InstrInfo::canMakeTailCallConditional(
2408 SmallVectorImpl<MachineOperand> &BranchCond,
2409 const MachineInstr &TailCall) const {
2410 if (TailCall.getOpcode() != X86::TCRETURNdi &&
2411 TailCall.getOpcode() != X86::TCRETURNdi64) {
2412 // Only direct calls can be done with a conditional branch.
2413 return false;
2414 }
2416 const MachineFunction *MF = TailCall.getParent()->getParent();
2417 if (Subtarget.isTargetWin64() && MF->hasWinCFI()) {
2418 // Conditional tail calls confuse the Win64 unwinder.
2419 return false;
2420 }
2422 assert(BranchCond.size() == 1);
2423 if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
2424 // Can't make a conditional tail call with this condition.
2425 return false;
2426 }
2428 const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
2429 if (X86FI->getTCReturnAddrDelta() != 0 ||
2430 TailCall.getOperand(1).getImm() != 0) {
2431 // A conditional tail call cannot do any stack adjustment.
2432 return false;
2433 }
2435 return true;
2436 }
2438 void X86InstrInfo::replaceBranchWithTailCall(
2439 MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
2440 const MachineInstr &TailCall) const {
2441 assert(canMakeTailCallConditional(BranchCond, TailCall));
2443 MachineBasicBlock::iterator I = MBB.end();
2444 while (I != MBB.begin()) {
2445 --I;
2446 if (I->isDebugInstr())
2447 continue;
2448 if (!I->isBranch())
2449 assert(0 && "Can't find the branch to replace!");
2451 X86::CondCode CC = X86::getCondFromBranch(*I);
2452 assert(BranchCond.size() == 1);
2453 if (CC != BranchCond[0].getImm())
2454 continue;
2456 break;
2457 }
2459 unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
2460 : X86::TCRETURNdi64cc;
2462 auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
2463 MIB->addOperand(TailCall.getOperand(0)); // Destination.
2464 MIB.addImm(0); // Stack offset (not used).
2465 MIB->addOperand(BranchCond[0]); // Condition.
2466 MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters.
2468 // Add implicit uses and defs of all live regs potentially clobbered by the
2469 // call. This way they still appear live across the call.
2470 LivePhysRegs LiveRegs(getRegisterInfo());
2471 LiveRegs.addLiveOuts(MBB);
2472 SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers;
2473 LiveRegs.stepForward(*MIB, Clobbers);
2474 for (const auto &C : Clobbers) {
2475 MIB.addReg(C.first, RegState::Implicit);
2476 MIB.addReg(C.first, RegState::Implicit | RegState::Define);
2479 I->eraseFromParent();
2482 // Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may
2483 // not be a fallthrough MBB now due to layout changes). Return nullptr if the
2484 // fallthrough MBB cannot be identified.
2485 static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
2486 MachineBasicBlock *TBB) {
2487 // Look for non-EHPad successors other than TBB. If we find exactly one, it
2488 // is the fallthrough MBB. If we find zero, then TBB is both the target MBB
2489 // and fallthrough MBB. If we find more than one, we cannot identify the
2490 // fallthrough MBB and should return nullptr.
2491 MachineBasicBlock *FallthroughBB = nullptr;
2492 for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) {
2493 if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB))
2495 // Return a nullptr if we found more than one fallthrough successor.
2496 if (FallthroughBB && FallthroughBB != TBB)
2498 FallthroughBB = *SI;
2500 return FallthroughBB;
2503 bool X86InstrInfo::AnalyzeBranchImpl(
2504 MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
2505 SmallVectorImpl<MachineOperand> &Cond,
2506 SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {
2508 // Start from the bottom of the block and work up, examining the
2509 // terminator instructions.
2510 MachineBasicBlock::iterator I = MBB.end();
2511 MachineBasicBlock::iterator UnCondBrIter = MBB.end();
2512 while (I != MBB.begin()) {
2513 --I;
2514 if (I->isDebugInstr())
2515 continue;
2517 // Working from the bottom, when we see a non-terminator instruction, we're
2518 // done.
2519 if (!isUnpredicatedTerminator(*I))
2520 break;
2522 // A terminator that isn't a branch can't easily be handled by this
2523 // analysis.
2524 if (!I->isBranch())
2525 return true;
2527 // Handle unconditional branches.
2528 if (I->getOpcode() == X86::JMP_1) {
2529 UnCondBrIter = I;
2531 if (!AllowModify) {
2532 TBB = I->getOperand(0).getMBB();
2533 continue;
2534 }
2536 // If the block has any instructions after a JMP, delete them.
2537 while (std::next(I) != MBB.end())
2538 std::next(I)->eraseFromParent();
2540 Cond.clear();
2541 FBB = nullptr;
2543 // Delete the JMP if it's equivalent to a fall-through.
2544 if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
2545 TBB = nullptr;
2546 I->eraseFromParent();
2547 I = MBB.end();
2548 UnCondBrIter = MBB.end();
2549 continue;
2550 }
2552 // TBB is used to indicate the unconditional destination.
2553 TBB = I->getOperand(0).getMBB();
2554 continue;
2555 }
2557 // Handle conditional branches.
2558 X86::CondCode BranchCode = X86::getCondFromBranch(*I);
2559 if (BranchCode == X86::COND_INVALID)
2560 return true; // Can't handle indirect branch.
2562 // In practice we should never have an undef eflags operand, if we do
2563 // abort here as we are not prepared to preserve the flag.
2564 if (I->findRegisterUseOperand(X86::EFLAGS)->isUndef())
2565 return true;
2567 // Working from the bottom, handle the first conditional branch.
2568 if (Cond.empty()) {
2569 MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
2570 if (AllowModify && UnCondBrIter != MBB.end() &&
2571 MBB.isLayoutSuccessor(TargetBB)) {
2572 // If we can modify the code and it ends in something like:
2573 //
2574 //     jCC L1
2575 //     jmp L2
2576 //   L1:
2577 //     ...
2578 //   L2:
2579 //
2580 // Then we can change this to:
2581 //
2582 //     jnCC L2
2583 //   L1:
2584 //     ...
2585 //   L2:
2586 //
2587 // Which is a bit more efficient.
2588 // We conditionally jump to the fall-through block.
2589 BranchCode = GetOppositeBranchCondition(BranchCode);
2590 MachineBasicBlock::iterator OldInst = I;
2592 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JCC_1))
2593 .addMBB(UnCondBrIter->getOperand(0).getMBB())
2594 .addImm(BranchCode);
2595 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1))
2596 .addMBB(TargetBB);
2598 OldInst->eraseFromParent();
2599 UnCondBrIter->eraseFromParent();
2601 // Restart the analysis.
2602 UnCondBrIter = MBB.end();
2603 I = MBB.end();
2604 continue;
2605 }
2608 TBB = I->getOperand(0).getMBB();
2609 Cond.push_back(MachineOperand::CreateImm(BranchCode));
2610 CondBranches.push_back(&*I);
2611 continue;
2612 }
2614 // Handle subsequent conditional branches. Only handle the case where all
2615 // conditional branches branch to the same destination and their condition
2616 // opcodes fit one of the special multi-branch idioms.
2617 assert(Cond.size() == 1);
2618 assert(TBB);
2620 // If the conditions are the same, we can leave them alone.
2621 X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
2622 auto NewTBB = I->getOperand(0).getMBB();
2623 if (OldBranchCode == BranchCode && TBB == NewTBB)
2624 continue;
2626 // If they differ, see if they fit one of the known patterns. Theoretically,
2627 // we could handle more patterns here, but we shouldn't expect to see them
2628 // if instruction selection has done a reasonable job.
2629 if (TBB == NewTBB &&
2630 ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) ||
2631 (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) {
2632 BranchCode = X86::COND_NE_OR_P;
2633 } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) ||
2634 (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) {
2635 if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB)))
2636 return true;
2638 // X86::COND_E_AND_NP usually has two different branch destinations.
2646 // Here this condition branches to B2 only if NP && E. It has another
2655 // Similarly it branches to B2 only if E && NP. That is why this condition
2656 // is named with COND_E_AND_NP.
2657 BranchCode = X86::COND_E_AND_NP;
2658 } else
2659 return true;
2661 // Update the MachineOperand.
2662 Cond[0].setImm(BranchCode);
2663 CondBranches.push_back(&*I);
2664 }
2666 return false;
2667 }
2669 bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
2670 MachineBasicBlock *&TBB,
2671 MachineBasicBlock *&FBB,
2672 SmallVectorImpl<MachineOperand> &Cond,
2673 bool AllowModify) const {
2674 SmallVector<MachineInstr *, 4> CondBranches;
2675 return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
2678 bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
2679 MachineBranchPredicate &MBP,
2680 bool AllowModify) const {
2681 using namespace std::placeholders;
2683 SmallVector<MachineOperand, 4> Cond;
2684 SmallVector<MachineInstr *, 4> CondBranches;
2685 if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches,
2686 AllowModify))
2687 return true;
2689 if (Cond.size() != 1)
2690 return true;
2692 assert(MBP.TrueDest && "expected!");
2694 if (!MBP.FalseDest)
2695 MBP.FalseDest = MBB.getNextNode();
2697 const TargetRegisterInfo *TRI = &getRegisterInfo();
2699 MachineInstr *ConditionDef = nullptr;
2700 bool SingleUseCondition = true;
2702 for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) {
2703 if (I->modifiesRegister(X86::EFLAGS, TRI)) {
2704 ConditionDef = &*I;
2705 break;
2706 }
2708 if (I->readsRegister(X86::EFLAGS, TRI))
2709 SingleUseCondition = false;
2710 }
2712 if (!ConditionDef)
2713 return true;
2715 if (SingleUseCondition) {
2716 for (auto *Succ : MBB.successors())
2717 if (Succ->isLiveIn(X86::EFLAGS))
2718 SingleUseCondition = false;
2719 }
2721 MBP.ConditionDef = ConditionDef;
2722 MBP.SingleUseCondition = SingleUseCondition;
2724 // Currently we only recognize the simple pattern:
2725 //
2726 //   test %reg, %reg
2727 //   je %label
2728 //
2729 const unsigned TestOpcode =
2730 Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
2732 if (ConditionDef->getOpcode() == TestOpcode &&
2733 ConditionDef->getNumOperands() == 3 &&
2734 ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) &&
2735 (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) {
2736 MBP.LHS = ConditionDef->getOperand(0);
2737 MBP.RHS = MachineOperand::CreateImm(0);
2738 MBP.Predicate = Cond[0].getImm() == X86::COND_NE
2739 ? MachineBranchPredicate::PRED_NE
2740 : MachineBranchPredicate::PRED_EQ;
2741 return false;
2742 }
2744 return true;
2745 }
2747 unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
2748 int *BytesRemoved) const {
2749 assert(!BytesRemoved && "code size not handled");
2751 MachineBasicBlock::iterator I = MBB.end();
2752 unsigned Count = 0;
2754 while (I != MBB.begin()) {
2755 --I;
2756 if (I->isDebugInstr())
2757 continue;
2758 if (I->getOpcode() != X86::JMP_1 &&
2759 X86::getCondFromBranch(*I) == X86::COND_INVALID)
2760 break;
2761 // Remove the branch.
2762 I->eraseFromParent();
2763 I = MBB.end();
2764 ++Count;
2765 }
2767 return Count;
2768 }
2770 unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
2771 MachineBasicBlock *TBB,
2772 MachineBasicBlock *FBB,
2773 ArrayRef<MachineOperand> Cond,
2774 const DebugLoc &DL,
2775 int *BytesAdded) const {
2776 // Shouldn't be a fall through.
2777 assert(TBB && "insertBranch must not be told to insert a fallthrough");
2778 assert((Cond.size() == 1 || Cond.size() == 0) &&
2779 "X86 branch conditions have one component!");
2780 assert(!BytesAdded && "code size not handled");
2782 if (Cond.empty()) {
2783 // Unconditional branch?
2784 assert(!FBB && "Unconditional branch with multiple successors!");
2785 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB);
2786 return 1;
2787 }
2789 // If FBB is null, it is implied to be a fall-through block.
2790 bool FallThru = FBB == nullptr;
2792 // Conditional branch.
2793 unsigned Count = 0;
2794 X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
2795 switch (CC) {
2796 case X86::COND_NE_OR_P:
2797 // Synthesize NE_OR_P with two branches.
2798 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE);
2799 ++Count;
2800 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P);
2801 ++Count;
2802 break;
2803 case X86::COND_E_AND_NP:
2804 // Use the next block of MBB as FBB if it is null.
2805 if (FBB == nullptr) {
2806 FBB = getFallThroughMBB(&MBB, TBB);
2807 assert(FBB && "MBB cannot be the last block in function when the false "
2808 "body is a fall-through.");
2809 }
2810 // Synthesize COND_E_AND_NP with two branches.
2811 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE);
2812 ++Count;
2813 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP);
2814 ++Count;
2815 break;
2816 default:
2817 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC);
2818 ++Count;
2819 }
2821 if (!FallThru) {
2822 // Two-way Conditional branch. Insert the second branch.
2823 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
2824 ++Count;
2825 }
2826 return Count;
2827 }
2829 bool X86InstrInfo::
2830 canInsertSelect(const MachineBasicBlock &MBB,
2831 ArrayRef<MachineOperand> Cond,
2832 unsigned TrueReg, unsigned FalseReg,
2833 int &CondCycles, int &TrueCycles, int &FalseCycles) const {
2834 // Not all subtargets have cmov instructions.
2835 if (!Subtarget.hasCMov())
2836 return false;
2837 if (Cond.size() != 1)
2838 return false;
2839 // We cannot do the composite conditions, at least not in SSA form.
2840 if ((X86::CondCode)Cond[0].getImm() > X86::LAST_VALID_COND)
2841 return false;
2843 // Check register classes.
2844 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2845 const TargetRegisterClass *RC =
2846 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
2847 if (!RC)
2848 return false;
2850 // We have cmov instructions for 16, 32, and 64 bit general purpose registers.
2851 if (X86::GR16RegClass.hasSubClassEq(RC) ||
2852 X86::GR32RegClass.hasSubClassEq(RC) ||
2853 X86::GR64RegClass.hasSubClassEq(RC)) {
2854 // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy
2855 // Bridge. Probably Ivy Bridge as well.
2856 CondCycles = 2;
2857 TrueCycles = 2;
2858 FalseCycles = 2;
2859 return true;
2860 }
2862 // Can't do vectors.
2863 return false;
2864 }
2866 void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
2867 MachineBasicBlock::iterator I,
2868 const DebugLoc &DL, unsigned DstReg,
2869 ArrayRef<MachineOperand> Cond, unsigned TrueReg,
2870 unsigned FalseReg) const {
2871 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2872 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
2873 const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
2874 assert(Cond.size() == 1 && "Invalid Cond array");
2875 unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8,
2876 false /*HasMemoryOperand*/);
2877 BuildMI(MBB, I, DL, get(Opc), DstReg)
2878 .addReg(FalseReg)
2879 .addReg(TrueReg)
2880 .addImm(Cond[0].getImm());
2881 }
2883 /// Test if the given register is a physical h register.
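/// The h registers are AH, BH, CH and DH. They alias the high byte of the
/// corresponding 16-bit register and cannot be encoded in an instruction that
/// also carries a REX prefix, which is why the copy and spill code falls back
/// to the _NOREX move forms for them in 64-bit mode.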
2884 static bool isHReg(unsigned Reg) {
2885 return X86::GR8_ABCD_HRegClass.contains(Reg);
2888 // Try and copy between VR128/VR64 and GR64 registers.
2889 static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
2890 const X86Subtarget &Subtarget) {
2891 bool HasAVX = Subtarget.hasAVX();
2892 bool HasAVX512 = Subtarget.hasAVX512();
2894 // SrcReg(MaskReg) -> DestReg(GR64)
2895 // SrcReg(MaskReg) -> DestReg(GR32)
2897 // All KMASK RegClasses hold the same k registers, can be tested against anyone.
2898 if (X86::VK16RegClass.contains(SrcReg)) {
2899 if (X86::GR64RegClass.contains(DestReg)) {
2900 assert(Subtarget.hasBWI());
2901 return X86::KMOVQrk;
2903 if (X86::GR32RegClass.contains(DestReg))
2904 return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk;
2907 // SrcReg(GR64) -> DestReg(MaskReg)
2908 // SrcReg(GR32) -> DestReg(MaskReg)
2910 // All KMASK RegClasses hold the same k registers, can be tested against anyone.
2911 if (X86::VK16RegClass.contains(DestReg)) {
2912 if (X86::GR64RegClass.contains(SrcReg)) {
2913 assert(Subtarget.hasBWI());
2914 return X86::KMOVQkr;
2916 if (X86::GR32RegClass.contains(SrcReg))
2917 return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr;
2921 // SrcReg(VR128) -> DestReg(GR64)
2922 // SrcReg(VR64) -> DestReg(GR64)
2923 // SrcReg(GR64) -> DestReg(VR128)
2924 // SrcReg(GR64) -> DestReg(VR64)
2926 if (X86::GR64RegClass.contains(DestReg)) {
2927 if (X86::VR128XRegClass.contains(SrcReg))
2928 // Copy from a VR128 register to a GR64 register.
2929 return HasAVX512 ? X86::VMOVPQIto64Zrr :
2930 HasAVX ? X86::VMOVPQIto64rr :
2931 X86::MOVPQIto64rr;
2932 if (X86::VR64RegClass.contains(SrcReg))
2933 // Copy from a VR64 register to a GR64 register.
2934 return X86::MMX_MOVD64from64rr;
2935 } else if (X86::GR64RegClass.contains(SrcReg)) {
2936 // Copy from a GR64 register to a VR128 register.
2937 if (X86::VR128XRegClass.contains(DestReg))
2938 return HasAVX512 ? X86::VMOV64toPQIZrr :
2939 HasAVX ? X86::VMOV64toPQIrr :
2940 X86::MOV64toPQIrr;
2941 // Copy from a GR64 register to a VR64 register.
2942 if (X86::VR64RegClass.contains(DestReg))
2943 return X86::MMX_MOVD64to64rr;
2946 // SrcReg(VR128) -> DestReg(GR32)
2947 // SrcReg(GR32) -> DestReg(VR128)
2949 if (X86::GR32RegClass.contains(DestReg) &&
2950 X86::VR128XRegClass.contains(SrcReg))
2951 // Copy from a VR128 register to a GR32 register.
2952 return HasAVX512 ? X86::VMOVPDI2DIZrr :
2953 HasAVX ? X86::VMOVPDI2DIrr :
2954 X86::MOVPDI2DIrr;
2956 if (X86::VR128XRegClass.contains(DestReg) &&
2957 X86::GR32RegClass.contains(SrcReg))
2958 // Copy from a GR32 register to a VR128 register.
2959 return HasAVX512 ? X86::VMOVDI2PDIZrr :
2960 HasAVX ? X86::VMOVDI2PDIrr :
2961 X86::MOVDI2PDIrr;
2962 return 0;
2963 }
2965 void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
2966 MachineBasicBlock::iterator MI,
2967 const DebugLoc &DL, MCRegister DestReg,
2968 MCRegister SrcReg, bool KillSrc) const {
2969 // First deal with the normal symmetric copies.
2970 bool HasAVX = Subtarget.hasAVX();
2971 bool HasVLX = Subtarget.hasVLX();
2972 unsigned Opc = 0;
2973 if (X86::GR64RegClass.contains(DestReg, SrcReg))
2974 Opc = X86::MOV64rr;
2975 else if (X86::GR32RegClass.contains(DestReg, SrcReg))
2976 Opc = X86::MOV32rr;
2977 else if (X86::GR16RegClass.contains(DestReg, SrcReg))
2978 Opc = X86::MOV16rr;
2979 else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
2980 // Copying to or from a physical H register on x86-64 requires a NOREX
2981 // move. Otherwise use a normal move.
2982 if ((isHReg(DestReg) || isHReg(SrcReg)) &&
2983 Subtarget.is64Bit()) {
2984 Opc = X86::MOV8rr_NOREX;
2985 // Both operands must be encodable without an REX prefix.
2986 assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
2987 "8-bit H register can not be copied outside GR8_NOREX");
2991 else if (X86::VR64RegClass.contains(DestReg, SrcReg))
2992 Opc = X86::MMX_MOVQ64rr;
2993 else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
2994 if (HasVLX)
2995 Opc = X86::VMOVAPSZ128rr;
2996 else if (X86::VR128RegClass.contains(DestReg, SrcReg))
2997 Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
2998 else {
2999 // If this is an extended register and we don't have VLX we need to use a
3000 // 512-bit move.
3001 Opc = X86::VMOVAPSZrr;
3002 const TargetRegisterInfo *TRI = &getRegisterInfo();
3003 DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm,
3004 &X86::VR512RegClass);
3005 SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm,
3006 &X86::VR512RegClass);
3007 }
3008 } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
3009 if (HasVLX)
3010 Opc = X86::VMOVAPSZ256rr;
3011 else if (X86::VR256RegClass.contains(DestReg, SrcReg))
3012 Opc = X86::VMOVAPSYrr;
3013 else {
3014 // If this is an extended register and we don't have VLX we need to use a
3015 // 512-bit move.
3016 Opc = X86::VMOVAPSZrr;
3017 const TargetRegisterInfo *TRI = &getRegisterInfo();
3018 DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm,
3019 &X86::VR512RegClass);
3020 SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm,
3021 &X86::VR512RegClass);
3022 }
3023 } else if (X86::VR512RegClass.contains(DestReg, SrcReg))
3024 Opc = X86::VMOVAPSZrr;
3025 // All KMASK RegClasses hold the same k registers, can be tested against anyone.
3026 else if (X86::VK16RegClass.contains(DestReg, SrcReg))
3027 Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk;
3028 else
3029 Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);
3031 if (Opc) {
3032 BuildMI(MBB, MI, DL, get(Opc), DestReg)
3033 .addReg(SrcReg, getKillRegState(KillSrc));
3034 return;
3035 }
3037 if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
3038 // FIXME: We use a fatal error here because historically LLVM has tried
3039 // lower some of these physreg copies and we want to ensure we get
3040 // reasonable bug reports if someone encounters a case no other testing
3041 // found. This path should be removed after the LLVM 7 release.
3042 report_fatal_error("Unable to copy EFLAGS physical register!");
3043 }
3045 LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
3046 << RI.getName(DestReg) << '\n');
3047 report_fatal_error("Cannot emit physreg copy instruction");
3050 Optional<DestSourcePair>
3051 X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
3052 if (MI.isMoveReg())
3053 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
3054 return None;
3055 }
3057 static unsigned getLoadStoreRegOpcode(unsigned Reg,
3058 const TargetRegisterClass *RC,
3059 bool isStackAligned,
3060 const X86Subtarget &STI,
3061 bool load) {
3062 bool HasAVX = STI.hasAVX();
3063 bool HasAVX512 = STI.hasAVX512();
3064 bool HasVLX = STI.hasVLX();
3066 switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
3068 llvm_unreachable("Unknown spill size");
3070 assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
3071 if (STI.is64Bit())
3072 // Copying to or from a physical H register on x86-64 requires a NOREX
3073 // move. Otherwise use a normal move.
3074 if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
3075 return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
3076 return load ? X86::MOV8rm : X86::MOV8mr;
3077 case 2:
3078 if (X86::VK16RegClass.hasSubClassEq(RC))
3079 return load ? X86::KMOVWkm : X86::KMOVWmk;
3080 assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
3081 return load ? X86::MOV16rm : X86::MOV16mr;
3082 case 4:
3083 if (X86::GR32RegClass.hasSubClassEq(RC))
3084 return load ? X86::MOV32rm : X86::MOV32mr;
3085 if (X86::FR32XRegClass.hasSubClassEq(RC))
3086 return load ?
3087 (HasAVX512 ? X86::VMOVSSZrm_alt :
3088 HasAVX ? X86::VMOVSSrm_alt :
3089 X86::MOVSSrm_alt) :
3090 (HasAVX512 ? X86::VMOVSSZmr :
3091 HasAVX ? X86::VMOVSSmr :
3092 X86::MOVSSmr);
3093 if (X86::RFP32RegClass.hasSubClassEq(RC))
3094 return load ? X86::LD_Fp32m : X86::ST_Fp32m;
3095 if (X86::VK32RegClass.hasSubClassEq(RC)) {
3096 assert(STI.hasBWI() && "KMOVD requires BWI");
3097 return load ? X86::KMOVDkm : X86::KMOVDmk;
3099 // All of these mask pair classes have the same spill size, the same kind
3100 // of kmov instructions can be used with all of them.
3101 if (X86::VK1PAIRRegClass.hasSubClassEq(RC) ||
3102 X86::VK2PAIRRegClass.hasSubClassEq(RC) ||
3103 X86::VK4PAIRRegClass.hasSubClassEq(RC) ||
3104 X86::VK8PAIRRegClass.hasSubClassEq(RC) ||
3105 X86::VK16PAIRRegClass.hasSubClassEq(RC))
3106 return load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE;
3107 llvm_unreachable("Unknown 4-byte regclass");
3108 case 8:
3109 if (X86::GR64RegClass.hasSubClassEq(RC))
3110 return load ? X86::MOV64rm : X86::MOV64mr;
3111 if (X86::FR64XRegClass.hasSubClassEq(RC))
3112 return load ?
3113 (HasAVX512 ? X86::VMOVSDZrm_alt :
3114 HasAVX ? X86::VMOVSDrm_alt :
3115 X86::MOVSDrm_alt) :
3116 (HasAVX512 ? X86::VMOVSDZmr :
3117 HasAVX ? X86::VMOVSDmr :
3118 X86::MOVSDmr);
3119 if (X86::VR64RegClass.hasSubClassEq(RC))
3120 return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
3121 if (X86::RFP64RegClass.hasSubClassEq(RC))
3122 return load ? X86::LD_Fp64m : X86::ST_Fp64m;
3123 if (X86::VK64RegClass.hasSubClassEq(RC)) {
3124 assert(STI.hasBWI() && "KMOVQ requires BWI");
3125 return load ? X86::KMOVQkm : X86::KMOVQmk;
3127 llvm_unreachable("Unknown 8-byte regclass");
3128 case 10:
3129 assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
3130 return load ? X86::LD_Fp80m : X86::ST_FpP80m;
3131 case 16: {
3132 if (X86::VR128XRegClass.hasSubClassEq(RC)) {
3133 // If stack is realigned we can use aligned stores.
3134 if (isStackAligned)
3135 return load ?
3136 (HasVLX ? X86::VMOVAPSZ128rm :
3137 HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX :
3138 HasAVX ? X86::VMOVAPSrm :
3139 X86::MOVAPSrm):
3140 (HasVLX ? X86::VMOVAPSZ128mr :
3141 HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX :
3142 HasAVX ? X86::VMOVAPSmr :
3143 X86::MOVAPSmr);
3144 else
3145 return load ?
3146 (HasVLX ? X86::VMOVUPSZ128rm :
3147 HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX :
3148 HasAVX ? X86::VMOVUPSrm :
3149 X86::MOVUPSrm):
3150 (HasVLX ? X86::VMOVUPSZ128mr :
3151 HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX :
3152 HasAVX ? X86::VMOVUPSmr :
3153 X86::MOVUPSmr);
3154 }
3155 if (X86::BNDRRegClass.hasSubClassEq(RC)) {
3156 if (STI.is64Bit())
3157 return load ? X86::BNDMOV64rm : X86::BNDMOV64mr;
3158 else
3159 return load ? X86::BNDMOV32rm : X86::BNDMOV32mr;
3160 }
3161 llvm_unreachable("Unknown 16-byte regclass");
3162 }
3163 case 32:
3164 assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
3165 // If stack is realigned we can use aligned stores.
3166 if (isStackAligned)
3167 return load ?
3168 (HasVLX ? X86::VMOVAPSZ256rm :
3169 HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX :
3170 X86::VMOVAPSYrm) :
3171 (HasVLX ? X86::VMOVAPSZ256mr :
3172 HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX :
3173 X86::VMOVAPSYmr);
3174 else
3175 return load ?
3176 (HasVLX ? X86::VMOVUPSZ256rm :
3177 HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX :
3178 X86::VMOVUPSYrm) :
3179 (HasVLX ? X86::VMOVUPSZ256mr :
3180 HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX :
3181 X86::VMOVUPSYmr);
3182 case 64:
3183 assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
3184 assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
3185 if (isStackAligned)
3186 return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
3187 else
3188 return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
3189 }
3190 }
3192 bool X86InstrInfo::getMemOperandWithOffset(
3193 const MachineInstr &MemOp, const MachineOperand *&BaseOp, int64_t &Offset,
3194 const TargetRegisterInfo *TRI) const {
3195 const MCInstrDesc &Desc = MemOp.getDesc();
3196 int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
3197 if (MemRefBegin < 0)
3198 return false;
3200 MemRefBegin += X86II::getOperandBias(Desc);
3202 BaseOp = &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
3203 if (!BaseOp->isReg()) // Can be an MO_FrameIndex
3204 return false;
3206 if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
3207 return false;
3209 if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
3210 X86::NoRegister)
3211 return false;
3213 const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp);
3215 // Displacement can be symbolic
3216 if (!DispMO.isImm())
3217 return false;
3219 Offset = DispMO.getImm();
3221 if (!BaseOp->isReg())
3222 return false;
3224 return true;
3225 }
3227 static unsigned getStoreRegOpcode(unsigned SrcReg,
3228 const TargetRegisterClass *RC,
3229 bool isStackAligned,
3230 const X86Subtarget &STI) {
3231 return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false);
3235 static unsigned getLoadRegOpcode(unsigned DestReg,
3236 const TargetRegisterClass *RC,
3237 bool isStackAligned,
3238 const X86Subtarget &STI) {
3239 return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true);
3242 void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
3243 MachineBasicBlock::iterator MI,
3244 unsigned SrcReg, bool isKill, int FrameIdx,
3245 const TargetRegisterClass *RC,
3246 const TargetRegisterInfo *TRI) const {
3247 const MachineFunction &MF = *MBB.getParent();
3248 assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
3249 "Stack slot too small for store");
3250 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
3251 bool isAligned =
3252 (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
3253 RI.canRealignStack(MF);
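// For example, a 64-byte ZMM spill only uses the aligned VMOVAPS form when
// the stack alignment is at least 64 bytes or the function is allowed to
// realign its stack; otherwise getStoreRegOpcode falls back to VMOVUPS.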
3254 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
3255 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
3256 .addReg(SrcReg, getKillRegState(isKill));
3259 void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
3260 MachineBasicBlock::iterator MI,
3261 unsigned DestReg, int FrameIdx,
3262 const TargetRegisterClass *RC,
3263 const TargetRegisterInfo *TRI) const {
3264 const MachineFunction &MF = *MBB.getParent();
3265 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
3266 bool isAligned =
3267 (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
3268 RI.canRealignStack(MF);
3269 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
3270 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), FrameIdx);
3273 bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
3274 unsigned &SrcReg2, int &CmpMask,
3275 int &CmpValue) const {
3276 switch (MI.getOpcode()) {
3277 default: break;
3278 case X86::CMP64ri32:
3279 case X86::CMP64ri8:
3280 case X86::CMP32ri:
3281 case X86::CMP32ri8:
3282 case X86::CMP16ri:
3283 case X86::CMP16ri8:
3284 case X86::CMP8ri:
3285 SrcReg = MI.getOperand(0).getReg();
3286 SrcReg2 = 0;
3287 if (MI.getOperand(1).isImm()) {
3288 CmpMask = ~0;
3289 CmpValue = MI.getOperand(1).getImm();
3290 } else {
3291 CmpMask = CmpValue = 0;
3292 }
3293 return true;
3294 // A SUB can be used to perform comparison.
3295 case X86::SUB64rm:
3296 case X86::SUB32rm:
3297 case X86::SUB16rm:
3298 case X86::SUB8rm:
3299 SrcReg = MI.getOperand(1).getReg();
3300 SrcReg2 = 0;
3301 CmpMask = 0;
3302 CmpValue = 0;
3303 return true;
3304 case X86::SUB64rr:
3305 case X86::SUB32rr:
3306 case X86::SUB16rr:
3307 case X86::SUB8rr:
3308 SrcReg = MI.getOperand(1).getReg();
3309 SrcReg2 = MI.getOperand(2).getReg();
3310 CmpMask = 0;
3311 CmpValue = 0;
3312 return true;
3313 case X86::SUB64ri32:
3314 case X86::SUB64ri8:
3315 case X86::SUB32ri:
3316 case X86::SUB32ri8:
3317 case X86::SUB16ri:
3318 case X86::SUB16ri8:
3319 case X86::SUB8ri:
3320 SrcReg = MI.getOperand(1).getReg();
3321 SrcReg2 = 0;
3322 if (MI.getOperand(2).isImm()) {
3323 CmpMask = ~0;
3324 CmpValue = MI.getOperand(2).getImm();
3325 } else {
3326 CmpMask = CmpValue = 0;
3327 }
3328 return true;
3329 case X86::CMP64rr:
3330 case X86::CMP32rr:
3331 case X86::CMP16rr:
3332 case X86::CMP8rr:
3333 SrcReg = MI.getOperand(0).getReg();
3334 SrcReg2 = MI.getOperand(1).getReg();
3335 CmpMask = 0;
3336 CmpValue = 0;
3337 return true;
3338 case X86::TEST8rr:
3339 case X86::TEST16rr:
3340 case X86::TEST32rr:
3341 case X86::TEST64rr:
3342 SrcReg = MI.getOperand(0).getReg();
3343 if (MI.getOperand(1).getReg() != SrcReg)
3344 return false;
3345 // Compare against zero.
3346 SrcReg2 = 0;
3347 CmpMask = ~0;
3348 CmpValue = 0;
3349 return true;
3350 }
3351 return false;
3352 }
3354 /// Check whether the first instruction, whose only
3355 /// purpose is to update flags, can be made redundant.
3356 /// CMPrr can be made redundant by SUBrr if the operands are the same.
3357 /// This function can be extended later on.
3358 /// SrcReg, SrcRegs: register operands for FlagI.
3359 /// ImmValue: immediate for FlagI if it takes an immediate.
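/// For example, a "CMP a, b" that appears after a "SUB a, b" recomputes
/// exactly the flags the SUB already produced, so the CMP is redundant; the
/// register form is also matched with the operands reversed, in which case
/// the caller compensates by rewriting users with the swapped condition code.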
3360 inline static bool isRedundantFlagInstr(const MachineInstr &FlagI,
3361 unsigned SrcReg, unsigned SrcReg2,
3362 int ImmMask, int ImmValue,
3363 const MachineInstr &OI) {
3364 if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
3365 (FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) ||
3366 (FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) ||
3367 (FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) &&
3368 ((OI.getOperand(1).getReg() == SrcReg &&
3369 OI.getOperand(2).getReg() == SrcReg2) ||
3370 (OI.getOperand(1).getReg() == SrcReg2 &&
3371 OI.getOperand(2).getReg() == SrcReg)))
3372 return true;
3374 if (ImmMask != 0 &&
3375 ((FlagI.getOpcode() == X86::CMP64ri32 &&
3376 OI.getOpcode() == X86::SUB64ri32) ||
3377 (FlagI.getOpcode() == X86::CMP64ri8 &&
3378 OI.getOpcode() == X86::SUB64ri8) ||
3379 (FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) ||
3380 (FlagI.getOpcode() == X86::CMP32ri8 &&
3381 OI.getOpcode() == X86::SUB32ri8) ||
3382 (FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) ||
3383 (FlagI.getOpcode() == X86::CMP16ri8 &&
3384 OI.getOpcode() == X86::SUB16ri8) ||
3385 (FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) &&
3386 OI.getOperand(1).getReg() == SrcReg &&
3387 OI.getOperand(2).getImm() == ImmValue)
3388 return true;
3389 return false;
3390 }
3392 /// Check whether the definition can be converted
3393 /// to remove a comparison against zero.
3394 inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag) {
3395 NoSignFlag = false;
3397 switch (MI.getOpcode()) {
3398 default: return false;
3400 // The shift instructions only modify ZF if their shift count is non-zero.
3401 // N.B.: The processor truncates the shift count depending on the encoding.
3402 case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri:
3403 case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri:
3404 return getTruncatedShiftCount(MI, 2) != 0;
3406 // Some left shift instructions can be turned into LEA instructions but only
3407 // if their flags aren't used. Avoid transforming such instructions.
3408 case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{
3409 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
3410 if (isTruncatedShiftCountForLEA(ShAmt)) return false;
3411 return ShAmt != 0;
3412 }
3414 case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8:
3415 case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8:
3416 return getTruncatedShiftCount(MI, 3) != 0;
3418 case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
3419 case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8:
3420 case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr:
3421 case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm:
3422 case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm:
3423 case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r:
3424 case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
3425 case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8:
3426 case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr:
3427 case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm:
3428 case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm:
3429 case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r:
3430 case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
3431 case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8:
3432 case X86::AND8ri: case X86::AND64rr: case X86::AND32rr:
3433 case X86::AND16rr: case X86::AND8rr: case X86::AND64rm:
3434 case X86::AND32rm: case X86::AND16rm: case X86::AND8rm:
3435 case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri:
3436 case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8:
3437 case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr:
3438 case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm:
3439 case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm:
3440 case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri:
3441 case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8:
3442 case X86::OR8ri: case X86::OR64rr: case X86::OR32rr:
3443 case X86::OR16rr: case X86::OR8rr: case X86::OR64rm:
3444 case X86::OR32rm: case X86::OR16rm: case X86::OR8rm:
3445 case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri:
3446 case X86::ADC32ri8: case X86::ADC16ri: case X86::ADC16ri8:
3447 case X86::ADC8ri: case X86::ADC64rr: case X86::ADC32rr:
3448 case X86::ADC16rr: case X86::ADC8rr: case X86::ADC64rm:
3449 case X86::ADC32rm: case X86::ADC16rm: case X86::ADC8rm:
3450 case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri:
3451 case X86::SBB32ri8: case X86::SBB16ri: case X86::SBB16ri8:
3452 case X86::SBB8ri: case X86::SBB64rr: case X86::SBB32rr:
3453 case X86::SBB16rr: case X86::SBB8rr: case X86::SBB64rm:
3454 case X86::SBB32rm: case X86::SBB16rm: case X86::SBB8rm:
3455 case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:
3456 case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1:
3457 case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1:
3458 case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1:
3459 case X86::ANDN32rr: case X86::ANDN32rm:
3460 case X86::ANDN64rr: case X86::ANDN64rm:
3461 case X86::BLSI32rr: case X86::BLSI32rm:
3462 case X86::BLSI64rr: case X86::BLSI64rm:
3463 case X86::BLSMSK32rr:case X86::BLSMSK32rm:
3464 case X86::BLSMSK64rr:case X86::BLSMSK64rm:
3465 case X86::BLSR32rr: case X86::BLSR32rm:
3466 case X86::BLSR64rr: case X86::BLSR64rm:
3467 case X86::BZHI32rr: case X86::BZHI32rm:
3468 case X86::BZHI64rr: case X86::BZHI64rm:
3469 case X86::LZCNT16rr: case X86::LZCNT16rm:
3470 case X86::LZCNT32rr: case X86::LZCNT32rm:
3471 case X86::LZCNT64rr: case X86::LZCNT64rm:
3472 case X86::POPCNT16rr:case X86::POPCNT16rm:
3473 case X86::POPCNT32rr:case X86::POPCNT32rm:
3474 case X86::POPCNT64rr:case X86::POPCNT64rm:
3475 case X86::TZCNT16rr: case X86::TZCNT16rm:
3476 case X86::TZCNT32rr: case X86::TZCNT32rm:
3477 case X86::TZCNT64rr: case X86::TZCNT64rm:
3478 case X86::BLCFILL32rr: case X86::BLCFILL32rm:
3479 case X86::BLCFILL64rr: case X86::BLCFILL64rm:
3480 case X86::BLCI32rr: case X86::BLCI32rm:
3481 case X86::BLCI64rr: case X86::BLCI64rm:
3482 case X86::BLCIC32rr: case X86::BLCIC32rm:
3483 case X86::BLCIC64rr: case X86::BLCIC64rm:
3484 case X86::BLCMSK32rr: case X86::BLCMSK32rm:
3485 case X86::BLCMSK64rr: case X86::BLCMSK64rm:
3486 case X86::BLCS32rr: case X86::BLCS32rm:
3487 case X86::BLCS64rr: case X86::BLCS64rm:
3488 case X86::BLSFILL32rr: case X86::BLSFILL32rm:
3489 case X86::BLSFILL64rr: case X86::BLSFILL64rm:
3490 case X86::BLSIC32rr: case X86::BLSIC32rm:
3491 case X86::BLSIC64rr: case X86::BLSIC64rm:
3492 case X86::T1MSKC32rr: case X86::T1MSKC32rm:
3493 case X86::T1MSKC64rr: case X86::T1MSKC64rm:
3494 case X86::TZMSK32rr: case X86::TZMSK32rm:
3495 case X86::TZMSK64rr: case X86::TZMSK64rm:
3496 return true;
3497 case X86::BEXTR32rr: case X86::BEXTR64rr:
3498 case X86::BEXTR32rm: case X86::BEXTR64rm:
3499 case X86::BEXTRI32ri: case X86::BEXTRI32mi:
3500 case X86::BEXTRI64ri: case X86::BEXTRI64mi:
3501 // BEXTR doesn't update the sign flag so we can't use it.
3502 NoSignFlag = true;
3503 return true;
3504 }
3505 }
3507 /// Check whether the use can be converted to remove a comparison against zero.
3508 static X86::CondCode isUseDefConvertible(const MachineInstr &MI) {
3509 switch (MI.getOpcode()) {
3510 default: return X86::COND_INVALID;
3511 case X86::NEG8r:
3512 case X86::NEG16r:
3513 case X86::NEG32r:
3514 case X86::NEG64r:
3515 return X86::COND_AE;
3516 case X86::LZCNT16rr:
3517 case X86::LZCNT32rr:
3518 case X86::LZCNT64rr:
3519 return X86::COND_B;
3520 case X86::POPCNT16rr:
3521 case X86::POPCNT32rr:
3522 case X86::POPCNT64rr:
3523 return X86::COND_E;
3524 case X86::TZCNT16rr:
3525 case X86::TZCNT32rr:
3526 case X86::TZCNT64rr:
3527 return X86::COND_B;
3528 case X86::BSF16rr:
3529 case X86::BSF32rr:
3530 case X86::BSF64rr:
3531 case X86::BSR16rr:
3532 case X86::BSR32rr:
3533 case X86::BSR64rr:
3534 return X86::COND_E;
3535 case X86::BLSI32rr:
3536 case X86::BLSI64rr:
3537 return X86::COND_AE;
3538 case X86::BLSR32rr:
3539 case X86::BLSR64rr:
3540 case X86::BLSMSK32rr:
3541 case X86::BLSMSK64rr:
3542 return X86::COND_B;
3543 // TODO: TBM instructions.
3544 }
3545 }
3547 /// Check if there exists an earlier instruction that
3548 /// operates on the same source operands and sets flags in the same way as
3549 /// Compare; remove Compare if possible.
3550 bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
3551 unsigned SrcReg2, int CmpMask,
3553 const MachineRegisterInfo *MRI) const {
3554 // Check whether we can replace SUB with CMP.
3555 switch (CmpInstr.getOpcode()) {
3556 default: break;
3557 case X86::SUB64ri32:
3558 case X86::SUB64ri8:
3559 case X86::SUB32ri:
3560 case X86::SUB32ri8:
3561 case X86::SUB16ri:
3562 case X86::SUB16ri8:
3563 case X86::SUB8ri:
3564 case X86::SUB64rm:
3565 case X86::SUB32rm:
3566 case X86::SUB16rm:
3567 case X86::SUB8rm:
3568 case X86::SUB64rr:
3569 case X86::SUB32rr:
3570 case X86::SUB16rr:
3571 case X86::SUB8rr: {
3572 if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
3573 return false;
3574 // There is no use of the destination register, we can replace SUB with CMP.
3575 unsigned NewOpcode = 0;
3576 switch (CmpInstr.getOpcode()) {
3577 default: llvm_unreachable("Unreachable!");
3578 case X86::SUB64rm: NewOpcode = X86::CMP64rm; break;
3579 case X86::SUB32rm: NewOpcode = X86::CMP32rm; break;
3580 case X86::SUB16rm: NewOpcode = X86::CMP16rm; break;
3581 case X86::SUB8rm: NewOpcode = X86::CMP8rm; break;
3582 case X86::SUB64rr: NewOpcode = X86::CMP64rr; break;
3583 case X86::SUB32rr: NewOpcode = X86::CMP32rr; break;
3584 case X86::SUB16rr: NewOpcode = X86::CMP16rr; break;
3585 case X86::SUB8rr: NewOpcode = X86::CMP8rr; break;
3586 case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
3587 case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break;
3588 case X86::SUB32ri: NewOpcode = X86::CMP32ri; break;
3589 case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break;
3590 case X86::SUB16ri: NewOpcode = X86::CMP16ri; break;
3591 case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break;
3592 case X86::SUB8ri: NewOpcode = X86::CMP8ri; break;
3594 CmpInstr.setDesc(get(NewOpcode));
3595 CmpInstr.RemoveOperand(0);
3596 // Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
3597 if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
3598 NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
3599 return false;
3600 }
3601 }
3603 // Get the unique definition of SrcReg.
3604 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
3605 if (!MI) return false;
3607 // CmpInstr is the first instruction of the BB.
3608 MachineBasicBlock::iterator I = CmpInstr, Def = MI;
3610 // If we are comparing against zero, check whether we can use MI to update
3611 // EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
3612 bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
3613 if (IsCmpZero && MI->getParent() != CmpInstr.getParent())
3616 // If we have a use of the source register between the def and our compare
3617 // instruction we can eliminate the compare iff the use sets EFLAGS in the
3618 // same way.
3619 bool ShouldUpdateCC = false;
3620 bool NoSignFlag = false;
3621 X86::CondCode NewCC = X86::COND_INVALID;
3622 if (IsCmpZero && !isDefConvertible(*MI, NoSignFlag)) {
3623 // Scan forward from the def until we hit the use we're looking for or the
3624 // compare instruction.
3625 for (MachineBasicBlock::iterator J = MI;; ++J) {
3626 // Do we have a convertible instruction?
3627 NewCC = isUseDefConvertible(*J);
3628 if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
3629 J->getOperand(1).getReg() == SrcReg) {
3630 assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
3631 ShouldUpdateCC = true; // Update CC later on.
3632 // This is not a def of SrcReg, but still a def of EFLAGS. Keep going
3633 // with the new def.
3644 // We are searching for an earlier instruction that can make CmpInstr
3645 // redundant and that instruction will be saved in Sub.
3646 MachineInstr *Sub = nullptr;
3647 const TargetRegisterInfo *TRI = &getRegisterInfo();
3649 // We iterate backward, starting from the instruction before CmpInstr, and
3650 // stop when we reach the definition of a source register or are done with the BB.
3651 // RI points to the instruction before CmpInstr.
3652 // If the definition is in this basic block, RE points to the definition;
3653 // otherwise, RE is the rend of the basic block.
3654 MachineBasicBlock::reverse_iterator
3655 RI = ++I.getReverse(),
3656 RE = CmpInstr.getParent() == MI->getParent()
3657 ? Def.getReverse() /* points to MI */
3658 : CmpInstr.getParent()->rend();
3659 MachineInstr *Movr0Inst = nullptr;
3660 for (; RI != RE; ++RI) {
3661 MachineInstr &Instr = *RI;
3662 // Check whether CmpInstr can be made redundant by the current instruction.
3663 if (!IsCmpZero && isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask,
3669 if (Instr.modifiesRegister(X86::EFLAGS, TRI) ||
3670 Instr.readsRegister(X86::EFLAGS, TRI)) {
3671 // This instruction modifies or uses EFLAGS.
3673 // MOV32r0 etc. are implemented with xor, which clobbers the condition codes.
3674 // They are safe to move up if the definition of EFLAGS is dead and
3675 // earlier instructions do not read or write EFLAGS.
3676 if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 &&
3677 Instr.registerDefIsDead(X86::EFLAGS, TRI)) {
3682 // We can't remove CmpInstr.
3687 // Return false if no candidates exist.
3688 if (!IsCmpZero && !Sub)
3692 (SrcReg2 != 0 && Sub && Sub->getOperand(1).getReg() == SrcReg2 &&
3693 Sub->getOperand(2).getReg() == SrcReg);
3695 // Scan forward from the instruction after CmpInstr for uses of EFLAGS.
3696 // It is safe to remove CmpInstr if EFLAGS is redefined or killed.
3697 // If we are done with the basic block, we need to check whether EFLAGS is
3698 // live-out.
3699 bool IsSafe = false;
3700 SmallVector<std::pair<MachineInstr*, X86::CondCode>, 4> OpsToUpdate;
3701 MachineBasicBlock::iterator E = CmpInstr.getParent()->end();
3702 for (++I; I != E; ++I) {
3703 const MachineInstr &Instr = *I;
3704 bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
3705 bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
3706 // We should check the usage if this instruction uses and updates EFLAGS.
3707 if (!UseEFLAGS && ModifyEFLAGS) {
3708 // It is safe to remove CmpInstr if EFLAGS is updated again.
3712 if (!UseEFLAGS && !ModifyEFLAGS)
3715 // EFLAGS is used by this instruction.
3716 X86::CondCode OldCC = X86::COND_INVALID;
3717 if (IsCmpZero || IsSwapped) {
3718 // We decode the condition code from the opcode.
3719 if (Instr.isBranch())
3720 OldCC = X86::getCondFromBranch(Instr);
3722 OldCC = X86::getCondFromSETCC(Instr);
3723 if (OldCC == X86::COND_INVALID)
3724 OldCC = X86::getCondFromCMov(Instr);
3726 if (OldCC == X86::COND_INVALID) return false;
3728 X86::CondCode ReplacementCC = X86::COND_INVALID;
3732 case X86::COND_A: case X86::COND_AE:
3733 case X86::COND_B: case X86::COND_BE:
3734 case X86::COND_G: case X86::COND_GE:
3735 case X86::COND_L: case X86::COND_LE:
3736 case X86::COND_O: case X86::COND_NO:
3737 // CF and OF are used, we can't perform this optimization.
3739 case X86::COND_S: case X86::COND_NS:
3740 // If SF is used, but the instruction doesn't update the SF, then we
3741 // can't do the optimization.
3747 // If we're updating the condition code, check if we have to reverse the
3748 // condition.
3754 ReplacementCC = NewCC;
3757 ReplacementCC = GetOppositeBranchCondition(NewCC);
3760 } else if (IsSwapped) {
3761 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
3762 // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
3763 // We swap the condition code and synthesize the new opcode.
3764 ReplacementCC = getSwappedCondition(OldCC);
3765 if (ReplacementCC == X86::COND_INVALID) return false;
3768 if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) {
3769 // Push the MachineInstr to OpsToUpdate.
3770 // If it is safe to remove CmpInstr, the condition code of these
3771 // instructions will be modified.
3772 OpsToUpdate.push_back(std::make_pair(&*I, ReplacementCC));
3774 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
3775 // It is safe to remove CmpInstr if EFLAGS is updated again or killed.
3781 // If EFLAGS is not killed nor re-defined, we should check whether it is
3782 // live-out. If it is live-out, do not optimize.
3783 if ((IsCmpZero || IsSwapped) && !IsSafe) {
3784 MachineBasicBlock *MBB = CmpInstr.getParent();
3785 for (MachineBasicBlock *Successor : MBB->successors())
3786 if (Successor->isLiveIn(X86::EFLAGS))
3790 // The instruction to be updated is either Sub or MI.
3791 Sub = IsCmpZero ? MI : Sub;
3792 // Move Movr0Inst to the appropriate place before Sub.
3794 // Look backwards until we find a def that doesn't use the current EFLAGS.
3796 MachineBasicBlock::reverse_iterator InsertI = Def.getReverse(),
3797 InsertE = Sub->getParent()->rend();
3798 for (; InsertI != InsertE; ++InsertI) {
3799 MachineInstr *Instr = &*InsertI;
3800 if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
3801 Instr->modifiesRegister(X86::EFLAGS, TRI)) {
3802 Sub->getParent()->remove(Movr0Inst);
3803 Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
3808 if (InsertI == InsertE)
3812 // Make sure Sub instruction defines EFLAGS and mark the def live.
3813 MachineOperand *FlagDef = Sub->findRegisterDefOperand(X86::EFLAGS);
3814 assert(FlagDef && "Unable to locate a def EFLAGS operand");
3815 FlagDef->setIsDead(false);
3817 CmpInstr.eraseFromParent();
3819 // Modify the condition code of instructions in OpsToUpdate.
3820 for (auto &Op : OpsToUpdate) {
3821 Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1)
3827 /// Try to remove the load by folding it to a register
3828 /// operand at the use. We fold the load if it defines a virtual
3829 /// register, the virtual register is used once in the same BB, and the
3830 /// instructions in-between do not load or store, and have no side effects.
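/// e.g., roughly:
///   %1 = MOV32rm %stack.0, 1, $noreg, 0, $noreg
///   %2 = ADD32rr %0, %1
/// becomes
///   %2 = ADD32rm %0, %stack.0, 1, $noreg, 0, $noreg
/// when %1 has no other uses (a sketch; the actual rewrite goes through
/// foldMemoryOperand below).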
3831 MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
3832 const MachineRegisterInfo *MRI,
3833 unsigned &FoldAsLoadDefReg,
3834 MachineInstr *&DefMI) const {
3835 // Check whether we can move DefMI here.
3836 DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
3838 bool SawStore = false;
3839 if (!DefMI->isSafeToMove(nullptr, SawStore))
3842 // Collect information about virtual register operands of MI.
3843 SmallVector<unsigned, 1> SrcOperandIds;
3844 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
3845 MachineOperand &MO = MI.getOperand(i);
3848 Register Reg = MO.getReg();
3849 if (Reg != FoldAsLoadDefReg)
3851 // Do not fold if we have a subreg use or a def.
3852 if (MO.getSubReg() || MO.isDef())
3854 SrcOperandIds.push_back(i);
3856 if (SrcOperandIds.empty())
3859 // Check whether we can fold the def into one of SrcOperandIds.
3860 if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
3861 FoldAsLoadDefReg = 0;
3868 /// Expand a single-def pseudo instruction to a two-addr
3869 /// instruction with two undef reads of the register being defined.
3870 /// This is used for mapping:
3871 ///   %xmm4 = V_SET0
3872 /// to:
3873 ///   %xmm4 = PXORrr undef %xmm4, undef %xmm4
3875 static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
3876 const MCInstrDesc &Desc) {
3877 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
3878 Register Reg = MIB->getOperand(0).getReg();
3881 // MachineInstr::addOperand() will insert explicit operands before any
3882 // implicit operands.
3883 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
3884 // But we don't trust that.
3885 assert(MIB->getOperand(1).getReg() == Reg &&
3886 MIB->getOperand(2).getReg() == Reg && "Misplaced operand");
3890 /// Expand a single-def pseudo instruction to a two-addr
3891 /// instruction with two %k0 reads.
3892 /// This is used for mapping:
3893 ///   %k4 = K_SET1
3894 /// to:
3895 ///   %k4 = KXNORrr %k0, %k0
3896 static bool Expand2AddrKreg(MachineInstrBuilder &MIB,
3897 const MCInstrDesc &Desc, unsigned Reg) {
3898 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
3900 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
3904 static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
3906 MachineBasicBlock &MBB = *MIB->getParent();
3907 DebugLoc DL = MIB->getDebugLoc();
3908 Register Reg = MIB->getOperand(0).getReg();
3911 BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg)
3912 .addReg(Reg, RegState::Undef)
3913 .addReg(Reg, RegState::Undef);
3915 // Turn the pseudo into an INC or DEC.
3916 MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
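// Net effect: "xorl %reg, %reg" followed by "incl %reg" (or "decl %reg" for
// -1), which materializes +/-1 without an immediate and without depending on
// the register's old value.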
3922 static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
3923 const TargetInstrInfo &TII,
3924 const X86Subtarget &Subtarget) {
3925 MachineBasicBlock &MBB = *MIB->getParent();
3926 DebugLoc DL = MIB->getDebugLoc();
3927 int64_t Imm = MIB->getOperand(1).getImm();
3928 assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
3929 MachineBasicBlock::iterator I = MIB.getInstr();
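// The expansion below materializes the sign-extended 8-bit immediate as
//   push $imm8 ; pop %reg
// which is smaller than a mov with a 32/64-bit immediate. The push writes to
// the slot just below the old stack pointer, which is why the red-zone check
// below falls back to a plain mov when the function uses the red zone.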
3931 int StackAdjustment;
3933 if (Subtarget.is64Bit()) {
3934 assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
3935 MIB->getOpcode() == X86::MOV32ImmSExti8);
3937 // Can't use push/pop lowering if the function might write to the red zone.
3938 X86MachineFunctionInfo *X86FI =
3939 MBB.getParent()->getInfo<X86MachineFunctionInfo>();
3940 if (X86FI->getUsesRedZone()) {
3941 MIB->setDesc(TII.get(MIB->getOpcode() ==
3942 X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri));
3946 // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
3947 // widen the register if necessary.
3948 StackAdjustment = 8;
3949 BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm);
3950 MIB->setDesc(TII.get(X86::POP64r));
3952 .setReg(getX86SubSuperRegister(MIB->getOperand(0).getReg(), 64));
3954 assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
3955 StackAdjustment = 4;
3956 BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm);
3957 MIB->setDesc(TII.get(X86::POP32r));
3959 MIB->RemoveOperand(1);
3960 MIB->addImplicitDefUseOperands(*MBB.getParent());
3962 // Build CFI if necessary.
3963 MachineFunction &MF = *MBB.getParent();
3964 const X86FrameLowering *TFL = Subtarget.getFrameLowering();
3965 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
3966 bool NeedsDwarfCFI = !IsWin64Prologue && MF.needsFrameMoves();
3967 bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
3969 TFL->BuildCFI(MBB, I, DL,
3970 MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
3971 TFL->BuildCFI(MBB, std::next(I), DL,
3972 MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
3978 // LoadStackGuard has so far only been implemented for 64-bit MachO. A different
3979 // code sequence is needed for other targets.
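// The expansion is roughly (with the guard global taken from the pseudo's
// memory operand, e.g. ___stack_chk_guard on Darwin):
//   movq _guard@GOTPCREL(%rip), %reg   ; load the guard's address from the GOT
//   movq (%reg), %reg                  ; load the guard value itself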
3980 static void expandLoadStackGuard(MachineInstrBuilder &MIB,
3981 const TargetInstrInfo &TII) {
3982 MachineBasicBlock &MBB = *MIB->getParent();
3983 DebugLoc DL = MIB->getDebugLoc();
3984 Register Reg = MIB->getOperand(0).getReg();
3985 const GlobalValue *GV =
3986 cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
3987 auto Flags = MachineMemOperand::MOLoad |
3988 MachineMemOperand::MODereferenceable |
3989 MachineMemOperand::MOInvariant;
3990 MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
3991 MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, 8);
3992 MachineBasicBlock::iterator I = MIB.getInstr();
3994 BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
3995 .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
3996 .addMemOperand(MMO);
3997 MIB->setDebugLoc(DL);
3998 MIB->setDesc(TII.get(X86::MOV64rm));
3999 MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
4002 static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
4003 MachineBasicBlock &MBB = *MIB->getParent();
4004 MachineFunction &MF = *MBB.getParent();
4005 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
4006 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4008 MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
4009 MIB->setDesc(TII.get(XorOp));
4010 MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
4014 // This is used to handle spills for 128/256-bit registers when we have AVX512,
4015 // but not VLX. If it uses an extended register we need to use an instruction
4016 // that loads the lower 128/256 bits, but is available with only AVX512F.
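// e.g. a reload of %xmm16 can't use VMOVAPSrm because VEX encodings only reach
// xmm0-15; instead the value is reloaded with VBROADCASTF32X4 into the
// containing zmm register, and the 128 bits we care about land in the low lanes.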
4017 static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
4018 const TargetRegisterInfo *TRI,
4019 const MCInstrDesc &LoadDesc,
4020 const MCInstrDesc &BroadcastDesc,
4022 Register DestReg = MIB->getOperand(0).getReg();
4023 // Check if DestReg is XMM16-31 or YMM16-31.
4024 if (TRI->getEncodingValue(DestReg) < 16) {
4025 // We can use a normal VEX encoded load.
4026 MIB->setDesc(LoadDesc);
4028 // Use a 128/256-bit VBROADCAST instruction.
4029 MIB->setDesc(BroadcastDesc);
4030 // Change the destination to a 512-bit register.
4031 DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
4032 MIB->getOperand(0).setReg(DestReg);
4037 // This is used to handle spills for 128/256-bit registers when we have AVX512,
4038 // but not VLX. If it uses an extended register we need to use an instruction
4039 // that stores the lower 128/256 bits, but is available with only AVX512F.
4040 static bool expandNOVLXStore(MachineInstrBuilder &MIB,
4041 const TargetRegisterInfo *TRI,
4042 const MCInstrDesc &StoreDesc,
4043 const MCInstrDesc &ExtractDesc,
4045 Register SrcReg = MIB->getOperand(X86::AddrNumOperands).getReg();
4046 // Check if SrcReg is XMM16-31 or YMM16-31.
4047 if (TRI->getEncodingValue(SrcReg) < 16) {
4048 // We can use a normal VEX encoded store.
4049 MIB->setDesc(StoreDesc);
4051 // Use a VEXTRACTF instruction.
4052 MIB->setDesc(ExtractDesc);
4053 // Change the source to a 512-bit register.
4054 SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
4055 MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg);
4056 MIB.addImm(0x0); // Append immediate to extract from the lower bits.
4062 static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) {
4064 int64_t ShiftAmt = MIB->getOperand(2).getImm();
4065 // Temporarily remove the immediate so we can add another source register.
4066 MIB->RemoveOperand(2);
4067 // Add the register. Don't copy the kill flag if there is one.
4068 MIB.addReg(MIB->getOperand(1).getReg(),
4069 getUndefRegState(MIB->getOperand(1).isUndef()));
4070 // Add back the immediate.
4071 MIB.addImm(ShiftAmt);
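// e.g. SHLDROT32ri %reg, 5 becomes "shld $5, %reg, %reg"; with both shift
// inputs being the same register the double shift acts as a rotate left by 5
// (and SHRD likewise gives a rotate right).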
4075 bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
4076 bool HasAVX = Subtarget.hasAVX();
4077 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
4078 switch (MI.getOpcode()) {
4080 return Expand2AddrUndef(MIB, get(X86::XOR32rr));
4082 return expandMOV32r1(MIB, *this, /*MinusOne=*/ false);
4084 return expandMOV32r1(MIB, *this, /*MinusOne=*/ true);
4085 case X86::MOV32ImmSExti8:
4086 case X86::MOV64ImmSExti8:
4087 return ExpandMOVImmSExti8(MIB, *this, Subtarget);
4089 return Expand2AddrUndef(MIB, get(X86::SBB8rr));
4090 case X86::SETB_C16r:
4091 return Expand2AddrUndef(MIB, get(X86::SBB16rr));
4092 case X86::SETB_C32r:
4093 return Expand2AddrUndef(MIB, get(X86::SBB32rr));
4094 case X86::SETB_C64r:
4095 return Expand2AddrUndef(MIB, get(X86::SBB64rr));
4097 return Expand2AddrUndef(MIB, get(X86::MMX_PXORirr));
4101 case X86::FsFLD0F128:
4102 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
4103 case X86::AVX_SET0: {
4104 assert(HasAVX && "AVX not supported");
4105 const TargetRegisterInfo *TRI = &getRegisterInfo();
4106 Register SrcReg = MIB->getOperand(0).getReg();
4107 Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
4108 MIB->getOperand(0).setReg(XReg);
4109 Expand2AddrUndef(MIB, get(X86::VXORPSrr));
4110 MIB.addReg(SrcReg, RegState::ImplicitDefine);
4113 case X86::AVX512_128_SET0:
4114 case X86::AVX512_FsFLD0SS:
4115 case X86::AVX512_FsFLD0SD:
4116 case X86::AVX512_FsFLD0F128: {
4117 bool HasVLX = Subtarget.hasVLX();
4118 Register SrcReg = MIB->getOperand(0).getReg();
4119 const TargetRegisterInfo *TRI = &getRegisterInfo();
4120 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16)
4121 return Expand2AddrUndef(MIB,
4122 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
4123 // Extended register without VLX. Use a larger XOR.
4125 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
4126 MIB->getOperand(0).setReg(SrcReg);
4127 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
4129 case X86::AVX512_256_SET0:
4130 case X86::AVX512_512_SET0: {
4131 bool HasVLX = Subtarget.hasVLX();
4132 Register SrcReg = MIB->getOperand(0).getReg();
4133 const TargetRegisterInfo *TRI = &getRegisterInfo();
4134 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) {
4135 Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
4136 MIB->getOperand(0).setReg(XReg);
4137 Expand2AddrUndef(MIB,
4138 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
4139 MIB.addReg(SrcReg, RegState::ImplicitDefine);
4142 if (MI.getOpcode() == X86::AVX512_256_SET0) {
4143 // No VLX so we must reference a zmm.
4145 TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
4146 MIB->getOperand(0).setReg(ZReg);
4148 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
4150 case X86::V_SETALLONES:
4151 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
4152 case X86::AVX2_SETALLONES:
4153 return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
4154 case X86::AVX1_SETALLONES: {
4155 Register Reg = MIB->getOperand(0).getReg();
4156 // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS.
4157 MIB->setDesc(get(X86::VCMPPSYrri));
4158 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf);
4161 case X86::AVX512_512_SETALLONES: {
4162 Register Reg = MIB->getOperand(0).getReg();
4163 MIB->setDesc(get(X86::VPTERNLOGDZrri));
4164 // VPTERNLOGD needs 3 register inputs and an immediate.
4165 // 0xff will return 1s for any input.
4166 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef)
4167 .addReg(Reg, RegState::Undef).addImm(0xff);
4170 case X86::AVX512_512_SEXT_MASK_32:
4171 case X86::AVX512_512_SEXT_MASK_64: {
4172 Register Reg = MIB->getOperand(0).getReg();
4173 Register MaskReg = MIB->getOperand(1).getReg();
4174 unsigned MaskState = getRegState(MIB->getOperand(1));
4175 unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ?
4176 X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz;
4177 MI.RemoveOperand(1);
4178 MIB->setDesc(get(Opc));
4179 // VPTERNLOG needs 3 register inputs and an immediate.
4180 // 0xff will return 1s for any input.
4181 MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState)
4182 .addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff);
4185 case X86::VMOVAPSZ128rm_NOVLX:
4186 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm),
4187 get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
4188 case X86::VMOVUPSZ128rm_NOVLX:
4189 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm),
4190 get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
4191 case X86::VMOVAPSZ256rm_NOVLX:
4192 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm),
4193 get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
4194 case X86::VMOVUPSZ256rm_NOVLX:
4195 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm),
4196 get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
4197 case X86::VMOVAPSZ128mr_NOVLX:
4198 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
4199 get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
4200 case X86::VMOVUPSZ128mr_NOVLX:
4201 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
4202 get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
4203 case X86::VMOVAPSZ256mr_NOVLX:
4204 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
4205 get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
4206 case X86::VMOVUPSZ256mr_NOVLX:
4207 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
4208 get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
4209 case X86::MOV32ri64: {
4210 Register Reg = MIB->getOperand(0).getReg();
4211 Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
4212 MI.setDesc(get(X86::MOV32ri));
4213 MIB->getOperand(0).setReg(Reg32);
4214 MIB.addReg(Reg, RegState::ImplicitDefine);
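// Writing the 32-bit sub-register implicitly zero-extends into the full
// 64-bit register, so a plain "movl $imm, %e.." is enough to materialize
// the zero-extended 64-bit immediate.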
4218 // KNL does not recognize dependency-breaking idioms for mask registers,
4219 // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1.
4220 // Using %k0 as the undef input register is a performance heuristic based
4221 // on the assumption that %k0 is used less frequently than the other mask
4222 // registers, since it is not usable as a write mask.
4223 // FIXME: A more advanced approach would be to choose the best input mask
4224 // register based on context.
4225 case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0);
4226 case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0);
4227 case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0);
4228 case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0);
4229 case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0);
4230 case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0);
4231 case TargetOpcode::LOAD_STACK_GUARD:
4232 expandLoadStackGuard(MIB, *this);
4236 return expandXorFP(MIB, *this);
4237 case X86::SHLDROT32ri: return expandSHXDROT(MIB, get(X86::SHLD32rri8));
4238 case X86::SHLDROT64ri: return expandSHXDROT(MIB, get(X86::SHLD64rri8));
4239 case X86::SHRDROT32ri: return expandSHXDROT(MIB, get(X86::SHRD32rri8));
4240 case X86::SHRDROT64ri: return expandSHXDROT(MIB, get(X86::SHRD64rri8));
4241 case X86::ADD8rr_DB: MIB->setDesc(get(X86::OR8rr)); break;
4242 case X86::ADD16rr_DB: MIB->setDesc(get(X86::OR16rr)); break;
4243 case X86::ADD32rr_DB: MIB->setDesc(get(X86::OR32rr)); break;
4244 case X86::ADD64rr_DB: MIB->setDesc(get(X86::OR64rr)); break;
4245 case X86::ADD8ri_DB: MIB->setDesc(get(X86::OR8ri)); break;
4246 case X86::ADD16ri_DB: MIB->setDesc(get(X86::OR16ri)); break;
4247 case X86::ADD32ri_DB: MIB->setDesc(get(X86::OR32ri)); break;
4248 case X86::ADD64ri32_DB: MIB->setDesc(get(X86::OR64ri32)); break;
4249 case X86::ADD16ri8_DB: MIB->setDesc(get(X86::OR16ri8)); break;
4250 case X86::ADD32ri8_DB: MIB->setDesc(get(X86::OR32ri8)); break;
4251 case X86::ADD64ri8_DB: MIB->setDesc(get(X86::OR64ri8)); break;
4256 /// Return true for all instructions that only update
4257 /// the first 32 or 64 bits of the destination register and leave the rest
4258 /// unmodified. This can be used to avoid folding loads if the instructions
4259 /// only update part of the destination register, and the non-updated part is
4260 /// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these
4261 /// instructions breaks the partial register dependency and it can improve
4262 /// performance. e.g.:
4263 ///
4264 ///   movss (%rdi), %xmm0
4265 ///   cvtss2sd %xmm0, %xmm0
4266 ///
4267 /// Instead of
4268 ///   cvtss2sd (%rdi), %xmm0
4270 /// FIXME: This should be turned into a TSFlags.
4272 static bool hasPartialRegUpdate(unsigned Opcode,
4273 const X86Subtarget &Subtarget,
4274 bool ForLoadFold = false) {
4276 case X86::CVTSI2SSrr:
4277 case X86::CVTSI2SSrm:
4278 case X86::CVTSI642SSrr:
4279 case X86::CVTSI642SSrm:
4280 case X86::CVTSI2SDrr:
4281 case X86::CVTSI2SDrm:
4282 case X86::CVTSI642SDrr:
4283 case X86::CVTSI642SDrm:
4284 // Load folding won't affect the undef register update since the input is
4285 // a GPR.
4286 return !ForLoadFold;
4287 case X86::CVTSD2SSrr:
4288 case X86::CVTSD2SSrm:
4289 case X86::CVTSS2SDrr:
4290 case X86::CVTSS2SDrm:
4297 case X86::RCPSSr_Int:
4298 case X86::RCPSSm_Int:
4305 case X86::RSQRTSSr_Int:
4306 case X86::RSQRTSSm_Int:
4309 case X86::SQRTSSr_Int:
4310 case X86::SQRTSSm_Int:
4313 case X86::SQRTSDr_Int:
4314 case X86::SQRTSDm_Int:
4317 case X86::POPCNT32rm:
4318 case X86::POPCNT32rr:
4319 case X86::POPCNT64rm:
4320 case X86::POPCNT64rr:
4321 return Subtarget.hasPOPCNTFalseDeps();
4322 case X86::LZCNT32rm:
4323 case X86::LZCNT32rr:
4324 case X86::LZCNT64rm:
4325 case X86::LZCNT64rr:
4326 case X86::TZCNT32rm:
4327 case X86::TZCNT32rr:
4328 case X86::TZCNT64rm:
4329 case X86::TZCNT64rr:
4330 return Subtarget.hasLZCNTFalseDeps();
4336 /// Inform the BreakFalseDeps pass how many idle
4337 /// instructions we would like before a partial register update.
4338 unsigned X86InstrInfo::getPartialRegUpdateClearance(
4339 const MachineInstr &MI, unsigned OpNum,
4340 const TargetRegisterInfo *TRI) const {
4341 if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget))
4344 // If MI is marked as reading Reg, the partial register update is wanted.
4345 const MachineOperand &MO = MI.getOperand(0);
4346 Register Reg = MO.getReg();
4347 if (Register::isVirtualRegister(Reg)) {
4348 if (MO.readsReg() || MI.readsVirtualRegister(Reg))
4351 if (MI.readsRegister(Reg, TRI))
4355 // If any instructions in the clearance range are reading Reg, insert a
4356 // dependency breaking instruction, which is inexpensive and is likely to
4357 // be hidden in other instructions' cycles.
4358 return PartialRegUpdateClearance;
4361 // Return true for any instruction that copies the high bits of the first source
4362 // operand into the unused high bits of the destination operand.
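// e.g. "vcvtsi2ss %eax, %xmm1, %xmm0" writes the converted value into the low
// element of %xmm0 and passes %xmm1's upper elements through unchanged, so an
// undef %xmm1 read is a false dependence that BreakFalseDeps may want to clear.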
4363 static bool hasUndefRegUpdate(unsigned Opcode, unsigned &OpNum,
4364 bool ForLoadFold = false) {
4365 // Set the OpNum parameter to the first source operand.
4368 case X86::VCVTSI2SSrr:
4369 case X86::VCVTSI2SSrm:
4370 case X86::VCVTSI2SSrr_Int:
4371 case X86::VCVTSI2SSrm_Int:
4372 case X86::VCVTSI642SSrr:
4373 case X86::VCVTSI642SSrm:
4374 case X86::VCVTSI642SSrr_Int:
4375 case X86::VCVTSI642SSrm_Int:
4376 case X86::VCVTSI2SDrr:
4377 case X86::VCVTSI2SDrm:
4378 case X86::VCVTSI2SDrr_Int:
4379 case X86::VCVTSI2SDrm_Int:
4380 case X86::VCVTSI642SDrr:
4381 case X86::VCVTSI642SDrm:
4382 case X86::VCVTSI642SDrr_Int:
4383 case X86::VCVTSI642SDrm_Int:
4385 case X86::VCVTSI2SSZrr:
4386 case X86::VCVTSI2SSZrm:
4387 case X86::VCVTSI2SSZrr_Int:
4388 case X86::VCVTSI2SSZrrb_Int:
4389 case X86::VCVTSI2SSZrm_Int:
4390 case X86::VCVTSI642SSZrr:
4391 case X86::VCVTSI642SSZrm:
4392 case X86::VCVTSI642SSZrr_Int:
4393 case X86::VCVTSI642SSZrrb_Int:
4394 case X86::VCVTSI642SSZrm_Int:
4395 case X86::VCVTSI2SDZrr:
4396 case X86::VCVTSI2SDZrm:
4397 case X86::VCVTSI2SDZrr_Int:
4398 case X86::VCVTSI2SDZrm_Int:
4399 case X86::VCVTSI642SDZrr:
4400 case X86::VCVTSI642SDZrm:
4401 case X86::VCVTSI642SDZrr_Int:
4402 case X86::VCVTSI642SDZrrb_Int:
4403 case X86::VCVTSI642SDZrm_Int:
4404 case X86::VCVTUSI2SSZrr:
4405 case X86::VCVTUSI2SSZrm:
4406 case X86::VCVTUSI2SSZrr_Int:
4407 case X86::VCVTUSI2SSZrrb_Int:
4408 case X86::VCVTUSI2SSZrm_Int:
4409 case X86::VCVTUSI642SSZrr:
4410 case X86::VCVTUSI642SSZrm:
4411 case X86::VCVTUSI642SSZrr_Int:
4412 case X86::VCVTUSI642SSZrrb_Int:
4413 case X86::VCVTUSI642SSZrm_Int:
4414 case X86::VCVTUSI2SDZrr:
4415 case X86::VCVTUSI2SDZrm:
4416 case X86::VCVTUSI2SDZrr_Int:
4417 case X86::VCVTUSI2SDZrm_Int:
4418 case X86::VCVTUSI642SDZrr:
4419 case X86::VCVTUSI642SDZrm:
4420 case X86::VCVTUSI642SDZrr_Int:
4421 case X86::VCVTUSI642SDZrrb_Int:
4422 case X86::VCVTUSI642SDZrm_Int:
4423 // Load folding won't affect the undef register update since the input is
4424 // a GPR.
4425 return !ForLoadFold;
4426 case X86::VCVTSD2SSrr:
4427 case X86::VCVTSD2SSrm:
4428 case X86::VCVTSD2SSrr_Int:
4429 case X86::VCVTSD2SSrm_Int:
4430 case X86::VCVTSS2SDrr:
4431 case X86::VCVTSS2SDrm:
4432 case X86::VCVTSS2SDrr_Int:
4433 case X86::VCVTSS2SDrm_Int:
4435 case X86::VRCPSSr_Int:
4437 case X86::VRCPSSm_Int:
4438 case X86::VROUNDSDr:
4439 case X86::VROUNDSDm:
4440 case X86::VROUNDSDr_Int:
4441 case X86::VROUNDSDm_Int:
4442 case X86::VROUNDSSr:
4443 case X86::VROUNDSSm:
4444 case X86::VROUNDSSr_Int:
4445 case X86::VROUNDSSm_Int:
4446 case X86::VRSQRTSSr:
4447 case X86::VRSQRTSSr_Int:
4448 case X86::VRSQRTSSm:
4449 case X86::VRSQRTSSm_Int:
4451 case X86::VSQRTSSr_Int:
4453 case X86::VSQRTSSm_Int:
4455 case X86::VSQRTSDr_Int:
4457 case X86::VSQRTSDm_Int:
4459 case X86::VCVTSD2SSZrr:
4460 case X86::VCVTSD2SSZrr_Int:
4461 case X86::VCVTSD2SSZrrb_Int:
4462 case X86::VCVTSD2SSZrm:
4463 case X86::VCVTSD2SSZrm_Int:
4464 case X86::VCVTSS2SDZrr:
4465 case X86::VCVTSS2SDZrr_Int:
4466 case X86::VCVTSS2SDZrrb_Int:
4467 case X86::VCVTSS2SDZrm:
4468 case X86::VCVTSS2SDZrm_Int:
4469 case X86::VGETEXPSDZr:
4470 case X86::VGETEXPSDZrb:
4471 case X86::VGETEXPSDZm:
4472 case X86::VGETEXPSSZr:
4473 case X86::VGETEXPSSZrb:
4474 case X86::VGETEXPSSZm:
4475 case X86::VGETMANTSDZrri:
4476 case X86::VGETMANTSDZrrib:
4477 case X86::VGETMANTSDZrmi:
4478 case X86::VGETMANTSSZrri:
4479 case X86::VGETMANTSSZrrib:
4480 case X86::VGETMANTSSZrmi:
4481 case X86::VRNDSCALESDZr:
4482 case X86::VRNDSCALESDZr_Int:
4483 case X86::VRNDSCALESDZrb_Int:
4484 case X86::VRNDSCALESDZm:
4485 case X86::VRNDSCALESDZm_Int:
4486 case X86::VRNDSCALESSZr:
4487 case X86::VRNDSCALESSZr_Int:
4488 case X86::VRNDSCALESSZrb_Int:
4489 case X86::VRNDSCALESSZm:
4490 case X86::VRNDSCALESSZm_Int:
4491 case X86::VRCP14SDZrr:
4492 case X86::VRCP14SDZrm:
4493 case X86::VRCP14SSZrr:
4494 case X86::VRCP14SSZrm:
4495 case X86::VRCP28SDZr:
4496 case X86::VRCP28SDZrb:
4497 case X86::VRCP28SDZm:
4498 case X86::VRCP28SSZr:
4499 case X86::VRCP28SSZrb:
4500 case X86::VRCP28SSZm:
4501 case X86::VREDUCESSZrmi:
4502 case X86::VREDUCESSZrri:
4503 case X86::VREDUCESSZrrib:
4504 case X86::VRSQRT14SDZrr:
4505 case X86::VRSQRT14SDZrm:
4506 case X86::VRSQRT14SSZrr:
4507 case X86::VRSQRT14SSZrm:
4508 case X86::VRSQRT28SDZr:
4509 case X86::VRSQRT28SDZrb:
4510 case X86::VRSQRT28SDZm:
4511 case X86::VRSQRT28SSZr:
4512 case X86::VRSQRT28SSZrb:
4513 case X86::VRSQRT28SSZm:
4514 case X86::VSQRTSSZr:
4515 case X86::VSQRTSSZr_Int:
4516 case X86::VSQRTSSZrb_Int:
4517 case X86::VSQRTSSZm:
4518 case X86::VSQRTSSZm_Int:
4519 case X86::VSQRTSDZr:
4520 case X86::VSQRTSDZr_Int:
4521 case X86::VSQRTSDZrb_Int:
4522 case X86::VSQRTSDZm:
4523 case X86::VSQRTSDZm_Int:
4525 case X86::VMOVSSZrrk:
4526 case X86::VMOVSDZrrk:
4529 case X86::VMOVSSZrrkz:
4530 case X86::VMOVSDZrrkz:
4538 /// Inform the BreakFalseDeps pass how many idle instructions we would like
4539 /// before certain undef register reads.
4541 /// This catches the VCVTSI2SD family of instructions:
4543 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
4545 /// We should be careful *not* to catch VXOR idioms which are presumably
4546 /// handled specially in the pipeline:
4548 /// vxorps undef %xmm1, undef %xmm1, %xmm1
4550 /// Like getPartialRegUpdateClearance, this makes a strong assumption that the
4551 /// high bits that are passed-through are not live.
4553 X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
4554 const TargetRegisterInfo *TRI) const {
4555 if (!hasUndefRegUpdate(MI.getOpcode(), OpNum))
4558 const MachineOperand &MO = MI.getOperand(OpNum);
4559 if (MO.isUndef() && Register::isPhysicalRegister(MO.getReg())) {
4560 return UndefRegClearance;
4565 void X86InstrInfo::breakPartialRegDependency(
4566 MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
4567 Register Reg = MI.getOperand(OpNum).getReg();
4568 // If MI kills this register, the false dependence is already broken.
4569 if (MI.killsRegister(Reg, TRI))
4572 if (X86::VR128RegClass.contains(Reg)) {
4573 // These instructions are all floating point domain, so xorps is the best
4574 // choice.
4575 unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
4576 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
4577 .addReg(Reg, RegState::Undef)
4578 .addReg(Reg, RegState::Undef);
4579 MI.addRegisterKilled(Reg, TRI, true);
4580 } else if (X86::VR256RegClass.contains(Reg)) {
4581 // Use vxorps to clear the full ymm register.
4582 // It wants to read and write the xmm sub-register.
4583 Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
4584 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
4585 .addReg(XReg, RegState::Undef)
4586 .addReg(XReg, RegState::Undef)
4587 .addReg(Reg, RegState::ImplicitDefine);
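// e.g. for %ymm3 this emits "vxorps %xmm3, %xmm3, %xmm3"; the VEX encoding
// implicitly zeroes the upper ymm lanes and the CPU recognizes it as a
// zeroing idiom, so the old value of %ymm3 is no longer an input.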
4588 MI.addRegisterKilled(Reg, TRI, true);
4589 } else if (X86::GR64RegClass.contains(Reg)) {
4590 // Using XOR32rr because it has a shorter encoding and zeroes the upper bits
4591 // of the 64-bit register automatically.
4592 Register XReg = TRI->getSubReg(Reg, X86::sub_32bit);
4593 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg)
4594 .addReg(XReg, RegState::Undef)
4595 .addReg(XReg, RegState::Undef)
4596 .addReg(Reg, RegState::ImplicitDefine);
4597 MI.addRegisterKilled(Reg, TRI, true);
4598 } else if (X86::GR32RegClass.contains(Reg)) {
4599 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg)
4600 .addReg(Reg, RegState::Undef)
4601 .addReg(Reg, RegState::Undef);
4602 MI.addRegisterKilled(Reg, TRI, true);
4606 static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
4607 int PtrOffset = 0) {
4608 unsigned NumAddrOps = MOs.size();
4610 if (NumAddrOps < 4) {
4611 // FrameIndex only - add an immediate offset (whether it's zero or not).
4612 for (unsigned i = 0; i != NumAddrOps; ++i)
4614 addOffset(MIB, PtrOffset);
4616 // General Memory Addressing - we need to add any offset to an existing
4617 // frame index offset.
4618 assert(MOs.size() == 5 && "Unexpected memory operand list length");
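// An x86 memory reference is (Base, ScaleAmt, Index, Disp, Segment); the
// displacement is operand index 3, which is where any extra PtrOffset is
// folded below.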
4619 for (unsigned i = 0; i != NumAddrOps; ++i) {
4620 const MachineOperand &MO = MOs[i];
4621 if (i == 3 && PtrOffset != 0) {
4622 MIB.addDisp(MO, PtrOffset);
4630 static void updateOperandRegConstraints(MachineFunction &MF,
4631 MachineInstr &NewMI,
4632 const TargetInstrInfo &TII) {
4633 MachineRegisterInfo &MRI = MF.getRegInfo();
4634 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
4636 for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
4637 MachineOperand &MO = NewMI.getOperand(Idx);
4638 // We only need to update constraints on virtual register operands.
4641 Register Reg = MO.getReg();
4642 if (!Register::isVirtualRegister(Reg))
4645 auto *NewRC = MRI.constrainRegClass(
4646 Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF));
4649 dbgs() << "WARNING: Unable to update register constraint for operand "
4650 << Idx << " of instruction:\n";
4651 NewMI.dump(); dbgs() << "\n");
4656 static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
4657 ArrayRef<MachineOperand> MOs,
4658 MachineBasicBlock::iterator InsertPt,
4660 const TargetInstrInfo &TII) {
4661 // Create the base instruction with the memory operand as the first part.
4662 // Omit the implicit operands, something BuildMI can't do.
4663 MachineInstr *NewMI =
4664 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
4665 MachineInstrBuilder MIB(MF, NewMI);
4666 addOperands(MIB, MOs);
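// In a two-address fold both the destination and the tied source register are
// replaced by the memory reference just added, so the remaining explicit
// operands are copied starting at index 2.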
4668 // Loop over the rest of the ri operands, converting them over.
4669 unsigned NumOps = MI.getDesc().getNumOperands() - 2;
4670 for (unsigned i = 0; i != NumOps; ++i) {
4671 MachineOperand &MO = MI.getOperand(i + 2);
4674 for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) {
4675 MachineOperand &MO = MI.getOperand(i);
4679 updateOperandRegConstraints(MF, *NewMI, TII);
4681 MachineBasicBlock *MBB = InsertPt->getParent();
4682 MBB->insert(InsertPt, NewMI);
4687 static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
4688 unsigned OpNo, ArrayRef<MachineOperand> MOs,
4689 MachineBasicBlock::iterator InsertPt,
4690 MachineInstr &MI, const TargetInstrInfo &TII,
4691 int PtrOffset = 0) {
4692 // Omit the implicit operands, something BuildMI can't do.
4693 MachineInstr *NewMI =
4694 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
4695 MachineInstrBuilder MIB(MF, NewMI);
4697 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
4698 MachineOperand &MO = MI.getOperand(i);
4700 assert(MO.isReg() && "Expected to fold into reg operand!");
4701 addOperands(MIB, MOs, PtrOffset);
4707 updateOperandRegConstraints(MF, *NewMI, TII);
4709 // Copy the NoFPExcept flag from the instruction we're fusing.
4710 if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
4711 NewMI->setFlag(MachineInstr::MIFlag::NoFPExcept);
4713 MachineBasicBlock *MBB = InsertPt->getParent();
4714 MBB->insert(InsertPt, NewMI);
4719 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
4720 ArrayRef<MachineOperand> MOs,
4721 MachineBasicBlock::iterator InsertPt,
4723 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
4724 MI.getDebugLoc(), TII.get(Opcode));
4725 addOperands(MIB, MOs);
4726 return MIB.addImm(0);
4729 MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
4730 MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
4731 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
4732 unsigned Size, unsigned Align) const {
4733 switch (MI.getOpcode()) {
4734 case X86::INSERTPSrr:
4735 case X86::VINSERTPSrr:
4736 case X86::VINSERTPSZrr:
4737 // Attempt to convert the load of the inserted vector element into a folded
4738 // load of a single float.
4740 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
4741 unsigned ZMask = Imm & 15;
4742 unsigned DstIdx = (Imm >> 4) & 3;
4743 unsigned SrcIdx = (Imm >> 6) & 3;
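// The INSERTPS immediate packs SrcIdx in bits 7:6, DstIdx in bits 5:4 and
// the zero mask in bits 3:0. The memory form selects the source element by
// biasing the pointer by SrcIdx*4 instead, so only DstIdx and ZMask are
// kept in the rewritten immediate below.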
4745 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
4746 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
4747 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
4748 if ((Size == 0 || Size >= 16) && RCSize >= 16 && 4 <= Align) {
4749 int PtrOffset = SrcIdx * 4;
4750 unsigned NewImm = (DstIdx << 4) | ZMask;
4751 unsigned NewOpCode =
4752 (MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm :
4753 (MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm :
4755 MachineInstr *NewMI =
4756 FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
4757 NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
4762 case X86::MOVHLPSrr:
4763 case X86::VMOVHLPSrr:
4764 case X86::VMOVHLPSZrr:
4765 // Move the upper 64-bits of the second operand to the lower 64-bits.
4766 // To fold the load, adjust the pointer to the upper and use (V)MOVLPS.
4767 // TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
4769 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
4770 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
4771 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
4772 if ((Size == 0 || Size >= 16) && RCSize >= 16 && 8 <= Align) {
4773 unsigned NewOpCode =
4774 (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
4775 (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm :
4777 MachineInstr *NewMI =
4778 FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
4783 case X86::UNPCKLPDrr:
4784 // If we won't be able to fold this to the memory form of UNPCKL, use
4785 // MOVHPD instead. Done as custom because we can't have this in the load
4786 // folding table.
4788 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
4789 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
4790 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
4791 if ((Size == 0 || Size >= 16) && RCSize >= 16 && Align < 16) {
4792 MachineInstr *NewMI =
4793 FuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this);
4803 static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
4806 if (!hasUndefRegUpdate(MI.getOpcode(), Ignored, /*ForLoadFold*/true) ||
4807 !MI.getOperand(1).isReg())
4810 // There are two cases we need to handle depending on where in the pipeline
4811 // the folding attempt is being made.
4812 // -Register has the undef flag set.
4813 // -Register is produced by the IMPLICIT_DEF instruction.
4815 if (MI.getOperand(1).isUndef())
4818 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4819 MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg());
4820 return VRegDef && VRegDef->isImplicitDef();
4824 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
4825 MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
4826 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
4827 unsigned Size, unsigned Align, bool AllowCommute) const {
4828 bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
4829 bool isTwoAddrFold = false;
4831 // For CPUs that favor the register form of a call or push,
4832 // do not fold loads into calls or pushes, unless optimizing for size
4833 // aggressively.
4834 if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() &&
4835 (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
4836 MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
4837 MI.getOpcode() == X86::PUSH64r))
4840 // Avoid partial and undef register update stalls unless optimizing for size.
4841 if (!MF.getFunction().hasOptSize() &&
4842 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
4843 shouldPreventUndefRegUpdateMemFold(MF, MI)))
4846 unsigned NumOps = MI.getDesc().getNumOperands();
4848 NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
4850 // FIXME: AsmPrinter doesn't know how to handle
4851 // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
4852 if (MI.getOpcode() == X86::ADD32ri &&
4853 MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
4856 // GOTTPOFF relocation loads can only be folded into add instructions.
4857 // FIXME: Need to exclude other relocations that only support specific
4858 // instructions.
4859 if (MOs.size() == X86::AddrNumOperands &&
4860 MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF &&
4861 MI.getOpcode() != X86::ADD64rr)
4864 MachineInstr *NewMI = nullptr;
4866 // Attempt to fold any custom cases we have.
4867 if (MachineInstr *CustomMI =
4868 foldMemoryOperandCustom(MF, MI, OpNum, MOs, InsertPt, Size, Align))
4871 const X86MemoryFoldTableEntry *I = nullptr;
4873 // Folding a memory location into the two-address part of a two-address
4874 // instruction is different from folding it in other places. It requires
4875 // replacing the *two* registers with the memory location.
4876 if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() &&
4877 MI.getOperand(1).isReg() &&
4878 MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
4879 I = lookupTwoAddrFoldTable(MI.getOpcode());
4880 isTwoAddrFold = true;
4883 if (MI.getOpcode() == X86::MOV32r0) {
4884 NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
4890 I = lookupFoldTable(MI.getOpcode(), OpNum);
4894 unsigned Opcode = I->DstOp;
4895 unsigned MinAlign = (I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
4896 MinAlign = MinAlign ? 1 << (MinAlign - 1) : 0;
4897 if (Align < MinAlign)
4899 bool NarrowToMOV32rm = false;
4901 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
4902 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum,
4904 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
4905 if (Size < RCSize) {
4906 // FIXME: Allow scalar intrinsic instructions like ADDSSrm_Int.
4907 // Check if it's safe to fold the load. If the size of the object is
4908 // narrower than the load width, then it's not.
4909 if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
4911 // If this is a 64-bit load, but the spill slot is only 32 bits, then we can do
4912 // a 32-bit load which is implicitly zero-extended. This likely is
4913 // due to live interval analysis remat'ing a load from stack slot.
4914 if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
4916 Opcode = X86::MOV32rm;
4917 NarrowToMOV32rm = true;
4922 NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this);
4924 NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
4926 if (NarrowToMOV32rm) {
4927 // This is the special case where we use a MOV32rm to load a 32-bit
4928 // value and zero-extend the top bits. Change the destination register
4929 // to a 32-bit one.
4930 Register DstReg = NewMI->getOperand(0).getReg();
4931 if (Register::isPhysicalRegister(DstReg))
4932 NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
4934 NewMI->getOperand(0).setSubReg(X86::sub_32bit);
4939 // If the instruction and target operand are commutable, commute the
4940 // instruction and try again.
4942 unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
4943 if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
4944 bool HasDef = MI.getDesc().getNumDefs();
4945 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
4946 Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
4947 Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
4949 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
4951 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
4953 // If either of the commutable operands is tied to the destination
4954 // then we cannot commute + fold.
4955 if ((HasDef && Reg0 == Reg1 && Tied1) ||
4956 (HasDef && Reg0 == Reg2 && Tied2))
4959 MachineInstr *CommutedMI =
4960 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
4962 // Unable to commute.
4965 if (CommutedMI != &MI) {
4966 // New instruction. We can't fold from this.
4967 CommutedMI->eraseFromParent();
4971 // Attempt to fold with the commuted version of the instruction.
4972 NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt,
4973 Size, Align, /*AllowCommute=*/false);
4977 // Folding failed again - undo the commute before returning.
4978 MachineInstr *UncommutedMI =
4979 commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
4980 if (!UncommutedMI) {
4981 // Unable to commute.
4984 if (UncommutedMI != &MI) {
4985 // New instruction. It doesn't need to be kept.
4986 UncommutedMI->eraseFromParent();
4990 // Return here to prevent duplicate fuse failure report.
4996 if (PrintFailedFusing && !MI.isCopy())
4997 dbgs() << "We failed to fuse operand " << OpNum << " in " << MI;
5002 X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
5003 ArrayRef<unsigned> Ops,
5004 MachineBasicBlock::iterator InsertPt,
5005 int FrameIndex, LiveIntervals *LIS,
5006 VirtRegMap *VRM) const {
5007 // Check switch flag
5011 // Avoid partial and undef register update stalls unless optimizing for size.
5012 if (!MF.getFunction().hasOptSize() &&
5013 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
5014 shouldPreventUndefRegUpdateMemFold(MF, MI)))
5017 // Don't fold subreg spills, or reloads that use a high subreg.
5018 for (auto Op : Ops) {
5019 MachineOperand &MO = MI.getOperand(Op);
5020 auto SubReg = MO.getSubReg();
5021 if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi))
5025 const MachineFrameInfo &MFI = MF.getFrameInfo();
5026 unsigned Size = MFI.getObjectSize(FrameIndex);
5027 unsigned Alignment = MFI.getObjectAlignment(FrameIndex);
5028 // If the function stack isn't realigned we don't want to fold instructions
5029 // that need increased alignment.
5030 if (!RI.needsStackRealignment(MF))
5032 std::min(Alignment, Subtarget.getFrameLowering()->getStackAlignment());
5033 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
5034 unsigned NewOpc = 0;
5035 unsigned RCSize = 0;
5036 switch (MI.getOpcode()) {
5037 default: return nullptr;
5038 case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break;
5039 case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
5040 case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
5041 case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
5043 // Check if it's safe to fold the load. If the size of the object is
5044 // narrower than the load width, then it's not.
5047 // Change to CMPXXri r, 0 first.
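// "test %reg, %reg" only inspects %reg itself, so when both register
// operands fold to the same spill slot the instruction is first rewritten
// as "cmp $0, %reg", leaving a single operand to fold against the slot.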
5048 MI.setDesc(get(NewOpc));
5049 MI.getOperand(1).ChangeToImmediate(0);
5050 } else if (Ops.size() != 1)
5053 return foldMemoryOperandImpl(MF, MI, Ops[0],
5054 MachineOperand::CreateFI(FrameIndex), InsertPt,
5055 Size, Alignment, /*AllowCommute=*/true);
5058 /// Check if \p LoadMI is a partial register load that we can't fold into \p MI
5059 /// because the latter uses contents that wouldn't be defined in the folded
5060 /// version. For instance, this transformation isn't legal:
5061 /// movss (%rdi), %xmm0
5062 ///   addps %xmm0, %xmm0
5063 /// ->
5064 ///   addps (%rdi), %xmm0
5065 ///
5066 /// But this one is:
5067 /// movss (%rdi), %xmm0
5068 ///   addss %xmm0, %xmm0
5069 /// ->
5070 ///   addss (%rdi), %xmm0
5072 static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
5073 const MachineInstr &UserMI,
5074 const MachineFunction &MF) {
5075 unsigned Opc = LoadMI.getOpcode();
5076 unsigned UserOpc = UserMI.getOpcode();
5077 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5078 const TargetRegisterClass *RC =
5079 MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());
5080 unsigned RegSize = TRI.getRegSizeInBits(*RC);
5082 if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm ||
5083 Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt ||
5084 Opc == X86::VMOVSSZrm_alt) &&
5086 // These instructions only load 32 bits, so we can't fold them if the
5087 // destination register is wider than 32 bits (4 bytes), and its user
5088 // instruction isn't scalar (SS).
5090 case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int:
5091 case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int:
5092 case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int:
5093 case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int:
5094 case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int:
5095 case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int:
5096 case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int:
5097 case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz:
5098 case X86::VCMPSSZrr_Intk:
5099 case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz:
5100 case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz:
5101 case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz:
5102 case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz:
5103 case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz:
5104 case X86::VFMADDSS4rr_Int: case X86::VFNMADDSS4rr_Int:
5105 case X86::VFMSUBSS4rr_Int: case X86::VFNMSUBSS4rr_Int:
5106 case X86::VFMADD132SSr_Int: case X86::VFNMADD132SSr_Int:
5107 case X86::VFMADD213SSr_Int: case X86::VFNMADD213SSr_Int:
5108 case X86::VFMADD231SSr_Int: case X86::VFNMADD231SSr_Int:
5109 case X86::VFMSUB132SSr_Int: case X86::VFNMSUB132SSr_Int:
5110 case X86::VFMSUB213SSr_Int: case X86::VFNMSUB213SSr_Int:
5111 case X86::VFMSUB231SSr_Int: case X86::VFNMSUB231SSr_Int:
5112 case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int:
5113 case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int:
5114 case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int:
5115 case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int:
5116 case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int:
5117 case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int:
5118 case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk:
5119 case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk:
5120 case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk:
5121 case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk:
5122 case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk:
5123 case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk:
5124 case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz:
5125 case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz:
5126 case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz:
5127 case X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz:
5128 case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz:
5129 case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz:
5136 if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm ||
5137 Opc == X86::MOVSDrm_alt || Opc == X86::VMOVSDrm_alt ||
5138 Opc == X86::VMOVSDZrm_alt) &&
5140 // These instructions only load 64 bits, so we can't fold them if the
5141 // destination register is wider than 64 bits (8 bytes), and its user
5142 // instruction isn't scalar (SD).
5144 case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int:
5145 case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int:
5146 case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int:
5147 case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int:
5148 case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int:
5149 case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int:
5150 case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int:
5151 case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz:
5152 case X86::VCMPSDZrr_Intk:
5153 case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz:
5154 case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz:
5155 case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz:
5156 case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz:
5157 case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz:
5158 case X86::VFMADDSD4rr_Int: case X86::VFNMADDSD4rr_Int:
5159 case X86::VFMSUBSD4rr_Int: case X86::VFNMSUBSD4rr_Int:
5160 case X86::VFMADD132SDr_Int: case X86::VFNMADD132SDr_Int:
5161 case X86::VFMADD213SDr_Int: case X86::VFNMADD213SDr_Int:
5162 case X86::VFMADD231SDr_Int: case X86::VFNMADD231SDr_Int:
5163 case X86::VFMSUB132SDr_Int: case X86::VFNMSUB132SDr_Int:
5164 case X86::VFMSUB213SDr_Int: case X86::VFNMSUB213SDr_Int:
5165 case X86::VFMSUB231SDr_Int: case X86::VFNMSUB231SDr_Int:
5166 case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int:
5167 case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int:
5168 case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int:
5169 case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int:
5170 case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int:
5171 case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int:
5172 case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk:
5173 case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk:
5174 case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk:
5175 case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk:
5176 case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk:
5177 case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk:
5178 case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz:
5179 case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz:
5180 case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz:
5181 case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz:
5182 case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz:
5183 case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz:
5193 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
5194 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
5195 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
5196 LiveIntervals *LIS) const {
5198 // TODO: Support the case where LoadMI loads a wide register, but MI
5199 // only uses a subreg.
5200 for (auto Op : Ops) {
5201 if (MI.getOperand(Op).getSubReg())
5205 // If loading from a FrameIndex, fold directly from the FrameIndex.
5206 unsigned NumOps = LoadMI.getDesc().getNumOperands();
5208 if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
5209 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
5211 return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
5214 // Check switch flag
5215 if (NoFusing) return nullptr;
5217 // Avoid partial and undef register update stalls unless optimizing for size.
5218 if (!MF.getFunction().hasOptSize() &&
5219 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
5220 shouldPreventUndefRegUpdateMemFold(MF, MI)))
5223 // Determine the alignment of the load.
5224 unsigned Alignment = 0;
5225 if (LoadMI.hasOneMemOperand())
5226 Alignment = (*LoadMI.memoperands_begin())->getAlignment();
5228 switch (LoadMI.getOpcode()) {
5229 case X86::AVX512_512_SET0:
5230 case X86::AVX512_512_SETALLONES:
5233 case X86::AVX2_SETALLONES:
5234 case X86::AVX1_SETALLONES:
5236 case X86::AVX512_256_SET0:
5240 case X86::V_SETALLONES:
5241 case X86::AVX512_128_SET0:
5242 case X86::FsFLD0F128:
5243 case X86::AVX512_FsFLD0F128:
5248 case X86::AVX512_FsFLD0SD:
5252 case X86::AVX512_FsFLD0SS:
5258 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
5259 unsigned NewOpc = 0;
5260 switch (MI.getOpcode()) {
5261 default: return nullptr;
5262 case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
5263 case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
5264 case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
5265 case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
5267 // Change to CMPXXri r, 0 first.
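// TESTrr reads the same register through both of its operands, so a load
// cannot be folded into just one of them; rewriting it as CMPri against 0
// leaves a single register use that the memory operand can replace below.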
5268 MI.setDesc(get(NewOpc));
5269 MI.getOperand(1).ChangeToImmediate(0);
5270 } else if (Ops.size() != 1)
5273 // Make sure the subregisters match.
5274 // Otherwise we risk changing the size of the load.
5275 if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg())
5278 SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
5279 switch (LoadMI.getOpcode()) {
5282 case X86::V_SETALLONES:
5283 case X86::AVX2_SETALLONES:
5284 case X86::AVX1_SETALLONES:
5286 case X86::AVX512_128_SET0:
5287 case X86::AVX512_256_SET0:
5288 case X86::AVX512_512_SET0:
5289 case X86::AVX512_512_SETALLONES:
5291 case X86::AVX512_FsFLD0SD:
5293 case X86::AVX512_FsFLD0SS:
5294 case X86::FsFLD0F128:
5295 case X86::AVX512_FsFLD0F128: {
5296 // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
5297 // Create a constant-pool entry and operands to load from it.
5299 // The medium and large code models can't fold loads this way.
5300 if (MF.getTarget().getCodeModel() != CodeModel::Small &&
5301 MF.getTarget().getCodeModel() != CodeModel::Kernel)
5304 // x86-32 PIC requires a PIC base register for constant pools.
5305 unsigned PICBase = 0;
5306 if (MF.getTarget().isPositionIndependent()) {
5307 if (Subtarget.is64Bit())
5308 PICBase = X86::RIP;
5309 else {
5310 // FIXME: PICBase = getGlobalBaseReg(&MF);
5311 // This doesn't work for several reasons.
5312 // 1. GlobalBaseReg may have been spilled.
5313 // 2. It may not be live at MI.
5317 // Create a constant-pool entry.
5318 MachineConstantPool &MCP = *MF.getConstantPool();
5319 Type *Ty;
5320 unsigned Opc = LoadMI.getOpcode();
5321 if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS)
5322 Ty = Type::getFloatTy(MF.getFunction().getContext());
5323 else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD)
5324 Ty = Type::getDoubleTy(MF.getFunction().getContext());
5325 else if (Opc == X86::FsFLD0F128 || Opc == X86::AVX512_FsFLD0F128)
5326 Ty = Type::getFP128Ty(MF.getFunction().getContext());
5327 else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
5328 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),16);
5329 else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
5330 Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES)
5331 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 8);
5332 else if (Opc == X86::MMX_SET0)
5333 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 2);
5334 else
5335 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 4);
5337 bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
5338 Opc == X86::AVX512_512_SETALLONES ||
5339 Opc == X86::AVX1_SETALLONES);
5340 const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
5341 Constant::getNullValue(Ty);
5342 unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
5344 // Create operands to load from the constant pool entry.
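// The five operands form a complete X86 memory reference: base register (the
// PIC base if one is needed), scale of 1, no index register, the constant-pool
// index as the displacement, and no segment register.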
5345 MOs.push_back(MachineOperand::CreateReg(PICBase, false));
5346 MOs.push_back(MachineOperand::CreateImm(1));
5347 MOs.push_back(MachineOperand::CreateReg(0, false));
5348 MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
5349 MOs.push_back(MachineOperand::CreateReg(0, false));
5353 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
5356 // Folding a normal load. Just copy the load's address operands.
5357 MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
5358 LoadMI.operands_begin() + NumOps);
5362 return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
5363 /*Size=*/0, Alignment, /*AllowCommute=*/true);
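// Return only the load-related memory operands from MMOs. Any MMO that
// describes both a load and a store is cloned with the store flag cleared so
// the resulting load instruction does not claim to write memory.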
5366 static SmallVector<MachineMemOperand *, 2>
5367 extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
5368 SmallVector<MachineMemOperand *, 2> LoadMMOs;
5370 for (MachineMemOperand *MMO : MMOs) {
5374 if (!MMO->isStore()) {
5376 LoadMMOs.push_back(MMO);
5378 // Clone the MMO and unset the store flag.
5379 LoadMMOs.push_back(MF.getMachineMemOperand(
5380 MMO, MMO->getFlags() & ~MachineMemOperand::MOStore));
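// extractStoreMMOs is the store-side counterpart: it keeps only the store
// MMOs, cloning any combined load/store MMO with the load flag cleared.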
5387 static SmallVector<MachineMemOperand *, 2>
5388 extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
5389 SmallVector<MachineMemOperand *, 2> StoreMMOs;
5391 for (MachineMemOperand *MMO : MMOs) {
5392 if (!MMO->isStore())
5395 if (!MMO->isLoad()) {
5397 StoreMMOs.push_back(MMO);
5399 // Clone the MMO and unset the load flag.
5400 StoreMMOs.push_back(MF.getMachineMemOperand(
5401 MMO, MMO->getFlags() & ~MachineMemOperand::MOLoad));
5408 static unsigned getBroadcastOpcode(const X86MemoryFoldTableEntry *I,
5409 const TargetRegisterClass *RC,
5410 const X86Subtarget &STI) {
5411 assert(STI.hasAVX512() && "Expected at least AVX512!");
5412 unsigned SpillSize = STI.getRegisterInfo()->getSpillSize(*RC);
5413 assert((SpillSize == 64 || STI.hasVLX()) &&
5414 "Can't broadcast less than 64 bytes without AVX512VL!");
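// The TB_BCAST_* flag encodes the broadcast element type (32-bit int, 64-bit
// int, float or double), and the spill size of the register class selects the
// 128-, 256- or 512-bit form of the broadcast instruction.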
5416 switch (I->Flags & TB_BCAST_MASK) {
5417 default: llvm_unreachable("Unexpected broadcast type!");
5419 switch (SpillSize) {
5420 default: llvm_unreachable("Unknown spill size");
5421 case 16: return X86::VPBROADCASTDZ128m;
5422 case 32: return X86::VPBROADCASTDZ256m;
5423 case 64: return X86::VPBROADCASTDZm;
5427 switch (SpillSize) {
5428 default: llvm_unreachable("Unknown spill size");
5429 case 16: return X86::VPBROADCASTQZ128m;
5430 case 32: return X86::VPBROADCASTQZ256m;
5431 case 64: return X86::VPBROADCASTQZm;
5435 switch (SpillSize) {
5436 default: llvm_unreachable("Unknown spill size");
5437 case 16: return X86::VBROADCASTSSZ128m;
5438 case 32: return X86::VBROADCASTSSZ256m;
5439 case 64: return X86::VBROADCASTSSZm;
5443 switch (SpillSize) {
5444 default: llvm_unreachable("Unknown spill size");
5445 case 16: return X86::VMOVDDUPZ128rm;
5446 case 32: return X86::VBROADCASTSDZ256m;
5447 case 64: return X86::VBROADCASTSDZm;
5453 bool X86InstrInfo::unfoldMemoryOperand(
5454 MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad,
5455 bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {
5456 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode());
5459 unsigned Opc = I->DstOp;
5460 unsigned Index = I->Flags & TB_INDEX_MASK;
5461 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
5462 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
5463 bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
5464 if (UnfoldLoad && !FoldedLoad)
5466 UnfoldLoad &= FoldedLoad;
5467 if (UnfoldStore && !FoldedStore)
5469 UnfoldStore &= FoldedStore;
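// The unfolded sequence is rebuilt in up to three pieces: a load (or
// broadcast) of the memory operand into Reg, the register form of the
// original instruction, and finally a store of the result back to memory.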
5471 const MCInstrDesc &MCID = get(Opc);
5473 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
5474 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5475 // TODO: Check if 32-byte or greater accesses are slow too?
5476 if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
5477 Subtarget.isUnalignedMem16Slow())
5478 // Without memoperands, loadRegFromAddr and storeRegToStackSlot will
5479 // conservatively assume the address is unaligned. That's bad for
5480 // performance.
5481 return false;
5482 SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
5483 SmallVector<MachineOperand,2> BeforeOps;
5484 SmallVector<MachineOperand,2> AfterOps;
5485 SmallVector<MachineOperand,4> ImpOps;
5486 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
5487 MachineOperand &Op = MI.getOperand(i);
5488 if (i >= Index && i < Index + X86::AddrNumOperands)
5489 AddrOps.push_back(Op);
5490 else if (Op.isReg() && Op.isImplicit())
5491 ImpOps.push_back(Op);
5492 else if (i < Index)
5493 BeforeOps.push_back(Op);
5494 else
5495 AfterOps.push_back(Op);
5498 // Emit the load or broadcast instruction.
5500 auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
5504 Opc = getBroadcastOpcode(I, RC, Subtarget);
5506 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
5507 bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
5508 Opc = getLoadRegOpcode(Reg, RC, isAligned, Subtarget);
5512 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), Reg);
5513 for (unsigned i = 0, e = AddrOps.size(); i != e; ++i)
5514 MIB.add(AddrOps[i]);
5515 MIB.setMemRefs(MMOs);
5516 NewMIs.push_back(MIB);
5519 // Address operands cannot be marked isKill.
5520 for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
5521 MachineOperand &MO = NewMIs[0]->getOperand(i);
5522 if (MO.isReg())
5523 MO.setIsKill(false);
5528 // Emit the data processing instruction.
5529 MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true);
5530 MachineInstrBuilder MIB(MF, DataMI);
5533 MIB.addReg(Reg, RegState::Define);
5534 for (MachineOperand &BeforeOp : BeforeOps)
5538 for (MachineOperand &AfterOp : AfterOps)
5540 for (MachineOperand &ImpOp : ImpOps) {
5541 MIB.addReg(ImpOp.getReg(),
5542 getDefRegState(ImpOp.isDef()) |
5543 RegState::Implicit |
5544 getKillRegState(ImpOp.isKill()) |
5545 getDeadRegState(ImpOp.isDead()) |
5546 getUndefRegState(ImpOp.isUndef()));
5548 // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
5549 switch (DataMI->getOpcode()) {
5551 case X86::CMP64ri32:
5558 MachineOperand &MO0 = DataMI->getOperand(0);
5559 MachineOperand &MO1 = DataMI->getOperand(1);
5560 if (MO1.getImm() == 0) {
5562 switch (DataMI->getOpcode()) {
5563 default: llvm_unreachable("Unreachable!");
5565 case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
5567 case X86::CMP32ri: NewOpc = X86::TEST32rr; break;
5569 case X86::CMP16ri: NewOpc = X86::TEST16rr; break;
5570 case X86::CMP8ri: NewOpc = X86::TEST8rr; break;
5572 DataMI->setDesc(get(NewOpc));
5573 MO1.ChangeToRegister(MO0.getReg(), false);
5577 NewMIs.push_back(DataMI);
5579 // Emit the store instruction.
5581 const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
5582 auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
5583 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
5584 bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
5585 unsigned Opc = getStoreRegOpcode(Reg, DstRC, isAligned, Subtarget);
5587 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
5588 for (unsigned i = 0, e = AddrOps.size(); i != e; ++i)
5589 MIB.add(AddrOps[i]);
5590 MIB.addReg(Reg, RegState::Kill);
5591 MIB.setMemRefs(MMOs);
5592 NewMIs.push_back(MIB);
5599 X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
5600 SmallVectorImpl<SDNode*> &NewNodes) const {
5601 if (!N->isMachineOpcode())
5604 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode());
5607 unsigned Opc = I->DstOp;
5608 unsigned Index = I->Flags & TB_INDEX_MASK;
5609 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
5610 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
5611 bool FoldedBCast = I->Flags & TB_FOLDED_BCAST;
5612 const MCInstrDesc &MCID = get(Opc);
5613 MachineFunction &MF = DAG.getMachineFunction();
5614 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
5615 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
5616 unsigned NumDefs = MCID.NumDefs;
5617 std::vector<SDValue> AddrOps;
5618 std::vector<SDValue> BeforeOps;
5619 std::vector<SDValue> AfterOps;
5621 unsigned NumOps = N->getNumOperands();
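// Unlike the MachineInstr form above, SDNode operands do not include the
// results, so the memory reference begins at Index - NumDefs; the final
// operand is the chain and is handled separately below.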
5622 for (unsigned i = 0; i != NumOps-1; ++i) {
5623 SDValue Op = N->getOperand(i);
5624 if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
5625 AddrOps.push_back(Op);
5626 else if (i < Index-NumDefs)
5627 BeforeOps.push_back(Op);
5628 else if (i > Index-NumDefs)
5629 AfterOps.push_back(Op);
5631 SDValue Chain = N->getOperand(NumOps-1);
5632 AddrOps.push_back(Chain);
5634 // Emit the load instruction.
5635 SDNode *Load = nullptr;
5637 EVT VT = *TRI.legalclasstypes_begin(*RC);
5638 auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
5639 if (MMOs.empty() && RC == &X86::VR128RegClass &&
5640 Subtarget.isUnalignedMem16Slow())
5641 // Do not introduce a slow unaligned load.
5642 return false;
5643 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
5644 // memory access is slow above.
5648 Opc = getBroadcastOpcode(I, RC, Subtarget);
5650 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
5651 bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
5652 Opc = getLoadRegOpcode(0, RC, isAligned, Subtarget);
5655 Load = DAG.getMachineNode(Opc, dl, VT, MVT::Other, AddrOps);
5656 NewNodes.push_back(Load);
5658 // Preserve memory reference information.
5659 DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs);
5662 // Emit the data processing instruction.
5663 std::vector<EVT> VTs;
5664 const TargetRegisterClass *DstRC = nullptr;
5665 if (MCID.getNumDefs() > 0) {
5666 DstRC = getRegClass(MCID, 0, &RI, MF);
5667 VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
5669 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
5670 EVT VT = N->getValueType(i);
5671 if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
5675 BeforeOps.push_back(SDValue(Load, 0));
5676 BeforeOps.insert(BeforeOps.end(), AfterOps.begin(), AfterOps.end());
5677 // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
5680 case X86::CMP64ri32:
5687 if (isNullConstant(BeforeOps[1])) {
5689 default: llvm_unreachable("Unreachable!");
5691 case X86::CMP64ri32: Opc = X86::TEST64rr; break;
5693 case X86::CMP32ri: Opc = X86::TEST32rr; break;
5695 case X86::CMP16ri: Opc = X86::TEST16rr; break;
5696 case X86::CMP8ri: Opc = X86::TEST8rr; break;
5698 BeforeOps[1] = BeforeOps[0];
5701 SDNode *NewNode= DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
5702 NewNodes.push_back(NewNode);
5704 // Emit the store instruction.
5707 AddrOps.push_back(SDValue(NewNode, 0));
5708 AddrOps.push_back(Chain);
5709 auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
5710 if (MMOs.empty() && RC == &X86::VR128RegClass &&
5711 Subtarget.isUnalignedMem16Slow())
5712 // Do not introduce a slow unaligned store.
5713 return false;
5714 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
5715 // memory access is slow above.
5716 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
5717 bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
5719 DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
5720 dl, MVT::Other, AddrOps);
5721 NewNodes.push_back(Store);
5723 // Preserve memory reference information.
5724 DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs);
5730 unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
5731 bool UnfoldLoad, bool UnfoldStore,
5732 unsigned *LoadRegIndex) const {
5733 const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc);
5736 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
5737 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
5738 if (UnfoldLoad && !FoldedLoad)
5740 if (UnfoldStore && !FoldedStore)
5743 *LoadRegIndex = I->Flags & TB_INDEX_MASK;
5748 X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
5749 int64_t &Offset1, int64_t &Offset2) const {
5750 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
5752 unsigned Opc1 = Load1->getMachineOpcode();
5753 unsigned Opc2 = Load2->getMachineOpcode();
5755 default: return false;
5764 case X86::MOVSSrm_alt:
5766 case X86::MOVSDrm_alt:
5767 case X86::MMX_MOVD64rm:
5768 case X86::MMX_MOVQ64rm:
5775 // AVX load instructions
5777 case X86::VMOVSSrm_alt:
5779 case X86::VMOVSDrm_alt:
5780 case X86::VMOVAPSrm:
5781 case X86::VMOVUPSrm:
5782 case X86::VMOVAPDrm:
5783 case X86::VMOVUPDrm:
5784 case X86::VMOVDQArm:
5785 case X86::VMOVDQUrm:
5786 case X86::VMOVAPSYrm:
5787 case X86::VMOVUPSYrm:
5788 case X86::VMOVAPDYrm:
5789 case X86::VMOVUPDYrm:
5790 case X86::VMOVDQAYrm:
5791 case X86::VMOVDQUYrm:
5792 // AVX512 load instructions
5793 case X86::VMOVSSZrm:
5794 case X86::VMOVSSZrm_alt:
5795 case X86::VMOVSDZrm:
5796 case X86::VMOVSDZrm_alt:
5797 case X86::VMOVAPSZ128rm:
5798 case X86::VMOVUPSZ128rm:
5799 case X86::VMOVAPSZ128rm_NOVLX:
5800 case X86::VMOVUPSZ128rm_NOVLX:
5801 case X86::VMOVAPDZ128rm:
5802 case X86::VMOVUPDZ128rm:
5803 case X86::VMOVDQU8Z128rm:
5804 case X86::VMOVDQU16Z128rm:
5805 case X86::VMOVDQA32Z128rm:
5806 case X86::VMOVDQU32Z128rm:
5807 case X86::VMOVDQA64Z128rm:
5808 case X86::VMOVDQU64Z128rm:
5809 case X86::VMOVAPSZ256rm:
5810 case X86::VMOVUPSZ256rm:
5811 case X86::VMOVAPSZ256rm_NOVLX:
5812 case X86::VMOVUPSZ256rm_NOVLX:
5813 case X86::VMOVAPDZ256rm:
5814 case X86::VMOVUPDZ256rm:
5815 case X86::VMOVDQU8Z256rm:
5816 case X86::VMOVDQU16Z256rm:
5817 case X86::VMOVDQA32Z256rm:
5818 case X86::VMOVDQU32Z256rm:
5819 case X86::VMOVDQA64Z256rm:
5820 case X86::VMOVDQU64Z256rm:
5821 case X86::VMOVAPSZrm:
5822 case X86::VMOVUPSZrm:
5823 case X86::VMOVAPDZrm:
5824 case X86::VMOVUPDZrm:
5825 case X86::VMOVDQU8Zrm:
5826 case X86::VMOVDQU16Zrm:
5827 case X86::VMOVDQA32Zrm:
5828 case X86::VMOVDQU32Zrm:
5829 case X86::VMOVDQA64Zrm:
5830 case X86::VMOVDQU64Zrm:
5838 default: return false;
5847 case X86::MOVSSrm_alt:
5849 case X86::MOVSDrm_alt:
5850 case X86::MMX_MOVD64rm:
5851 case X86::MMX_MOVQ64rm:
5858 // AVX load instructions
5860 case X86::VMOVSSrm_alt:
5862 case X86::VMOVSDrm_alt:
5863 case X86::VMOVAPSrm:
5864 case X86::VMOVUPSrm:
5865 case X86::VMOVAPDrm:
5866 case X86::VMOVUPDrm:
5867 case X86::VMOVDQArm:
5868 case X86::VMOVDQUrm:
5869 case X86::VMOVAPSYrm:
5870 case X86::VMOVUPSYrm:
5871 case X86::VMOVAPDYrm:
5872 case X86::VMOVUPDYrm:
5873 case X86::VMOVDQAYrm:
5874 case X86::VMOVDQUYrm:
5875 // AVX512 load instructions
5876 case X86::VMOVSSZrm:
5877 case X86::VMOVSSZrm_alt:
5878 case X86::VMOVSDZrm:
5879 case X86::VMOVSDZrm_alt:
5880 case X86::VMOVAPSZ128rm:
5881 case X86::VMOVUPSZ128rm:
5882 case X86::VMOVAPSZ128rm_NOVLX:
5883 case X86::VMOVUPSZ128rm_NOVLX:
5884 case X86::VMOVAPDZ128rm:
5885 case X86::VMOVUPDZ128rm:
5886 case X86::VMOVDQU8Z128rm:
5887 case X86::VMOVDQU16Z128rm:
5888 case X86::VMOVDQA32Z128rm:
5889 case X86::VMOVDQU32Z128rm:
5890 case X86::VMOVDQA64Z128rm:
5891 case X86::VMOVDQU64Z128rm:
5892 case X86::VMOVAPSZ256rm:
5893 case X86::VMOVUPSZ256rm:
5894 case X86::VMOVAPSZ256rm_NOVLX:
5895 case X86::VMOVUPSZ256rm_NOVLX:
5896 case X86::VMOVAPDZ256rm:
5897 case X86::VMOVUPDZ256rm:
5898 case X86::VMOVDQU8Z256rm:
5899 case X86::VMOVDQU16Z256rm:
5900 case X86::VMOVDQA32Z256rm:
5901 case X86::VMOVDQU32Z256rm:
5902 case X86::VMOVDQA64Z256rm:
5903 case X86::VMOVDQU64Z256rm:
5904 case X86::VMOVAPSZrm:
5905 case X86::VMOVUPSZrm:
5906 case X86::VMOVAPDZrm:
5907 case X86::VMOVUPDZrm:
5908 case X86::VMOVDQU8Zrm:
5909 case X86::VMOVDQU16Zrm:
5910 case X86::VMOVDQA32Zrm:
5911 case X86::VMOVDQU32Zrm:
5912 case X86::VMOVDQA64Zrm:
5913 case X86::VMOVDQU64Zrm:
5921 // Lambda to check whether both loads have the same value at a given operand index.
5922 auto HasSameOp = [&](int I) {
5923 return Load1->getOperand(I) == Load2->getOperand(I);
5926 // All operands except the displacement should match.
5927 if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) ||
5928 !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg))
5931 // Chain Operand must be the same.
5935 // Now let's examine if the displacements are constants.
5936 auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp));
5937 auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp));
5938 if (!Disp1 || !Disp2)
5941 Offset1 = Disp1->getSExtValue();
5942 Offset2 = Disp2->getSExtValue();
5946 bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
5947 int64_t Offset1, int64_t Offset2,
5948 unsigned NumLoads) const {
5949 assert(Offset2 > Offset1);
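// Only cluster the two loads if their offsets are reasonably close together
// (within roughly 512 bytes); beyond that the scheduler gains little from
// keeping them adjacent.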
5950 if ((Offset2 - Offset1) / 8 > 64)
5953 unsigned Opc1 = Load1->getMachineOpcode();
5954 unsigned Opc2 = Load2->getMachineOpcode();
5956 return false; // FIXME: overly conservative?
5963 case X86::MMX_MOVD64rm:
5964 case X86::MMX_MOVQ64rm:
5968 EVT VT = Load1->getValueType(0);
5969 switch (VT.getSimpleVT().SimpleTy) {
5971 // XMM registers. In 64-bit mode we can be a bit more aggressive since we
5972 // have 16 of them to play with.
5973 if (Subtarget.is64Bit()) {
5976 } else if (NumLoads) {
5995 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
5996 assert(Cond.size() == 1 && "Invalid X86 branch condition!");
5997 X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
5998 Cond[0].setImm(GetOppositeBranchCondition(CC));
6003 isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
6004 // FIXME: Return false for x87 stack register classes for now. We can't
6005 // allow any loads of these registers before FpGet_ST0_80.
6006 return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
6007 RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
6008 RC == &X86::RFP80RegClass);
6011 /// Return a virtual register initialized with the
6012 /// global base register value. Output instructions required to
6013 /// initialize the register in the function entry block, if necessary.
6015 /// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
6017 unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
6018 assert((!Subtarget.is64Bit() ||
6019 MF->getTarget().getCodeModel() == CodeModel::Medium ||
6020 MF->getTarget().getCodeModel() == CodeModel::Large) &&
6021 "X86-64 PIC uses RIP relative addressing");
6023 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
6024 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
6025 if (GlobalBaseReg != 0)
6026 return GlobalBaseReg;
6028 // Create the register. The code to initialize it is inserted
6029 // later, by the CGBR pass (below).
6030 MachineRegisterInfo &RegInfo = MF->getRegInfo();
6031 GlobalBaseReg = RegInfo.createVirtualRegister(
6032 Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
6033 X86FI->setGlobalBaseReg(GlobalBaseReg);
6034 return GlobalBaseReg;
6037 // These are the replaceable SSE instructions. Some of these have Int variants
6038 // that we don't include here. We don't want to replace instructions selected
6039 // by intrinsics.
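// Each row names the same operation in the PackedSingle, PackedDouble and
// PackedInt execution domains; the domain-switching code looks an opcode up
// in one column and rewrites it to the column of the requested domain, which
// changes the instruction's domain without changing its result.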
6040 static const uint16_t ReplaceableInstrs[][3] = {
6041 //PackedSingle PackedDouble PackedInt
6042 { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr },
6043 { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm },
6044 { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr },
6045 { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr },
6046 { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm },
6047 { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr },
6048 { X86::MOVSDmr, X86::MOVSDmr, X86::MOVPQI2QImr },
6049 { X86::MOVSSmr, X86::MOVSSmr, X86::MOVPDI2DImr },
6050 { X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm },
6051 { X86::MOVSDrm_alt,X86::MOVSDrm_alt,X86::MOVQI2PQIrm },
6052 { X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm },
6053 { X86::MOVSSrm_alt,X86::MOVSSrm_alt,X86::MOVDI2PDIrm },
6054 { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr },
6055 { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm },
6056 { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr },
6057 { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm },
6058 { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr },
6059 { X86::ORPSrm, X86::ORPDrm, X86::PORrm },
6060 { X86::ORPSrr, X86::ORPDrr, X86::PORrr },
6061 { X86::XORPSrm, X86::XORPDrm, X86::PXORrm },
6062 { X86::XORPSrr, X86::XORPDrr, X86::PXORrr },
6063 { X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm },
6064 { X86::MOVLHPSrr, X86::UNPCKLPDrr, X86::PUNPCKLQDQrr },
6065 { X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm },
6066 { X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr },
6067 { X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm },
6068 { X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr },
6069 { X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm },
6070 { X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr },
6071 { X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr },
6072 { X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr },
6073 // AVX 128-bit support
6074 { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr },
6075 { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm },
6076 { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr },
6077 { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr },
6078 { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm },
6079 { X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr },
6080 { X86::VMOVSDmr, X86::VMOVSDmr, X86::VMOVPQI2QImr },
6081 { X86::VMOVSSmr, X86::VMOVSSmr, X86::VMOVPDI2DImr },
6082 { X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm },
6083 { X86::VMOVSDrm_alt,X86::VMOVSDrm_alt,X86::VMOVQI2PQIrm },
6084 { X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm },
6085 { X86::VMOVSSrm_alt,X86::VMOVSSrm_alt,X86::VMOVDI2PDIrm },
6086 { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
6087 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm },
6088 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr },
6089 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm },
6090 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr },
6091 { X86::VORPSrm, X86::VORPDrm, X86::VPORrm },
6092 { X86::VORPSrr, X86::VORPDrr, X86::VPORrr },
6093 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm },
6094 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr },
6095 { X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm },
6096 { X86::VMOVLHPSrr, X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr },
6097 { X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm },
6098 { X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr },
6099 { X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm },
6100 { X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr },
6101 { X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm },
6102 { X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, X86::VPUNPCKHDQrr },
6103 { X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr },
6104 { X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr },
6105 // AVX 256-bit support
6106 { X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr },
6107 { X86::VMOVAPSYrm, X86::VMOVAPDYrm, X86::VMOVDQAYrm },
6108 { X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr },
6109 { X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr },
6110 { X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm },
6111 { X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr },
6112 { X86::VPERMPSYrm, X86::VPERMPSYrm, X86::VPERMDYrm },
6113 { X86::VPERMPSYrr, X86::VPERMPSYrr, X86::VPERMDYrr },
6114 { X86::VPERMPDYmi, X86::VPERMPDYmi, X86::VPERMQYmi },
6115 { X86::VPERMPDYri, X86::VPERMPDYri, X86::VPERMQYri },
6117 { X86::VMOVLPSZ128mr, X86::VMOVLPDZ128mr, X86::VMOVPQI2QIZmr },
6118 { X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr },
6119 { X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr },
6120 { X86::VMOVNTPSZmr, X86::VMOVNTPDZmr, X86::VMOVNTDQZmr },
6121 { X86::VMOVSDZmr, X86::VMOVSDZmr, X86::VMOVPQI2QIZmr },
6122 { X86::VMOVSSZmr, X86::VMOVSSZmr, X86::VMOVPDI2DIZmr },
6123 { X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm },
6124 { X86::VMOVSDZrm_alt, X86::VMOVSDZrm_alt, X86::VMOVQI2PQIZrm },
6125 { X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm },
6126 { X86::VMOVSSZrm_alt, X86::VMOVSSZrm_alt, X86::VMOVDI2PDIZrm },
6127 { X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128r, X86::VPBROADCASTDZ128r },
6128 { X86::VBROADCASTSSZ128m, X86::VBROADCASTSSZ128m, X86::VPBROADCASTDZ128m },
6129 { X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256r, X86::VPBROADCASTDZ256r },
6130 { X86::VBROADCASTSSZ256m, X86::VBROADCASTSSZ256m, X86::VPBROADCASTDZ256m },
6131 { X86::VBROADCASTSSZr, X86::VBROADCASTSSZr, X86::VPBROADCASTDZr },
6132 { X86::VBROADCASTSSZm, X86::VBROADCASTSSZm, X86::VPBROADCASTDZm },
6133 { X86::VMOVDDUPZ128rr, X86::VMOVDDUPZ128rr, X86::VPBROADCASTQZ128r },
6134 { X86::VMOVDDUPZ128rm, X86::VMOVDDUPZ128rm, X86::VPBROADCASTQZ128m },
6135 { X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256r, X86::VPBROADCASTQZ256r },
6136 { X86::VBROADCASTSDZ256m, X86::VBROADCASTSDZ256m, X86::VPBROADCASTQZ256m },
6137 { X86::VBROADCASTSDZr, X86::VBROADCASTSDZr, X86::VPBROADCASTQZr },
6138 { X86::VBROADCASTSDZm, X86::VBROADCASTSDZm, X86::VPBROADCASTQZm },
6139 { X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrr, X86::VINSERTI32x4Zrr },
6140 { X86::VINSERTF32x4Zrm, X86::VINSERTF32x4Zrm, X86::VINSERTI32x4Zrm },
6141 { X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrr, X86::VINSERTI32x8Zrr },
6142 { X86::VINSERTF32x8Zrm, X86::VINSERTF32x8Zrm, X86::VINSERTI32x8Zrm },
6143 { X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrr, X86::VINSERTI64x2Zrr },
6144 { X86::VINSERTF64x2Zrm, X86::VINSERTF64x2Zrm, X86::VINSERTI64x2Zrm },
6145 { X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrr, X86::VINSERTI64x4Zrr },
6146 { X86::VINSERTF64x4Zrm, X86::VINSERTF64x4Zrm, X86::VINSERTI64x4Zrm },
6147 { X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr },
6148 { X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm },
6149 { X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr },
6150 { X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm },
6151 { X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zrr, X86::VEXTRACTI32x4Zrr },
6152 { X86::VEXTRACTF32x4Zmr, X86::VEXTRACTF32x4Zmr, X86::VEXTRACTI32x4Zmr },
6153 { X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zrr, X86::VEXTRACTI32x8Zrr },
6154 { X86::VEXTRACTF32x8Zmr, X86::VEXTRACTF32x8Zmr, X86::VEXTRACTI32x8Zmr },
6155 { X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zrr, X86::VEXTRACTI64x2Zrr },
6156 { X86::VEXTRACTF64x2Zmr, X86::VEXTRACTF64x2Zmr, X86::VEXTRACTI64x2Zmr },
6157 { X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zrr, X86::VEXTRACTI64x4Zrr },
6158 { X86::VEXTRACTF64x4Zmr, X86::VEXTRACTF64x4Zmr, X86::VEXTRACTI64x4Zmr },
6159 { X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr },
6160 { X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr },
6161 { X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr },
6162 { X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr },
6163 { X86::VPERMILPSmi, X86::VPERMILPSmi, X86::VPSHUFDmi },
6164 { X86::VPERMILPSri, X86::VPERMILPSri, X86::VPSHUFDri },
6165 { X86::VPERMILPSZ128mi, X86::VPERMILPSZ128mi, X86::VPSHUFDZ128mi },
6166 { X86::VPERMILPSZ128ri, X86::VPERMILPSZ128ri, X86::VPSHUFDZ128ri },
6167 { X86::VPERMILPSZ256mi, X86::VPERMILPSZ256mi, X86::VPSHUFDZ256mi },
6168 { X86::VPERMILPSZ256ri, X86::VPERMILPSZ256ri, X86::VPSHUFDZ256ri },
6169 { X86::VPERMILPSZmi, X86::VPERMILPSZmi, X86::VPSHUFDZmi },
6170 { X86::VPERMILPSZri, X86::VPERMILPSZri, X86::VPSHUFDZri },
6171 { X86::VPERMPSZ256rm, X86::VPERMPSZ256rm, X86::VPERMDZ256rm },
6172 { X86::VPERMPSZ256rr, X86::VPERMPSZ256rr, X86::VPERMDZ256rr },
6173 { X86::VPERMPDZ256mi, X86::VPERMPDZ256mi, X86::VPERMQZ256mi },
6174 { X86::VPERMPDZ256ri, X86::VPERMPDZ256ri, X86::VPERMQZ256ri },
6175 { X86::VPERMPDZ256rm, X86::VPERMPDZ256rm, X86::VPERMQZ256rm },
6176 { X86::VPERMPDZ256rr, X86::VPERMPDZ256rr, X86::VPERMQZ256rr },
6177 { X86::VPERMPSZrm, X86::VPERMPSZrm, X86::VPERMDZrm },
6178 { X86::VPERMPSZrr, X86::VPERMPSZrr, X86::VPERMDZrr },
6179 { X86::VPERMPDZmi, X86::VPERMPDZmi, X86::VPERMQZmi },
6180 { X86::VPERMPDZri, X86::VPERMPDZri, X86::VPERMQZri },
6181 { X86::VPERMPDZrm, X86::VPERMPDZrm, X86::VPERMQZrm },
6182 { X86::VPERMPDZrr, X86::VPERMPDZrr, X86::VPERMQZrr },
6183 { X86::VUNPCKLPDZ256rm, X86::VUNPCKLPDZ256rm, X86::VPUNPCKLQDQZ256rm },
6184 { X86::VUNPCKLPDZ256rr, X86::VUNPCKLPDZ256rr, X86::VPUNPCKLQDQZ256rr },
6185 { X86::VUNPCKHPDZ256rm, X86::VUNPCKHPDZ256rm, X86::VPUNPCKHQDQZ256rm },
6186 { X86::VUNPCKHPDZ256rr, X86::VUNPCKHPDZ256rr, X86::VPUNPCKHQDQZ256rr },
6187 { X86::VUNPCKLPSZ256rm, X86::VUNPCKLPSZ256rm, X86::VPUNPCKLDQZ256rm },
6188 { X86::VUNPCKLPSZ256rr, X86::VUNPCKLPSZ256rr, X86::VPUNPCKLDQZ256rr },
6189 { X86::VUNPCKHPSZ256rm, X86::VUNPCKHPSZ256rm, X86::VPUNPCKHDQZ256rm },
6190 { X86::VUNPCKHPSZ256rr, X86::VUNPCKHPSZ256rr, X86::VPUNPCKHDQZ256rr },
6191 { X86::VUNPCKLPDZ128rm, X86::VUNPCKLPDZ128rm, X86::VPUNPCKLQDQZ128rm },
6192 { X86::VMOVLHPSZrr, X86::VUNPCKLPDZ128rr, X86::VPUNPCKLQDQZ128rr },
6193 { X86::VUNPCKHPDZ128rm, X86::VUNPCKHPDZ128rm, X86::VPUNPCKHQDQZ128rm },
6194 { X86::VUNPCKHPDZ128rr, X86::VUNPCKHPDZ128rr, X86::VPUNPCKHQDQZ128rr },
6195 { X86::VUNPCKLPSZ128rm, X86::VUNPCKLPSZ128rm, X86::VPUNPCKLDQZ128rm },
6196 { X86::VUNPCKLPSZ128rr, X86::VUNPCKLPSZ128rr, X86::VPUNPCKLDQZ128rr },
6197 { X86::VUNPCKHPSZ128rm, X86::VUNPCKHPSZ128rm, X86::VPUNPCKHDQZ128rm },
6198 { X86::VUNPCKHPSZ128rr, X86::VUNPCKHPSZ128rr, X86::VPUNPCKHDQZ128rr },
6199 { X86::VUNPCKLPDZrm, X86::VUNPCKLPDZrm, X86::VPUNPCKLQDQZrm },
6200 { X86::VUNPCKLPDZrr, X86::VUNPCKLPDZrr, X86::VPUNPCKLQDQZrr },
6201 { X86::VUNPCKHPDZrm, X86::VUNPCKHPDZrm, X86::VPUNPCKHQDQZrm },
6202 { X86::VUNPCKHPDZrr, X86::VUNPCKHPDZrr, X86::VPUNPCKHQDQZrr },
6203 { X86::VUNPCKLPSZrm, X86::VUNPCKLPSZrm, X86::VPUNPCKLDQZrm },
6204 { X86::VUNPCKLPSZrr, X86::VUNPCKLPSZrr, X86::VPUNPCKLDQZrr },
6205 { X86::VUNPCKHPSZrm, X86::VUNPCKHPSZrm, X86::VPUNPCKHDQZrm },
6206 { X86::VUNPCKHPSZrr, X86::VUNPCKHPSZrr, X86::VPUNPCKHDQZrr },
6207 { X86::VEXTRACTPSZmr, X86::VEXTRACTPSZmr, X86::VPEXTRDZmr },
6208 { X86::VEXTRACTPSZrr, X86::VEXTRACTPSZrr, X86::VPEXTRDZrr },
6211 static const uint16_t ReplaceableInstrsAVX2[][3] = {
6212 //PackedSingle PackedDouble PackedInt
6213 { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm },
6214 { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr },
6215 { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm },
6216 { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr },
6217 { X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm },
6218 { X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr },
6219 { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm },
6220 { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr },
6221 { X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm },
6222 { X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr },
6223 { X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm},
6224 { X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr},
6225 { X86::VMOVDDUPrm, X86::VMOVDDUPrm, X86::VPBROADCASTQrm},
6226 { X86::VMOVDDUPrr, X86::VMOVDDUPrr, X86::VPBROADCASTQrr},
6227 { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr},
6228 { X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm},
6229 { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr},
6230 { X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm},
6231 { X86::VBROADCASTF128, X86::VBROADCASTF128, X86::VBROADCASTI128 },
6232 { X86::VBLENDPSYrri, X86::VBLENDPSYrri, X86::VPBLENDDYrri },
6233 { X86::VBLENDPSYrmi, X86::VBLENDPSYrmi, X86::VPBLENDDYrmi },
6234 { X86::VPERMILPSYmi, X86::VPERMILPSYmi, X86::VPSHUFDYmi },
6235 { X86::VPERMILPSYri, X86::VPERMILPSYri, X86::VPSHUFDYri },
6236 { X86::VUNPCKLPDYrm, X86::VUNPCKLPDYrm, X86::VPUNPCKLQDQYrm },
6237 { X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrr, X86::VPUNPCKLQDQYrr },
6238 { X86::VUNPCKHPDYrm, X86::VUNPCKHPDYrm, X86::VPUNPCKHQDQYrm },
6239 { X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrr, X86::VPUNPCKHQDQYrr },
6240 { X86::VUNPCKLPSYrm, X86::VUNPCKLPSYrm, X86::VPUNPCKLDQYrm },
6241 { X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrr, X86::VPUNPCKLDQYrr },
6242 { X86::VUNPCKHPSYrm, X86::VUNPCKHPSYrm, X86::VPUNPCKHDQYrm },
6243 { X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrr, X86::VPUNPCKHDQYrr },
6246 static const uint16_t ReplaceableInstrsFP[][3] = {
6247 //PackedSingle PackedDouble
6248 { X86::MOVLPSrm, X86::MOVLPDrm, X86::INSTRUCTION_LIST_END },
6249 { X86::MOVHPSrm, X86::MOVHPDrm, X86::INSTRUCTION_LIST_END },
6250 { X86::MOVHPSmr, X86::MOVHPDmr, X86::INSTRUCTION_LIST_END },
6251 { X86::VMOVLPSrm, X86::VMOVLPDrm, X86::INSTRUCTION_LIST_END },
6252 { X86::VMOVHPSrm, X86::VMOVHPDrm, X86::INSTRUCTION_LIST_END },
6253 { X86::VMOVHPSmr, X86::VMOVHPDmr, X86::INSTRUCTION_LIST_END },
6254 { X86::VMOVLPSZ128rm, X86::VMOVLPDZ128rm, X86::INSTRUCTION_LIST_END },
6255 { X86::VMOVHPSZ128rm, X86::VMOVHPDZ128rm, X86::INSTRUCTION_LIST_END },
6256 { X86::VMOVHPSZ128mr, X86::VMOVHPDZ128mr, X86::INSTRUCTION_LIST_END },
6259 static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = {
6260 //PackedSingle PackedDouble PackedInt
6261 { X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr },
6262 { X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
6263 { X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm },
6264 { X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr },
6267 static const uint16_t ReplaceableInstrsAVX512[][4] = {
6268 // Two integer columns for 64-bit and 32-bit elements.
6269 //PackedSingle PackedDouble PackedInt PackedInt
6270 { X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr },
6271 { X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm },
6272 { X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr },
6273 { X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr },
6274 { X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm },
6275 { X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr },
6276 { X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm },
6277 { X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr },
6278 { X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr },
6279 { X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm },
6280 { X86::VMOVAPSZmr, X86::VMOVAPDZmr, X86::VMOVDQA64Zmr, X86::VMOVDQA32Zmr },
6281 { X86::VMOVAPSZrm, X86::VMOVAPDZrm, X86::VMOVDQA64Zrm, X86::VMOVDQA32Zrm },
6282 { X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr },
6283 { X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr },
6284 { X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm },
6287 static const uint16_t ReplaceableInstrsAVX512DQ[][4] = {
6288 // Two integer columns for 64-bit and 32-bit elements.
6289 //PackedSingle PackedDouble PackedInt PackedInt
6290 { X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
6291 { X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
6292 { X86::VANDPSZ128rm, X86::VANDPDZ128rm, X86::VPANDQZ128rm, X86::VPANDDZ128rm },
6293 { X86::VANDPSZ128rr, X86::VANDPDZ128rr, X86::VPANDQZ128rr, X86::VPANDDZ128rr },
6294 { X86::VORPSZ128rm, X86::VORPDZ128rm, X86::VPORQZ128rm, X86::VPORDZ128rm },
6295 { X86::VORPSZ128rr, X86::VORPDZ128rr, X86::VPORQZ128rr, X86::VPORDZ128rr },
6296 { X86::VXORPSZ128rm, X86::VXORPDZ128rm, X86::VPXORQZ128rm, X86::VPXORDZ128rm },
6297 { X86::VXORPSZ128rr, X86::VXORPDZ128rr, X86::VPXORQZ128rr, X86::VPXORDZ128rr },
6298 { X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
6299 { X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
6300 { X86::VANDPSZ256rm, X86::VANDPDZ256rm, X86::VPANDQZ256rm, X86::VPANDDZ256rm },
6301 { X86::VANDPSZ256rr, X86::VANDPDZ256rr, X86::VPANDQZ256rr, X86::VPANDDZ256rr },
6302 { X86::VORPSZ256rm, X86::VORPDZ256rm, X86::VPORQZ256rm, X86::VPORDZ256rm },
6303 { X86::VORPSZ256rr, X86::VORPDZ256rr, X86::VPORQZ256rr, X86::VPORDZ256rr },
6304 { X86::VXORPSZ256rm, X86::VXORPDZ256rm, X86::VPXORQZ256rm, X86::VPXORDZ256rm },
6305 { X86::VXORPSZ256rr, X86::VXORPDZ256rr, X86::VPXORQZ256rr, X86::VPXORDZ256rr },
6306 { X86::VANDNPSZrm, X86::VANDNPDZrm, X86::VPANDNQZrm, X86::VPANDNDZrm },
6307 { X86::VANDNPSZrr, X86::VANDNPDZrr, X86::VPANDNQZrr, X86::VPANDNDZrr },
6308 { X86::VANDPSZrm, X86::VANDPDZrm, X86::VPANDQZrm, X86::VPANDDZrm },
6309 { X86::VANDPSZrr, X86::VANDPDZrr, X86::VPANDQZrr, X86::VPANDDZrr },
6310 { X86::VORPSZrm, X86::VORPDZrm, X86::VPORQZrm, X86::VPORDZrm },
6311 { X86::VORPSZrr, X86::VORPDZrr, X86::VPORQZrr, X86::VPORDZrr },
6312 { X86::VXORPSZrm, X86::VXORPDZrm, X86::VPXORQZrm, X86::VPXORDZrm },
6313 { X86::VXORPSZrr, X86::VXORPDZrr, X86::VPXORQZrr, X86::VPXORDZrr },
6316 static const uint16_t ReplaceableInstrsAVX512DQMasked[][4] = {
6317 // Two integer columns for 64-bit and 32-bit elements.
6318 //PackedSingle PackedDouble
6319 //PackedInt PackedInt
6320 { X86::VANDNPSZ128rmk, X86::VANDNPDZ128rmk,
6321 X86::VPANDNQZ128rmk, X86::VPANDNDZ128rmk },
6322 { X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz,
6323 X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz },
6324 { X86::VANDNPSZ128rrk, X86::VANDNPDZ128rrk,
6325 X86::VPANDNQZ128rrk, X86::VPANDNDZ128rrk },
6326 { X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz,
6327 X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz },
6328 { X86::VANDPSZ128rmk, X86::VANDPDZ128rmk,
6329 X86::VPANDQZ128rmk, X86::VPANDDZ128rmk },
6330 { X86::VANDPSZ128rmkz, X86::VANDPDZ128rmkz,
6331 X86::VPANDQZ128rmkz, X86::VPANDDZ128rmkz },
6332 { X86::VANDPSZ128rrk, X86::VANDPDZ128rrk,
6333 X86::VPANDQZ128rrk, X86::VPANDDZ128rrk },
6334 { X86::VANDPSZ128rrkz, X86::VANDPDZ128rrkz,
6335 X86::VPANDQZ128rrkz, X86::VPANDDZ128rrkz },
6336 { X86::VORPSZ128rmk, X86::VORPDZ128rmk,
6337 X86::VPORQZ128rmk, X86::VPORDZ128rmk },
6338 { X86::VORPSZ128rmkz, X86::VORPDZ128rmkz,
6339 X86::VPORQZ128rmkz, X86::VPORDZ128rmkz },
6340 { X86::VORPSZ128rrk, X86::VORPDZ128rrk,
6341 X86::VPORQZ128rrk, X86::VPORDZ128rrk },
6342 { X86::VORPSZ128rrkz, X86::VORPDZ128rrkz,
6343 X86::VPORQZ128rrkz, X86::VPORDZ128rrkz },
6344 { X86::VXORPSZ128rmk, X86::VXORPDZ128rmk,
6345 X86::VPXORQZ128rmk, X86::VPXORDZ128rmk },
6346 { X86::VXORPSZ128rmkz, X86::VXORPDZ128rmkz,
6347 X86::VPXORQZ128rmkz, X86::VPXORDZ128rmkz },
6348 { X86::VXORPSZ128rrk, X86::VXORPDZ128rrk,
6349 X86::VPXORQZ128rrk, X86::VPXORDZ128rrk },
6350 { X86::VXORPSZ128rrkz, X86::VXORPDZ128rrkz,
6351 X86::VPXORQZ128rrkz, X86::VPXORDZ128rrkz },
6352 { X86::VANDNPSZ256rmk, X86::VANDNPDZ256rmk,
6353 X86::VPANDNQZ256rmk, X86::VPANDNDZ256rmk },
6354 { X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz,
6355 X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz },
6356 { X86::VANDNPSZ256rrk, X86::VANDNPDZ256rrk,
6357 X86::VPANDNQZ256rrk, X86::VPANDNDZ256rrk },
6358 { X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz,
6359 X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz },
6360 { X86::VANDPSZ256rmk, X86::VANDPDZ256rmk,
6361 X86::VPANDQZ256rmk, X86::VPANDDZ256rmk },
6362 { X86::VANDPSZ256rmkz, X86::VANDPDZ256rmkz,
6363 X86::VPANDQZ256rmkz, X86::VPANDDZ256rmkz },
6364 { X86::VANDPSZ256rrk, X86::VANDPDZ256rrk,
6365 X86::VPANDQZ256rrk, X86::VPANDDZ256rrk },
6366 { X86::VANDPSZ256rrkz, X86::VANDPDZ256rrkz,
6367 X86::VPANDQZ256rrkz, X86::VPANDDZ256rrkz },
6368 { X86::VORPSZ256rmk, X86::VORPDZ256rmk,
6369 X86::VPORQZ256rmk, X86::VPORDZ256rmk },
6370 { X86::VORPSZ256rmkz, X86::VORPDZ256rmkz,
6371 X86::VPORQZ256rmkz, X86::VPORDZ256rmkz },
6372 { X86::VORPSZ256rrk, X86::VORPDZ256rrk,
6373 X86::VPORQZ256rrk, X86::VPORDZ256rrk },
6374 { X86::VORPSZ256rrkz, X86::VORPDZ256rrkz,
6375 X86::VPORQZ256rrkz, X86::VPORDZ256rrkz },
6376 { X86::VXORPSZ256rmk, X86::VXORPDZ256rmk,
6377 X86::VPXORQZ256rmk, X86::VPXORDZ256rmk },
6378 { X86::VXORPSZ256rmkz, X86::VXORPDZ256rmkz,
6379 X86::VPXORQZ256rmkz, X86::VPXORDZ256rmkz },
6380 { X86::VXORPSZ256rrk, X86::VXORPDZ256rrk,
6381 X86::VPXORQZ256rrk, X86::VPXORDZ256rrk },
6382 { X86::VXORPSZ256rrkz, X86::VXORPDZ256rrkz,
6383 X86::VPXORQZ256rrkz, X86::VPXORDZ256rrkz },
6384 { X86::VANDNPSZrmk, X86::VANDNPDZrmk,
6385 X86::VPANDNQZrmk, X86::VPANDNDZrmk },
6386 { X86::VANDNPSZrmkz, X86::VANDNPDZrmkz,
6387 X86::VPANDNQZrmkz, X86::VPANDNDZrmkz },
6388 { X86::VANDNPSZrrk, X86::VANDNPDZrrk,
6389 X86::VPANDNQZrrk, X86::VPANDNDZrrk },
6390 { X86::VANDNPSZrrkz, X86::VANDNPDZrrkz,
6391 X86::VPANDNQZrrkz, X86::VPANDNDZrrkz },
6392 { X86::VANDPSZrmk, X86::VANDPDZrmk,
6393 X86::VPANDQZrmk, X86::VPANDDZrmk },
6394 { X86::VANDPSZrmkz, X86::VANDPDZrmkz,
6395 X86::VPANDQZrmkz, X86::VPANDDZrmkz },
6396 { X86::VANDPSZrrk, X86::VANDPDZrrk,
6397 X86::VPANDQZrrk, X86::VPANDDZrrk },
6398 { X86::VANDPSZrrkz, X86::VANDPDZrrkz,
6399 X86::VPANDQZrrkz, X86::VPANDDZrrkz },
6400 { X86::VORPSZrmk, X86::VORPDZrmk,
6401 X86::VPORQZrmk, X86::VPORDZrmk },
6402 { X86::VORPSZrmkz, X86::VORPDZrmkz,
6403 X86::VPORQZrmkz, X86::VPORDZrmkz },
6404 { X86::VORPSZrrk, X86::VORPDZrrk,
6405 X86::VPORQZrrk, X86::VPORDZrrk },
6406 { X86::VORPSZrrkz, X86::VORPDZrrkz,
6407 X86::VPORQZrrkz, X86::VPORDZrrkz },
6408 { X86::VXORPSZrmk, X86::VXORPDZrmk,
6409 X86::VPXORQZrmk, X86::VPXORDZrmk },
6410 { X86::VXORPSZrmkz, X86::VXORPDZrmkz,
6411 X86::VPXORQZrmkz, X86::VPXORDZrmkz },
6412 { X86::VXORPSZrrk, X86::VXORPDZrrk,
6413 X86::VPXORQZrrk, X86::VPXORDZrrk },
6414 { X86::VXORPSZrrkz, X86::VXORPDZrrkz,
6415 X86::VPXORQZrrkz, X86::VPXORDZrrkz },
6416 // Broadcast loads can be handled the same as masked operations to avoid
6417 // changing element size.
6418 { X86::VANDNPSZ128rmb, X86::VANDNPDZ128rmb,
6419 X86::VPANDNQZ128rmb, X86::VPANDNDZ128rmb },
6420 { X86::VANDPSZ128rmb, X86::VANDPDZ128rmb,
6421 X86::VPANDQZ128rmb, X86::VPANDDZ128rmb },
6422 { X86::VORPSZ128rmb, X86::VORPDZ128rmb,
6423 X86::VPORQZ128rmb, X86::VPORDZ128rmb },
6424 { X86::VXORPSZ128rmb, X86::VXORPDZ128rmb,
6425 X86::VPXORQZ128rmb, X86::VPXORDZ128rmb },
6426 { X86::VANDNPSZ256rmb, X86::VANDNPDZ256rmb,
6427 X86::VPANDNQZ256rmb, X86::VPANDNDZ256rmb },
6428 { X86::VANDPSZ256rmb, X86::VANDPDZ256rmb,
6429 X86::VPANDQZ256rmb, X86::VPANDDZ256rmb },
6430 { X86::VORPSZ256rmb, X86::VORPDZ256rmb,
6431 X86::VPORQZ256rmb, X86::VPORDZ256rmb },
6432 { X86::VXORPSZ256rmb, X86::VXORPDZ256rmb,
6433 X86::VPXORQZ256rmb, X86::VPXORDZ256rmb },
6434 { X86::VANDNPSZrmb, X86::VANDNPDZrmb,
6435 X86::VPANDNQZrmb, X86::VPANDNDZrmb },
6436 { X86::VANDPSZrmb, X86::VANDPDZrmb,
6437 X86::VPANDQZrmb, X86::VPANDDZrmb },
6440 { X86::VORPSZrmb, X86::VORPDZrmb,
6441 X86::VPORQZrmb, X86::VPORDZrmb },
6442 { X86::VXORPSZrmb, X86::VXORPDZrmb,
6443 X86::VPXORQZrmb, X86::VPXORDZrmb },
6444 { X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk,
6445 X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk },
6446 { X86::VANDPSZ128rmbk, X86::VANDPDZ128rmbk,
6447 X86::VPANDQZ128rmbk, X86::VPANDDZ128rmbk },
6448 { X86::VORPSZ128rmbk, X86::VORPDZ128rmbk,
6449 X86::VPORQZ128rmbk, X86::VPORDZ128rmbk },
6450 { X86::VXORPSZ128rmbk, X86::VXORPDZ128rmbk,
6451 X86::VPXORQZ128rmbk, X86::VPXORDZ128rmbk },
6452 { X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk,
6453 X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk },
6454 { X86::VANDPSZ256rmbk, X86::VANDPDZ256rmbk,
6455 X86::VPANDQZ256rmbk, X86::VPANDDZ256rmbk },
6456 { X86::VORPSZ256rmbk, X86::VORPDZ256rmbk,
6457 X86::VPORQZ256rmbk, X86::VPORDZ256rmbk },
6458 { X86::VXORPSZ256rmbk, X86::VXORPDZ256rmbk,
6459 X86::VPXORQZ256rmbk, X86::VPXORDZ256rmbk },
6460 { X86::VANDNPSZrmbk, X86::VANDNPDZrmbk,
6461 X86::VPANDNQZrmbk, X86::VPANDNDZrmbk },
6462 { X86::VANDPSZrmbk, X86::VANDPDZrmbk,
6463 X86::VPANDQZrmbk, X86::VPANDDZrmbk },
6466 { X86::VORPSZrmbk, X86::VORPDZrmbk,
6467 X86::VPORQZrmbk, X86::VPORDZrmbk },
6468 { X86::VXORPSZrmbk, X86::VXORPDZrmbk,
6469 X86::VPXORQZrmbk, X86::VPXORDZrmbk },
6470 { X86::VANDNPSZ128rmbkz,X86::VANDNPDZ128rmbkz,
6471 X86::VPANDNQZ128rmbkz,X86::VPANDNDZ128rmbkz},
6472 { X86::VANDPSZ128rmbkz, X86::VANDPDZ128rmbkz,
6473 X86::VPANDQZ128rmbkz, X86::VPANDDZ128rmbkz },
6474 { X86::VORPSZ128rmbkz, X86::VORPDZ128rmbkz,
6475 X86::VPORQZ128rmbkz, X86::VPORDZ128rmbkz },
6476 { X86::VXORPSZ128rmbkz, X86::VXORPDZ128rmbkz,
6477 X86::VPXORQZ128rmbkz, X86::VPXORDZ128rmbkz },
6478 { X86::VANDNPSZ256rmbkz,X86::VANDNPDZ256rmbkz,
6479 X86::VPANDNQZ256rmbkz,X86::VPANDNDZ256rmbkz},
6480 { X86::VANDPSZ256rmbkz, X86::VANDPDZ256rmbkz,
6481 X86::VPANDQZ256rmbkz, X86::VPANDDZ256rmbkz },
6482 { X86::VORPSZ256rmbkz, X86::VORPDZ256rmbkz,
6483 X86::VPORQZ256rmbkz, X86::VPORDZ256rmbkz },
6484 { X86::VXORPSZ256rmbkz, X86::VXORPDZ256rmbkz,
6485 X86::VPXORQZ256rmbkz, X86::VPXORDZ256rmbkz },
6486 { X86::VANDNPSZrmbkz, X86::VANDNPDZrmbkz,
6487 X86::VPANDNQZrmbkz, X86::VPANDNDZrmbkz },
6488 { X86::VANDPSZrmbkz, X86::VANDPDZrmbkz,
6489 X86::VPANDQZrmbkz, X86::VPANDDZrmbkz },
6492 { X86::VORPSZrmbkz, X86::VORPDZrmbkz,
6493 X86::VPORQZrmbkz, X86::VPORDZrmbkz },
6494 { X86::VXORPSZrmbkz, X86::VXORPDZrmbkz,
6495 X86::VPXORQZrmbkz, X86::VPXORDZrmbkz },
6498 // NOTE: These should only be used by the custom domain methods.
6499 static const uint16_t ReplaceableBlendInstrs[][3] = {
6500 //PackedSingle PackedDouble PackedInt
6501 { X86::BLENDPSrmi, X86::BLENDPDrmi, X86::PBLENDWrmi },
6502 { X86::BLENDPSrri, X86::BLENDPDrri, X86::PBLENDWrri },
6503 { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDWrmi },
6504 { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDWrri },
6505 { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDWYrmi },
6506 { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDWYrri },
6508 static const uint16_t ReplaceableBlendAVX2Instrs[][3] = {
6509 //PackedSingle PackedDouble PackedInt
6510 { X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDDrmi },
6511 { X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDDrri },
6512 { X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDDYrmi },
6513 { X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDDYrri },
6516 // Special table for changing EVEX logic instructions to VEX.
6517 // TODO: Should we run EVEX->VEX earlier?
6518 static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = {
6519 // Two integer columns for 64-bit and 32-bit elements.
6520 //PackedSingle PackedDouble PackedInt PackedInt
6521 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
6522 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
6523 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDQZ128rm, X86::VPANDDZ128rm },
6524 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDQZ128rr, X86::VPANDDZ128rr },
6525 { X86::VORPSrm, X86::VORPDrm, X86::VPORQZ128rm, X86::VPORDZ128rm },
6526 { X86::VORPSrr, X86::VORPDrr, X86::VPORQZ128rr, X86::VPORDZ128rr },
6527 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORQZ128rm, X86::VPXORDZ128rm },
6528 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORQZ128rr, X86::VPXORDZ128rr },
6529 { X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
6530 { X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
6531 { X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDQZ256rm, X86::VPANDDZ256rm },
6532 { X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDQZ256rr, X86::VPANDDZ256rr },
6533 { X86::VORPSYrm, X86::VORPDYrm, X86::VPORQZ256rm, X86::VPORDZ256rm },
6534 { X86::VORPSYrr, X86::VORPDYrr, X86::VPORQZ256rr, X86::VPORDZ256rr },
6535 { X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORQZ256rm, X86::VPXORDZ256rm },
6536 { X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORQZ256rr, X86::VPXORDZ256rr },
6539 // FIXME: Some shuffle and unpack instructions have equivalents in different
6540 // domains, but they require a bit more work than just switching opcodes.
6542 static const uint16_t *lookup(unsigned opcode, unsigned domain,
6543 ArrayRef<uint16_t[3]> Table) {
6544 for (const uint16_t (&Row)[3] : Table)
6545 if (Row[domain-1] == opcode)
6550 static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
6551 ArrayRef<uint16_t[4]> Table) {
6552 // If this is the integer domain make sure to check both integer columns.
6553 for (const uint16_t (&Row)[4] : Table)
6554 if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode))
6559 // Helper to attempt to widen/narrow blend masks.
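// For example, widening the 4-lane mask 0b0101 to 8 lanes gives 0b00110011,
// while narrowing the 8-lane mask 0b00111100 to 4 lanes gives 0b0110.
// Narrowing fails if any group of source bits is neither all ones nor all
// zeros, since that selection cannot be expressed at the coarser width.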
6560 static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
6561 unsigned NewWidth, unsigned *pNewMask = nullptr) {
6562 assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
6563 "Illegal blend mask scale");
6564 unsigned NewMask = 0;
6566 if ((OldWidth % NewWidth) == 0) {
6567 unsigned Scale = OldWidth / NewWidth;
6568 unsigned SubMask = (1u << Scale) - 1;
6569 for (unsigned i = 0; i != NewWidth; ++i) {
6570 unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
6571 if (Sub == SubMask)
6572 NewMask |= (1u << i);
6573 else if (Sub != 0x0)
6574 return false;
6577 unsigned Scale = NewWidth / OldWidth;
6578 unsigned SubMask = (1u << Scale) - 1;
6579 for (unsigned i = 0; i != OldWidth; ++i) {
6580 if (OldMask & (1 << i)) {
6581 NewMask |= (SubMask << (i * Scale));
6586 if (pNewMask)
6587 *pNewMask = NewMask;
6591 uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const {
6592 unsigned Opcode = MI.getOpcode();
6593 unsigned NumOperands = MI.getDesc().getNumOperands();
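// A blend may switch execution domains only if its immediate mask can be
// rescaled to the lane count of the target domain, so the returned bitmask
// (0x2 PackedSingle, 0x4 PackedDouble, 0x8 PackedInt) is computed by trying
// each conversion with AdjustBlendMask.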
6595 auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
6596 uint16_t validDomains = 0;
6597 if (MI.getOperand(NumOperands - 1).isImm()) {
6598 unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
6599 if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4))
6600 validDomains |= 0x2; // PackedSingle
6601 if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2))
6602 validDomains |= 0x4; // PackedDouble
6603 if (!Is256 || Subtarget.hasAVX2())
6604 validDomains |= 0x8; // PackedInt
6606 return validDomains;
6610 case X86::BLENDPDrmi:
6611 case X86::BLENDPDrri:
6612 case X86::VBLENDPDrmi:
6613 case X86::VBLENDPDrri:
6614 return GetBlendDomains(2, false);
6615 case X86::VBLENDPDYrmi:
6616 case X86::VBLENDPDYrri:
6617 return GetBlendDomains(4, true);
6618 case X86::BLENDPSrmi:
6619 case X86::BLENDPSrri:
6620 case X86::VBLENDPSrmi:
6621 case X86::VBLENDPSrri:
6622 case X86::VPBLENDDrmi:
6623 case X86::VPBLENDDrri:
6624 return GetBlendDomains(4, false);
6625 case X86::VBLENDPSYrmi:
6626 case X86::VBLENDPSYrri:
6627 case X86::VPBLENDDYrmi:
6628 case X86::VPBLENDDYrri:
6629 return GetBlendDomains(8, true);
6630 case X86::PBLENDWrmi:
6631 case X86::PBLENDWrri:
6632 case X86::VPBLENDWrmi:
6633 case X86::VPBLENDWrri:
6634 // Treat VPBLENDWY as a 128-bit vector as it repeats the lo/hi masks.
6635 case X86::VPBLENDWYrmi:
6636 case X86::VPBLENDWYrri:
6637 return GetBlendDomains(8, false);
6638 case X86::VPANDDZ128rr: case X86::VPANDDZ128rm:
6639 case X86::VPANDDZ256rr: case X86::VPANDDZ256rm:
6640 case X86::VPANDQZ128rr: case X86::VPANDQZ128rm:
6641 case X86::VPANDQZ256rr: case X86::VPANDQZ256rm:
6642 case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
6643 case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
6644 case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
6645 case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
6646 case X86::VPORDZ128rr: case X86::VPORDZ128rm:
6647 case X86::VPORDZ256rr: case X86::VPORDZ256rm:
6648 case X86::VPORQZ128rr: case X86::VPORQZ128rm:
6649 case X86::VPORQZ256rr: case X86::VPORQZ256rm:
6650 case X86::VPXORDZ128rr: case X86::VPXORDZ128rm:
6651 case X86::VPXORDZ256rr: case X86::VPXORDZ256rm:
6652 case X86::VPXORQZ128rr: case X86::VPXORQZ128rm:
6653 case X86::VPXORQZ256rr: case X86::VPXORQZ256rm:
6654 // If we don't have DQI see if we can still switch from an EVEX integer
6655 // instruction to a VEX floating point instruction.
6656 if (Subtarget.hasDQI())
6659 if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
6661 if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
6663 // Register forms will have 3 operands. Memory form will have more.
6664 if (NumOperands == 3 &&
6665 RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
6668 // All domains are valid.
6670 case X86::MOVHLPSrr:
6671 // We can swap domains when both inputs are the same register.
6672 // FIXME: This doesn't catch all the cases we would like. If the input
6673 // register isn't KILLed by the instruction, the two address instruction
6674 // pass puts a COPY on one input. The other input uses the original
6675 // register. This prevents the same physical register from being used by
6676 // both inputs.
6677 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
6678 MI.getOperand(0).getSubReg() == 0 &&
6679 MI.getOperand(1).getSubReg() == 0 &&
6680 MI.getOperand(2).getSubReg() == 0)
6683 case X86::SHUFPDrri:
6689 bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
6690 unsigned Domain) const {
6691 assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
6692 uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
6693 assert(dom && "Not an SSE instruction");
6695 unsigned Opcode = MI.getOpcode();
6696 unsigned NumOperands = MI.getDesc().getNumOperands();
6698 auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
6699 if (MI.getOperand(NumOperands - 1).isImm()) {
6700 unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
6701 Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
6702 unsigned NewImm = Imm;
6704 const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs);
6706 table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
6708 if (Domain == 1) { // PackedSingle
6709 AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
6710 } else if (Domain == 2) { // PackedDouble
6711 AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm);
6712 } else if (Domain == 3) { // PackedInt
6713 if (Subtarget.hasAVX2()) {
6714 // If we are already VPBLENDW use that, else use VPBLENDD.
6715 if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
6716 table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
6717 AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
6720 assert(!Is256 && "128-bit vector expected");
6721 AdjustBlendMask(Imm, ImmWidth, 8, &NewImm);
6725 assert(table && table[Domain - 1] && "Unknown domain op");
6726 MI.setDesc(get(table[Domain - 1]));
6727 MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
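// Illustrative example of the immediate adjustment above (assuming
// AdjustBlendMask rescales the per-element mask as its call sites suggest):
// moving `BLENDPDrri $xmm0, $xmm1, 0b10` (blend the high double) into the
// PackedSingle domain widens each element bit to two float bits, producing
// `BLENDPSrri $xmm0, $xmm1, 0b1100`; the replacement opcode itself comes
// from the ReplaceableBlend* tables looked up above.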
6733 case X86::BLENDPDrmi:
6734 case X86::BLENDPDrri:
6735 case X86::VBLENDPDrmi:
6736 case X86::VBLENDPDrri:
6737 return SetBlendDomain(2, false);
6738 case X86::VBLENDPDYrmi:
6739 case X86::VBLENDPDYrri:
6740 return SetBlendDomain(4, true);
6741 case X86::BLENDPSrmi:
6742 case X86::BLENDPSrri:
6743 case X86::VBLENDPSrmi:
6744 case X86::VBLENDPSrri:
6745 case X86::VPBLENDDrmi:
6746 case X86::VPBLENDDrri:
6747 return SetBlendDomain(4, false);
6748 case X86::VBLENDPSYrmi:
6749 case X86::VBLENDPSYrri:
6750 case X86::VPBLENDDYrmi:
6751 case X86::VPBLENDDYrri:
6752 return SetBlendDomain(8, true);
6753 case X86::PBLENDWrmi:
6754 case X86::PBLENDWrri:
6755 case X86::VPBLENDWrmi:
6756 case X86::VPBLENDWrri:
6757 return SetBlendDomain(8, false);
6758 case X86::VPBLENDWYrmi:
6759 case X86::VPBLENDWYrri:
6760 return SetBlendDomain(16, true);
6761 case X86::VPANDDZ128rr: case X86::VPANDDZ128rm:
6762 case X86::VPANDDZ256rr: case X86::VPANDDZ256rm:
6763 case X86::VPANDQZ128rr: case X86::VPANDQZ128rm:
6764 case X86::VPANDQZ256rr: case X86::VPANDQZ256rm:
6765 case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
6766 case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
6767 case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
6768 case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
6769 case X86::VPORDZ128rr: case X86::VPORDZ128rm:
6770 case X86::VPORDZ256rr: case X86::VPORDZ256rm:
6771 case X86::VPORQZ128rr: case X86::VPORQZ128rm:
6772 case X86::VPORQZ256rr: case X86::VPORQZ256rm:
6773 case X86::VPXORDZ128rr: case X86::VPXORDZ128rm:
6774 case X86::VPXORDZ256rr: case X86::VPXORDZ256rm:
6775 case X86::VPXORQZ128rr: case X86::VPXORQZ128rm:
6776 case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: {
6777 // Without DQI, convert EVEX instructions to VEX instructions.
6778 if (Subtarget.hasDQI())
6781 const uint16_t *table = lookupAVX512(MI.getOpcode(), dom,
6782 ReplaceableCustomAVX512LogicInstrs);
6783 assert(table && "Instruction not found in table?");
6784 // Don't change integer Q instructions to D instructions and
6785 // use D instructions if we started with a PS instruction.
6786 if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
6788 MI.setDesc(get(table[Domain - 1]));
6791 case X86::UNPCKHPDrr:
6792 case X86::MOVHLPSrr:
6793 // We just need to commute the instruction which will switch the domains.
6794 if (Domain != dom && Domain != 3 &&
6795 MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
6796 MI.getOperand(0).getSubReg() == 0 &&
6797 MI.getOperand(1).getSubReg() == 0 &&
6798 MI.getOperand(2).getSubReg() == 0) {
6799 commuteInstruction(MI, false);
6802 // We must always return true for MOVHLPSrr.
6803 if (Opcode == X86::MOVHLPSrr)
6806 case X86::SHUFPDrri: {
6808 unsigned Imm = MI.getOperand(3).getImm();
6809 unsigned NewImm = 0x44;
6810 if (Imm & 1) NewImm |= 0x0a;
6811 if (Imm & 2) NewImm |= 0xa0;
6812 MI.getOperand(3).setImm(NewImm);
6813 MI.setDesc(get(X86::SHUFPSrri));
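// Worked example of the immediate rewrite above: the base mask 0x44 selects
// floats {0,1} of src1 and {0,1} of src2, which matches SHUFPD imm 0. Thus
//   shufpd $1, %xmm1, %xmm0   becomes   shufps $0x4e, %xmm1, %xmm0
//   shufpd $3, %xmm1, %xmm0   becomes   shufps $0xee, %xmm1, %xmm0
// since setting bit 0 (0x0a) picks floats {2,3} (= double 1) of src1 and
// setting bit 1 (0xa0) picks floats {2,3} of src2.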
6821 std::pair<uint16_t, uint16_t>
6822 X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
6823 uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
6824 unsigned opcode = MI.getOpcode();
6825 uint16_t validDomains = 0;
6827 // Attempt to match for custom instructions.
6828 validDomains = getExecutionDomainCustom(MI);
6830 return std::make_pair(domain, validDomains);
6832 if (lookup(opcode, domain, ReplaceableInstrs)) {
6834 } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
6835 validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
6836 } else if (lookup(opcode, domain, ReplaceableInstrsFP)) {
6838 } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
6839 // Insert/extract instructions should only affect the domain if AVX2 is enabled.
6841 if (!Subtarget.hasAVX2())
6842 return std::make_pair(0, 0);
6844 } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
6846 } else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain,
6847 ReplaceableInstrsAVX512DQ)) {
6849 } else if (Subtarget.hasDQI()) {
6850 if (const uint16_t *table = lookupAVX512(opcode, domain,
6851 ReplaceableInstrsAVX512DQMasked)) {
6852 if (domain == 1 || (domain == 3 && table[3] == opcode))
6859 return std::make_pair(domain, validDomains);
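// Note on the pair returned by getExecutionDomain above (illustrative): the
// second value is a bitmask with bit N set when domain N is legal for this
// instruction, e.g. 0x6 = PackedSingle|PackedDouble and
// 0xe = PackedSingle|PackedDouble|PackedInt, which is why the AVX2 check
// chooses between 0xe and 0x6 for the ReplaceableInstrsAVX2 entries.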
6862 void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
6863 assert(Domain>0 && Domain<4 && "Invalid execution domain");
6864 uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
6865 assert(dom && "Not an SSE instruction");
6867 // Attempt to match for custom instructions.
6868 if (setExecutionDomainCustom(MI, Domain))
6871 const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs);
6872 if (!table) { // try the other table
6873 assert((Subtarget.hasAVX2() || Domain < 3) &&
6874 "256-bit vector operations only available in AVX2");
6875 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
6877 if (!table) { // try the FP table
6878 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP);
6879 assert((!table || Domain < 3) &&
6880 "Can only select PackedSingle or PackedDouble");
6882 if (!table) { // try the other table
6883 assert(Subtarget.hasAVX2() &&
6884 "256-bit insert/extract only available in AVX2");
6885 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
6887 if (!table) { // try the AVX512 table
6888 assert(Subtarget.hasAVX512() && "Requires AVX-512");
6889 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
6890 // Don't change integer Q instructions to D instructions.
6891 if (table && Domain == 3 && table[3] == MI.getOpcode())
6894 if (!table) { // try the AVX512DQ table
6895 assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
6896 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
6897 // Don't change integer Q instructions to D instructions and
6898 // use D instructions if we started with a PS instruction.
6899 if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
6902 if (!table) { // try the AVX512DQMasked table
6903 assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
6904 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
6905 if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
6908 assert(table && "Cannot change domain");
6909 MI.setDesc(get(table[Domain - 1]));
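// Illustrative sketch of the table-driven rewrite performed above: every
// Replaceable* row holds the equivalent opcode for each domain, so changing
// domains is just an opcode swap with the operands left untouched, e.g.
//   $xmm0 = PXORrr $xmm0, $xmm1   -->   $xmm0 = XORPSrr $xmm0, $xmm1
// (assuming PXOR/XORPS/XORPD share a ReplaceableInstrs row, as the classic
// SSE logic ops do).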
6912 /// Return the noop instruction to use for a noop.
6913 void X86InstrInfo::getNoop(MCInst &NopInst) const {
6914 NopInst.setOpcode(X86::NOOP);
6917 bool X86InstrInfo::isHighLatencyDef(int opc) const {
6919 default: return false;
6925 case X86::DIVSDrm_Int:
6927 case X86::DIVSDrr_Int:
6929 case X86::DIVSSrm_Int:
6931 case X86::DIVSSrr_Int:
6937 case X86::SQRTSDm_Int:
6939 case X86::SQRTSDr_Int:
6941 case X86::SQRTSSm_Int:
6943 case X86::SQRTSSr_Int:
6944 // AVX instructions with high latency
6947 case X86::VDIVPDYrm:
6948 case X86::VDIVPDYrr:
6951 case X86::VDIVPSYrm:
6952 case X86::VDIVPSYrr:
6954 case X86::VDIVSDrm_Int:
6956 case X86::VDIVSDrr_Int:
6958 case X86::VDIVSSrm_Int:
6960 case X86::VDIVSSrr_Int:
6963 case X86::VSQRTPDYm:
6964 case X86::VSQRTPDYr:
6967 case X86::VSQRTPSYm:
6968 case X86::VSQRTPSYr:
6970 case X86::VSQRTSDm_Int:
6972 case X86::VSQRTSDr_Int:
6974 case X86::VSQRTSSm_Int:
6976 case X86::VSQRTSSr_Int:
6977 // AVX512 instructions with high latency
6978 case X86::VDIVPDZ128rm:
6979 case X86::VDIVPDZ128rmb:
6980 case X86::VDIVPDZ128rmbk:
6981 case X86::VDIVPDZ128rmbkz:
6982 case X86::VDIVPDZ128rmk:
6983 case X86::VDIVPDZ128rmkz:
6984 case X86::VDIVPDZ128rr:
6985 case X86::VDIVPDZ128rrk:
6986 case X86::VDIVPDZ128rrkz:
6987 case X86::VDIVPDZ256rm:
6988 case X86::VDIVPDZ256rmb:
6989 case X86::VDIVPDZ256rmbk:
6990 case X86::VDIVPDZ256rmbkz:
6991 case X86::VDIVPDZ256rmk:
6992 case X86::VDIVPDZ256rmkz:
6993 case X86::VDIVPDZ256rr:
6994 case X86::VDIVPDZ256rrk:
6995 case X86::VDIVPDZ256rrkz:
6996 case X86::VDIVPDZrrb:
6997 case X86::VDIVPDZrrbk:
6998 case X86::VDIVPDZrrbkz:
6999 case X86::VDIVPDZrm:
7000 case X86::VDIVPDZrmb:
7001 case X86::VDIVPDZrmbk:
7002 case X86::VDIVPDZrmbkz:
7003 case X86::VDIVPDZrmk:
7004 case X86::VDIVPDZrmkz:
7005 case X86::VDIVPDZrr:
7006 case X86::VDIVPDZrrk:
7007 case X86::VDIVPDZrrkz:
7008 case X86::VDIVPSZ128rm:
7009 case X86::VDIVPSZ128rmb:
7010 case X86::VDIVPSZ128rmbk:
7011 case X86::VDIVPSZ128rmbkz:
7012 case X86::VDIVPSZ128rmk:
7013 case X86::VDIVPSZ128rmkz:
7014 case X86::VDIVPSZ128rr:
7015 case X86::VDIVPSZ128rrk:
7016 case X86::VDIVPSZ128rrkz:
7017 case X86::VDIVPSZ256rm:
7018 case X86::VDIVPSZ256rmb:
7019 case X86::VDIVPSZ256rmbk:
7020 case X86::VDIVPSZ256rmbkz:
7021 case X86::VDIVPSZ256rmk:
7022 case X86::VDIVPSZ256rmkz:
7023 case X86::VDIVPSZ256rr:
7024 case X86::VDIVPSZ256rrk:
7025 case X86::VDIVPSZ256rrkz:
7026 case X86::VDIVPSZrrb:
7027 case X86::VDIVPSZrrbk:
7028 case X86::VDIVPSZrrbkz:
7029 case X86::VDIVPSZrm:
7030 case X86::VDIVPSZrmb:
7031 case X86::VDIVPSZrmbk:
7032 case X86::VDIVPSZrmbkz:
7033 case X86::VDIVPSZrmk:
7034 case X86::VDIVPSZrmkz:
7035 case X86::VDIVPSZrr:
7036 case X86::VDIVPSZrrk:
7037 case X86::VDIVPSZrrkz:
7038 case X86::VDIVSDZrm:
7039 case X86::VDIVSDZrr:
7040 case X86::VDIVSDZrm_Int:
7041 case X86::VDIVSDZrm_Intk:
7042 case X86::VDIVSDZrm_Intkz:
7043 case X86::VDIVSDZrr_Int:
7044 case X86::VDIVSDZrr_Intk:
7045 case X86::VDIVSDZrr_Intkz:
7046 case X86::VDIVSDZrrb_Int:
7047 case X86::VDIVSDZrrb_Intk:
7048 case X86::VDIVSDZrrb_Intkz:
7049 case X86::VDIVSSZrm:
7050 case X86::VDIVSSZrr:
7051 case X86::VDIVSSZrm_Int:
7052 case X86::VDIVSSZrm_Intk:
7053 case X86::VDIVSSZrm_Intkz:
7054 case X86::VDIVSSZrr_Int:
7055 case X86::VDIVSSZrr_Intk:
7056 case X86::VDIVSSZrr_Intkz:
7057 case X86::VDIVSSZrrb_Int:
7058 case X86::VDIVSSZrrb_Intk:
7059 case X86::VDIVSSZrrb_Intkz:
7060 case X86::VSQRTPDZ128m:
7061 case X86::VSQRTPDZ128mb:
7062 case X86::VSQRTPDZ128mbk:
7063 case X86::VSQRTPDZ128mbkz:
7064 case X86::VSQRTPDZ128mk:
7065 case X86::VSQRTPDZ128mkz:
7066 case X86::VSQRTPDZ128r:
7067 case X86::VSQRTPDZ128rk:
7068 case X86::VSQRTPDZ128rkz:
7069 case X86::VSQRTPDZ256m:
7070 case X86::VSQRTPDZ256mb:
7071 case X86::VSQRTPDZ256mbk:
7072 case X86::VSQRTPDZ256mbkz:
7073 case X86::VSQRTPDZ256mk:
7074 case X86::VSQRTPDZ256mkz:
7075 case X86::VSQRTPDZ256r:
7076 case X86::VSQRTPDZ256rk:
7077 case X86::VSQRTPDZ256rkz:
7078 case X86::VSQRTPDZm:
7079 case X86::VSQRTPDZmb:
7080 case X86::VSQRTPDZmbk:
7081 case X86::VSQRTPDZmbkz:
7082 case X86::VSQRTPDZmk:
7083 case X86::VSQRTPDZmkz:
7084 case X86::VSQRTPDZr:
7085 case X86::VSQRTPDZrb:
7086 case X86::VSQRTPDZrbk:
7087 case X86::VSQRTPDZrbkz:
7088 case X86::VSQRTPDZrk:
7089 case X86::VSQRTPDZrkz:
7090 case X86::VSQRTPSZ128m:
7091 case X86::VSQRTPSZ128mb:
7092 case X86::VSQRTPSZ128mbk:
7093 case X86::VSQRTPSZ128mbkz:
7094 case X86::VSQRTPSZ128mk:
7095 case X86::VSQRTPSZ128mkz:
7096 case X86::VSQRTPSZ128r:
7097 case X86::VSQRTPSZ128rk:
7098 case X86::VSQRTPSZ128rkz:
7099 case X86::VSQRTPSZ256m:
7100 case X86::VSQRTPSZ256mb:
7101 case X86::VSQRTPSZ256mbk:
7102 case X86::VSQRTPSZ256mbkz:
7103 case X86::VSQRTPSZ256mk:
7104 case X86::VSQRTPSZ256mkz:
7105 case X86::VSQRTPSZ256r:
7106 case X86::VSQRTPSZ256rk:
7107 case X86::VSQRTPSZ256rkz:
7108 case X86::VSQRTPSZm:
7109 case X86::VSQRTPSZmb:
7110 case X86::VSQRTPSZmbk:
7111 case X86::VSQRTPSZmbkz:
7112 case X86::VSQRTPSZmk:
7113 case X86::VSQRTPSZmkz:
7114 case X86::VSQRTPSZr:
7115 case X86::VSQRTPSZrb:
7116 case X86::VSQRTPSZrbk:
7117 case X86::VSQRTPSZrbkz:
7118 case X86::VSQRTPSZrk:
7119 case X86::VSQRTPSZrkz:
7120 case X86::VSQRTSDZm:
7121 case X86::VSQRTSDZm_Int:
7122 case X86::VSQRTSDZm_Intk:
7123 case X86::VSQRTSDZm_Intkz:
7124 case X86::VSQRTSDZr:
7125 case X86::VSQRTSDZr_Int:
7126 case X86::VSQRTSDZr_Intk:
7127 case X86::VSQRTSDZr_Intkz:
7128 case X86::VSQRTSDZrb_Int:
7129 case X86::VSQRTSDZrb_Intk:
7130 case X86::VSQRTSDZrb_Intkz:
7131 case X86::VSQRTSSZm:
7132 case X86::VSQRTSSZm_Int:
7133 case X86::VSQRTSSZm_Intk:
7134 case X86::VSQRTSSZm_Intkz:
7135 case X86::VSQRTSSZr:
7136 case X86::VSQRTSSZr_Int:
7137 case X86::VSQRTSSZr_Intk:
7138 case X86::VSQRTSSZr_Intkz:
7139 case X86::VSQRTSSZrb_Int:
7140 case X86::VSQRTSSZrb_Intk:
7141 case X86::VSQRTSSZrb_Intkz:
7143 case X86::VGATHERDPDYrm:
7144 case X86::VGATHERDPDZ128rm:
7145 case X86::VGATHERDPDZ256rm:
7146 case X86::VGATHERDPDZrm:
7147 case X86::VGATHERDPDrm:
7148 case X86::VGATHERDPSYrm:
7149 case X86::VGATHERDPSZ128rm:
7150 case X86::VGATHERDPSZ256rm:
7151 case X86::VGATHERDPSZrm:
7152 case X86::VGATHERDPSrm:
7153 case X86::VGATHERPF0DPDm:
7154 case X86::VGATHERPF0DPSm:
7155 case X86::VGATHERPF0QPDm:
7156 case X86::VGATHERPF0QPSm:
7157 case X86::VGATHERPF1DPDm:
7158 case X86::VGATHERPF1DPSm:
7159 case X86::VGATHERPF1QPDm:
7160 case X86::VGATHERPF1QPSm:
7161 case X86::VGATHERQPDYrm:
7162 case X86::VGATHERQPDZ128rm:
7163 case X86::VGATHERQPDZ256rm:
7164 case X86::VGATHERQPDZrm:
7165 case X86::VGATHERQPDrm:
7166 case X86::VGATHERQPSYrm:
7167 case X86::VGATHERQPSZ128rm:
7168 case X86::VGATHERQPSZ256rm:
7169 case X86::VGATHERQPSZrm:
7170 case X86::VGATHERQPSrm:
7171 case X86::VPGATHERDDYrm:
7172 case X86::VPGATHERDDZ128rm:
7173 case X86::VPGATHERDDZ256rm:
7174 case X86::VPGATHERDDZrm:
7175 case X86::VPGATHERDDrm:
7176 case X86::VPGATHERDQYrm:
7177 case X86::VPGATHERDQZ128rm:
7178 case X86::VPGATHERDQZ256rm:
7179 case X86::VPGATHERDQZrm:
7180 case X86::VPGATHERDQrm:
7181 case X86::VPGATHERQDYrm:
7182 case X86::VPGATHERQDZ128rm:
7183 case X86::VPGATHERQDZ256rm:
7184 case X86::VPGATHERQDZrm:
7185 case X86::VPGATHERQDrm:
7186 case X86::VPGATHERQQYrm:
7187 case X86::VPGATHERQQZ128rm:
7188 case X86::VPGATHERQQZ256rm:
7189 case X86::VPGATHERQQZrm:
7190 case X86::VPGATHERQQrm:
7191 case X86::VSCATTERDPDZ128mr:
7192 case X86::VSCATTERDPDZ256mr:
7193 case X86::VSCATTERDPDZmr:
7194 case X86::VSCATTERDPSZ128mr:
7195 case X86::VSCATTERDPSZ256mr:
7196 case X86::VSCATTERDPSZmr:
7197 case X86::VSCATTERPF0DPDm:
7198 case X86::VSCATTERPF0DPSm:
7199 case X86::VSCATTERPF0QPDm:
7200 case X86::VSCATTERPF0QPSm:
7201 case X86::VSCATTERPF1DPDm:
7202 case X86::VSCATTERPF1DPSm:
7203 case X86::VSCATTERPF1QPDm:
7204 case X86::VSCATTERPF1QPSm:
7205 case X86::VSCATTERQPDZ128mr:
7206 case X86::VSCATTERQPDZ256mr:
7207 case X86::VSCATTERQPDZmr:
7208 case X86::VSCATTERQPSZ128mr:
7209 case X86::VSCATTERQPSZ256mr:
7210 case X86::VSCATTERQPSZmr:
7211 case X86::VPSCATTERDDZ128mr:
7212 case X86::VPSCATTERDDZ256mr:
7213 case X86::VPSCATTERDDZmr:
7214 case X86::VPSCATTERDQZ128mr:
7215 case X86::VPSCATTERDQZ256mr:
7216 case X86::VPSCATTERDQZmr:
7217 case X86::VPSCATTERQDZ128mr:
7218 case X86::VPSCATTERQDZ256mr:
7219 case X86::VPSCATTERQDZmr:
7220 case X86::VPSCATTERQQZ128mr:
7221 case X86::VPSCATTERQQZ256mr:
7222 case X86::VPSCATTERQQZmr:
7227 bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
7228 const MachineRegisterInfo *MRI,
7229 const MachineInstr &DefMI,
7231 const MachineInstr &UseMI,
7232 unsigned UseIdx) const {
7233 return isHighLatencyDef(DefMI.getOpcode());
7236 bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
7237 const MachineBasicBlock *MBB) const {
7238 assert(Inst.getNumExplicitOperands() == 3 && Inst.getNumExplicitDefs() == 1 &&
7239 Inst.getNumDefs() <= 2 && "Reassociation needs binary operators");
7241 // Integer binary math/logic instructions have a third source operand:
7242 // the EFLAGS register. That operand must be both defined here and never
7243 // used; i.e., it must be dead. If the EFLAGS operand is live, then we can
7244 // not change anything because rearranging the operands could affect other
7245 // instructions that depend on the exact status flags (zero, sign, etc.)
7246 // that are set by using these particular operands with this operation.
7247 const MachineOperand *FlagDef = Inst.findRegisterDefOperand(X86::EFLAGS);
7248 assert((Inst.getNumDefs() == 1 || FlagDef) &&
7249 "Implicit def isn't flags?");
7250 if (FlagDef && !FlagDef->isDead())
7253 return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
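// Illustrative example of the constraint checked above: in a chain such as
//   $t1 = ADD32rr $a, $b, implicit-def dead $eflags
//   $t2 = ADD32rr $t1, $c, implicit-def dead $eflags
// reassociation reorders which values feed each add, so the flags produced
// by each add differ from the original sequence. That is only safe when the
// EFLAGS defs are dead; a live def (e.g. one consumed by a later JCC) blocks
// the transform.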
7256 // TODO: There are many more machine instruction opcodes to match:
7257 // 1. Other data types (integer, vectors)
7258 // 2. Other math / logic operations (xor, or)
7259 // 3. Other forms of the same operation (intrinsics and other variants)
7260 bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
7261 switch (Inst.getOpcode()) {
7306 case X86::VPANDDZ128rr:
7307 case X86::VPANDDZ256rr:
7308 case X86::VPANDDZrr:
7309 case X86::VPANDQZ128rr:
7310 case X86::VPANDQZ256rr:
7311 case X86::VPANDQZrr:
7314 case X86::VPORDZ128rr:
7315 case X86::VPORDZ256rr:
7317 case X86::VPORQZ128rr:
7318 case X86::VPORQZ256rr:
7322 case X86::VPXORDZ128rr:
7323 case X86::VPXORDZ256rr:
7324 case X86::VPXORDZrr:
7325 case X86::VPXORQZ128rr:
7326 case X86::VPXORQZ256rr:
7327 case X86::VPXORQZrr:
7330 case X86::VANDPDYrr:
7331 case X86::VANDPSYrr:
7332 case X86::VANDPDZ128rr:
7333 case X86::VANDPSZ128rr:
7334 case X86::VANDPDZ256rr:
7335 case X86::VANDPSZ256rr:
7336 case X86::VANDPDZrr:
7337 case X86::VANDPSZrr:
7342 case X86::VORPDZ128rr:
7343 case X86::VORPSZ128rr:
7344 case X86::VORPDZ256rr:
7345 case X86::VORPSZ256rr:
7350 case X86::VXORPDYrr:
7351 case X86::VXORPSYrr:
7352 case X86::VXORPDZ128rr:
7353 case X86::VXORPSZ128rr:
7354 case X86::VXORPDZ256rr:
7355 case X86::VXORPSZ256rr:
7356 case X86::VXORPDZrr:
7357 case X86::VXORPSZrr:
7378 case X86::VPADDBYrr:
7379 case X86::VPADDWYrr:
7380 case X86::VPADDDYrr:
7381 case X86::VPADDQYrr:
7382 case X86::VPADDBZ128rr:
7383 case X86::VPADDWZ128rr:
7384 case X86::VPADDDZ128rr:
7385 case X86::VPADDQZ128rr:
7386 case X86::VPADDBZ256rr:
7387 case X86::VPADDWZ256rr:
7388 case X86::VPADDDZ256rr:
7389 case X86::VPADDQZ256rr:
7390 case X86::VPADDBZrr:
7391 case X86::VPADDWZrr:
7392 case X86::VPADDDZrr:
7393 case X86::VPADDQZrr:
7394 case X86::VPMULLWrr:
7395 case X86::VPMULLWYrr:
7396 case X86::VPMULLWZ128rr:
7397 case X86::VPMULLWZ256rr:
7398 case X86::VPMULLWZrr:
7399 case X86::VPMULLDrr:
7400 case X86::VPMULLDYrr:
7401 case X86::VPMULLDZ128rr:
7402 case X86::VPMULLDZ256rr:
7403 case X86::VPMULLDZrr:
7404 case X86::VPMULLQZ128rr:
7405 case X86::VPMULLQZ256rr:
7406 case X86::VPMULLQZrr:
7407 case X86::VPMAXSBrr:
7408 case X86::VPMAXSBYrr:
7409 case X86::VPMAXSBZ128rr:
7410 case X86::VPMAXSBZ256rr:
7411 case X86::VPMAXSBZrr:
7412 case X86::VPMAXSDrr:
7413 case X86::VPMAXSDYrr:
7414 case X86::VPMAXSDZ128rr:
7415 case X86::VPMAXSDZ256rr:
7416 case X86::VPMAXSDZrr:
7417 case X86::VPMAXSQZ128rr:
7418 case X86::VPMAXSQZ256rr:
7419 case X86::VPMAXSQZrr:
7420 case X86::VPMAXSWrr:
7421 case X86::VPMAXSWYrr:
7422 case X86::VPMAXSWZ128rr:
7423 case X86::VPMAXSWZ256rr:
7424 case X86::VPMAXSWZrr:
7425 case X86::VPMAXUBrr:
7426 case X86::VPMAXUBYrr:
7427 case X86::VPMAXUBZ128rr:
7428 case X86::VPMAXUBZ256rr:
7429 case X86::VPMAXUBZrr:
7430 case X86::VPMAXUDrr:
7431 case X86::VPMAXUDYrr:
7432 case X86::VPMAXUDZ128rr:
7433 case X86::VPMAXUDZ256rr:
7434 case X86::VPMAXUDZrr:
7435 case X86::VPMAXUQZ128rr:
7436 case X86::VPMAXUQZ256rr:
7437 case X86::VPMAXUQZrr:
7438 case X86::VPMAXUWrr:
7439 case X86::VPMAXUWYrr:
7440 case X86::VPMAXUWZ128rr:
7441 case X86::VPMAXUWZ256rr:
7442 case X86::VPMAXUWZrr:
7443 case X86::VPMINSBrr:
7444 case X86::VPMINSBYrr:
7445 case X86::VPMINSBZ128rr:
7446 case X86::VPMINSBZ256rr:
7447 case X86::VPMINSBZrr:
7448 case X86::VPMINSDrr:
7449 case X86::VPMINSDYrr:
7450 case X86::VPMINSDZ128rr:
7451 case X86::VPMINSDZ256rr:
7452 case X86::VPMINSDZrr:
7453 case X86::VPMINSQZ128rr:
7454 case X86::VPMINSQZ256rr:
7455 case X86::VPMINSQZrr:
7456 case X86::VPMINSWrr:
7457 case X86::VPMINSWYrr:
7458 case X86::VPMINSWZ128rr:
7459 case X86::VPMINSWZ256rr:
7460 case X86::VPMINSWZrr:
7461 case X86::VPMINUBrr:
7462 case X86::VPMINUBYrr:
7463 case X86::VPMINUBZ128rr:
7464 case X86::VPMINUBZ256rr:
7465 case X86::VPMINUBZrr:
7466 case X86::VPMINUDrr:
7467 case X86::VPMINUDYrr:
7468 case X86::VPMINUDZ128rr:
7469 case X86::VPMINUDZ256rr:
7470 case X86::VPMINUDZrr:
7471 case X86::VPMINUQZ128rr:
7472 case X86::VPMINUQZ256rr:
7473 case X86::VPMINUQZrr:
7474 case X86::VPMINUWrr:
7475 case X86::VPMINUWYrr:
7476 case X86::VPMINUWZ128rr:
7477 case X86::VPMINUWZ256rr:
7478 case X86::VPMINUWZrr:
7479 // Normal min/max instructions are not commutative because of NaN and signed
7480 // zero semantics, but these are. Thus, there's no need to check for global
7481 // relaxed math; the instructions themselves have the properties we need.
7490 case X86::VMAXCPDrr:
7491 case X86::VMAXCPSrr:
7492 case X86::VMAXCPDYrr:
7493 case X86::VMAXCPSYrr:
7494 case X86::VMAXCPDZ128rr:
7495 case X86::VMAXCPSZ128rr:
7496 case X86::VMAXCPDZ256rr:
7497 case X86::VMAXCPSZ256rr:
7498 case X86::VMAXCPDZrr:
7499 case X86::VMAXCPSZrr:
7500 case X86::VMAXCSDrr:
7501 case X86::VMAXCSSrr:
7502 case X86::VMAXCSDZrr:
7503 case X86::VMAXCSSZrr:
7504 case X86::VMINCPDrr:
7505 case X86::VMINCPSrr:
7506 case X86::VMINCPDYrr:
7507 case X86::VMINCPSYrr:
7508 case X86::VMINCPDZ128rr:
7509 case X86::VMINCPSZ128rr:
7510 case X86::VMINCPDZ256rr:
7511 case X86::VMINCPSZ256rr:
7512 case X86::VMINCPDZrr:
7513 case X86::VMINCPSZrr:
7514 case X86::VMINCSDrr:
7515 case X86::VMINCSSrr:
7516 case X86::VMINCSDZrr:
7517 case X86::VMINCSSZrr:
7529 case X86::VADDPDYrr:
7530 case X86::VADDPSYrr:
7531 case X86::VADDPDZ128rr:
7532 case X86::VADDPSZ128rr:
7533 case X86::VADDPDZ256rr:
7534 case X86::VADDPSZ256rr:
7535 case X86::VADDPDZrr:
7536 case X86::VADDPSZrr:
7539 case X86::VADDSDZrr:
7540 case X86::VADDSSZrr:
7543 case X86::VMULPDYrr:
7544 case X86::VMULPSYrr:
7545 case X86::VMULPDZ128rr:
7546 case X86::VMULPSZ128rr:
7547 case X86::VMULPDZ256rr:
7548 case X86::VMULPSZ256rr:
7549 case X86::VMULPDZrr:
7550 case X86::VMULPSZrr:
7553 case X86::VMULSDZrr:
7554 case X86::VMULSSZrr:
7555 return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
7561 /// If \p DescribedReg overlaps with the MOVrr instruction's destination
7562 /// register then, if possible, describe the value in terms of the source register.
7564 static Optional<ParamLoadedValue>
7565 describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
7566 const TargetRegisterInfo *TRI) {
7567 Register DestReg = MI.getOperand(0).getReg();
7568 Register SrcReg = MI.getOperand(1).getReg();
7570 auto Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
7572 // If the described register is the destination, just return the source.
7573 if (DestReg == DescribedReg)
7574 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
7576 // If the described register is a sub-register of the destination register,
7577 // then pick out the source register's corresponding sub-register.
7578 if (unsigned SubRegIdx = TRI->getSubRegIndex(DestReg, DescribedReg)) {
7579 unsigned SrcSubReg = TRI->getSubReg(SrcReg, SubRegIdx);
7580 return ParamLoadedValue(MachineOperand::CreateReg(SrcSubReg, false), Expr);
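// For example (illustrative): for `$rax = MOV64rr $rbx`, a request to
// describe $eax resolves SubRegIdx to the 32-bit sub-register index and
// returns $ebx, since the low 32 bits of the destination always come from
// the low 32 bits of the source.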
7583 // The remaining case to consider is when the described register is a
7584 // super-register of the destination register. MOV8rr and MOV16rr do not
7585 // write to any of the other bytes in the register, meaning that we'd have to
7586 // describe the value using a combination of the source register and the
7587 // non-overlapping bits in the described register, which is not currently supported.
7589 if (MI.getOpcode() == X86::MOV8rr || MI.getOpcode() == X86::MOV16rr ||
7590 !TRI->isSuperRegister(DestReg, DescribedReg))
7593 assert(MI.getOpcode() == X86::MOV32rr && "Unexpected super-register case");
7594 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
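// For example (illustrative): for `$eax = MOV32rr $ecx`, describing $rax is
// still possible because a 32-bit mov zeroes bits 63:32 of the destination,
// so the full 64-bit value is determined by $ecx and the value is described
// in terms of the source register; MOV8rr/MOV16rr leave the upper bits
// untouched, which is why they bail out above.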
7597 Optional<ParamLoadedValue>
7598 X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
7599 const MachineOperand *Op = nullptr;
7600 DIExpression *Expr = nullptr;
7602 const TargetRegisterInfo *TRI = &getRegisterInfo();
7604 switch (MI.getOpcode()) {
7607 case X86::LEA64_32r: {
7608 // We may need to describe a 64-bit parameter with a 32-bit LEA.
7609 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
7612 // Operand 4 could be a global address. For now we do not support that case.
7614 if (!MI.getOperand(4).isImm() || !MI.getOperand(2).isImm())
7617 const MachineOperand &Op1 = MI.getOperand(1);
7618 const MachineOperand &Op2 = MI.getOperand(3);
7619 assert(Op2.isReg() && (Op2.getReg() == X86::NoRegister ||
7620 Register::isPhysicalRegister(Op2.getReg())));
7622 // Omit situations like:
7623 // %rsi = lea %rsi, 4, ...
7624 if ((Op1.isReg() && Op1.getReg() == MI.getOperand(0).getReg()) ||
7625 Op2.getReg() == MI.getOperand(0).getReg())
7627 else if ((Op1.isReg() && Op1.getReg() != X86::NoRegister &&
7628 TRI->regsOverlap(Op1.getReg(), MI.getOperand(0).getReg())) ||
7629 (Op2.getReg() != X86::NoRegister &&
7630 TRI->regsOverlap(Op2.getReg(), MI.getOperand(0).getReg())))
7633 int64_t Coef = MI.getOperand(2).getImm();
7634 int64_t Offset = MI.getOperand(4).getImm();
7635 SmallVector<uint64_t, 8> Ops;
7637 if ((Op1.isReg() && Op1.getReg() != X86::NoRegister)) {
7639 } else if (Op1.isFI())
7642 if (Op && Op->isReg() && Op->getReg() == Op2.getReg() && Coef > 0) {
7643 Ops.push_back(dwarf::DW_OP_constu);
7644 Ops.push_back(Coef + 1);
7645 Ops.push_back(dwarf::DW_OP_mul);
7647 if (Op && Op2.getReg() != X86::NoRegister) {
7648 int dwarfReg = TRI->getDwarfRegNum(Op2.getReg(), false);
7651 else if (dwarfReg < 32) {
7652 Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg);
7655 Ops.push_back(dwarf::DW_OP_bregx);
7656 Ops.push_back(dwarfReg);
7660 assert(Op2.getReg() != X86::NoRegister);
7665 assert(Op2.getReg() != X86::NoRegister);
7666 Ops.push_back(dwarf::DW_OP_constu);
7667 Ops.push_back(Coef);
7668 Ops.push_back(dwarf::DW_OP_mul);
7671 if (((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI()) &&
7672 Op2.getReg() != X86::NoRegister) {
7673 Ops.push_back(dwarf::DW_OP_plus);
7677 DIExpression::appendOffset(Ops, Offset);
7678 Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), Ops);
7680 return ParamLoadedValue(*Op, Expr);
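// Rough example of the DWARF expression built above (a sketch; the exact
// operator sequence follows the branches above): for
//   $eax = LEA64_32r $rdi, 2, $rsi, 4, $noreg
// the loaded value is the base register $rdi combined with roughly
//   DW_OP_breg4 (rsi) 0, DW_OP_constu 2, DW_OP_mul, DW_OP_plus,
//   DW_OP_plus_uconst 4
// i.e. rdi + rsi * 2 + 4.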
7684 case X86::MOV64ri32:
7685 // MOV32ri may be used for producing zero-extended 32-bit immediates in
7686 // 64-bit parameters, so we need to consider super-registers.
7687 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
7689 return ParamLoadedValue(MI.getOperand(1), Expr);
7694 return describeMOVrrLoadedValue(MI, Reg, TRI);
7695 case X86::XOR32rr: {
7696 // 64-bit parameters are zero-materialized using XOR32rr, so also consider super-registers.
7698 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
7700 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
7701 return ParamLoadedValue(MachineOperand::CreateImm(0), Expr);
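// For example (illustrative): `$edi = XOR32rr $edi, $edi` zeroes all 64 bits
// of $rdi, so a request to describe $rdi here is answered with the
// immediate 0.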
7704 case X86::MOVSX64rr32: {
7705 // We may need to describe the lower 32 bits of the MOVSX; for example, in the following code:
7709 // $rdi = MOVSX64rr32 $ebx
7710 // $esi = MOV32rr $edi
7711 if (!TRI->isSubRegisterEq(MI.getOperand(0).getReg(), Reg))
7714 Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
7716 // If the described register is the destination register we need to
7717 // sign-extend the source register from 32 bits. The other case we handle
7718 // is when the described register is the 32-bit sub-register of the
7719 // destination register, in which case we just need to return the source register.
7721 if (Reg == MI.getOperand(0).getReg())
7722 Expr = DIExpression::appendExt(Expr, 32, 64, true);
7724 assert(X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg) &&
7725 "Unhandled sub-register case for MOVSX64rr32");
7727 return ParamLoadedValue(MI.getOperand(1), Expr);
7730 assert(!MI.isMoveImmediate() && "Unexpected MoveImm instruction");
7731 return TargetInstrInfo::describeLoadedValue(MI, Reg);
7735 /// This is an architecture-specific helper function of reassociateOps.
7736 /// Set special operand attributes for new instructions after reassociation.
7737 void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
7738 MachineInstr &OldMI2,
7739 MachineInstr &NewMI1,
7740 MachineInstr &NewMI2) const {
7741 // Integer instructions may define an implicit EFLAGS dest register operand.
7742 MachineOperand *OldFlagDef1 = OldMI1.findRegisterDefOperand(X86::EFLAGS);
7743 MachineOperand *OldFlagDef2 = OldMI2.findRegisterDefOperand(X86::EFLAGS);
7745 assert(!OldFlagDef1 == !OldFlagDef2 &&
7746 "Unexpected instruction type for reassociation");
7748 if (!OldFlagDef1 || !OldFlagDef2)
7751 assert(OldFlagDef1->isDead() && OldFlagDef2->isDead() &&
7752 "Must have dead EFLAGS operand in reassociable instruction");
7754 MachineOperand *NewFlagDef1 = NewMI1.findRegisterDefOperand(X86::EFLAGS);
7755 MachineOperand *NewFlagDef2 = NewMI2.findRegisterDefOperand(X86::EFLAGS);
7757 assert(NewFlagDef1 && NewFlagDef2 &&
7758 "Unexpected operand in reassociable instruction");
7760 // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations
7761 // of this pass or other passes. The EFLAGS operands must be dead in these new
7762 // instructions because the EFLAGS operands in the original instructions must
7763 // be dead in order for reassociation to occur.
7764 NewFlagDef1->setIsDead();
7765 NewFlagDef2->setIsDead();
7768 std::pair<unsigned, unsigned>
7769 X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
7770 return std::make_pair(TF, 0u);
7773 ArrayRef<std::pair<unsigned, const char *>>
7774 X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
7775 using namespace X86II;
7776 static const std::pair<unsigned, const char *> TargetFlags[] = {
7777 {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
7778 {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
7779 {MO_GOT, "x86-got"},
7780 {MO_GOTOFF, "x86-gotoff"},
7781 {MO_GOTPCREL, "x86-gotpcrel"},
7782 {MO_PLT, "x86-plt"},
7783 {MO_TLSGD, "x86-tlsgd"},
7784 {MO_TLSLD, "x86-tlsld"},
7785 {MO_TLSLDM, "x86-tlsldm"},
7786 {MO_GOTTPOFF, "x86-gottpoff"},
7787 {MO_INDNTPOFF, "x86-indntpoff"},
7788 {MO_TPOFF, "x86-tpoff"},
7789 {MO_DTPOFF, "x86-dtpoff"},
7790 {MO_NTPOFF, "x86-ntpoff"},
7791 {MO_GOTNTPOFF, "x86-gotntpoff"},
7792 {MO_DLLIMPORT, "x86-dllimport"},
7793 {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
7794 {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
7795 {MO_TLVP, "x86-tlvp"},
7796 {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
7797 {MO_SECREL, "x86-secrel"},
7798 {MO_COFFSTUB, "x86-coffstub"}};
7799 return makeArrayRef(TargetFlags);
7803 /// Create Global Base Reg pass. This initializes the PIC
7804 /// global base register for x86-32.
7805 struct CGBR : public MachineFunctionPass {
7807 CGBR() : MachineFunctionPass(ID) {}
7809 bool runOnMachineFunction(MachineFunction &MF) override {
7810 const X86TargetMachine *TM =
7811 static_cast<const X86TargetMachine *>(&MF.getTarget());
7812 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
7814 // Don't do anything in the 64-bit small and kernel code models. They use
7815 // RIP-relative addressing for everything.
7816 if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small ||
7817 TM->getCodeModel() == CodeModel::Kernel))
7820 // Only emit a global base reg in PIC mode.
7821 if (!TM->isPositionIndependent())
7824 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
7825 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
7827 // If we didn't need a GlobalBaseReg, don't insert code.
7828 if (GlobalBaseReg == 0)
7831 // Insert the set of GlobalBaseReg into the first MBB of the function
7832 MachineBasicBlock &FirstMBB = MF.front();
7833 MachineBasicBlock::iterator MBBI = FirstMBB.begin();
7834 DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
7835 MachineRegisterInfo &RegInfo = MF.getRegInfo();
7836 const X86InstrInfo *TII = STI.getInstrInfo();
7839 if (STI.isPICStyleGOT())
7840 PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
7844 if (STI.is64Bit()) {
7845 if (TM->getCodeModel() == CodeModel::Medium) {
7846 // In the medium code model, use a RIP-relative LEA to materialize the GOT.
7848 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
7852 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
7854 } else if (TM->getCodeModel() == CodeModel::Large) {
7855 // In the large code model, we are aiming for this code, though the
7856 // register allocation may vary:
7857 // leaq .LN$pb(%rip), %rax
7858 // movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx
7860 // RAX now holds address of _GLOBAL_OFFSET_TABLE_.
7861 Register PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
7862 Register GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
7863 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg)
7867 .addSym(MF.getPICBaseSymbol())
7869 std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol());
7870 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg)
7871 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
7872 X86II::MO_PIC_BASE_OFFSET);
7873 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC)
7874 .addReg(PBReg, RegState::Kill)
7875 .addReg(GOTReg, RegState::Kill);
7877 llvm_unreachable("unexpected code model");
7880 // Operand of MovePCtoStack is completely ignored by asm printer. It's
7881 // only used in JIT code emission as displacement to pc.
7882 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
7884 // If we're using vanilla 'GOT' PIC style, we should use relative
7885 // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
7886 if (STI.isPICStyleGOT()) {
7887 // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
7889 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
7891 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
7892 X86II::MO_GOT_ABSOLUTE_ADDRESS);
7899 StringRef getPassName() const override {
7900 return "X86 PIC Global Base Reg Initialization";
7903 void getAnalysisUsage(AnalysisUsage &AU) const override {
7904 AU.setPreservesCFG();
7905 MachineFunctionPass::getAnalysisUsage(AU);
7912 llvm::createX86GlobalBaseRegPass() { return new CGBR(); }
7915 struct LDTLSCleanup : public MachineFunctionPass {
7917 LDTLSCleanup() : MachineFunctionPass(ID) {}
7919 bool runOnMachineFunction(MachineFunction &MF) override {
7920 if (skipFunction(MF.getFunction()))
7923 X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
7924 if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
7925 // No point folding accesses if there aren't at least two.
7929 MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
7930 return VisitNode(DT->getRootNode(), 0);
7933 // Visit the dominator subtree rooted at Node in pre-order.
7934 // If TLSBaseAddrReg is non-zero, use it to replace any
7935 // TLS_base_addr instructions. Otherwise, create the register
7936 // when the first such instruction is seen, and then use it
7937 // as we encounter more instructions.
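// Illustrative sketch of the rewrite done below: the first TLS_base_addr in
// a dominating block has its RAX/EAX result cached in a virtual register,
//   $rax = TLS_base_addr64 ...        (followed by)  %tls_base = COPY $rax
// and every dominated TLS_base_addr is then replaced by a copy back,
//   $rax = TLS_base_addr64 ...   -->  $rax = COPY %tls_base
// (%tls_base is an illustrative name; see ReplaceTLSBaseAddrCall/SetRegister).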
7938 bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
7939 MachineBasicBlock *BB = Node->getBlock();
7940 bool Changed = false;
7942 // Traverse the current block.
7943 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
7945 switch (I->getOpcode()) {
7946 case X86::TLS_base_addr32:
7947 case X86::TLS_base_addr64:
7949 I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
7951 I = SetRegister(*I, &TLSBaseAddrReg);
7959 // Visit the children of this block in the dominator tree.
7960 for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
7962 Changed |= VisitNode(*I, TLSBaseAddrReg);
7968 // Replace the TLS_base_addr instruction I with a copy from
7969 // TLSBaseAddrReg, returning the new instruction.
7970 MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
7971 unsigned TLSBaseAddrReg) {
7972 MachineFunction *MF = I.getParent()->getParent();
7973 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
7974 const bool is64Bit = STI.is64Bit();
7975 const X86InstrInfo *TII = STI.getInstrInfo();
7977 // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
7978 MachineInstr *Copy =
7979 BuildMI(*I.getParent(), I, I.getDebugLoc(),
7980 TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
7981 .addReg(TLSBaseAddrReg);
7983 // Erase the TLS_base_addr instruction.
7984 I.eraseFromParent();
7989 // Create a virtual register in *TLSBaseAddrReg, and populate it by
7990 // inserting a copy instruction after I. Returns the new instruction.
7991 MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
7992 MachineFunction *MF = I.getParent()->getParent();
7993 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
7994 const bool is64Bit = STI.is64Bit();
7995 const X86InstrInfo *TII = STI.getInstrInfo();
7997 // Create a virtual register for the TLS base address.
7998 MachineRegisterInfo &RegInfo = MF->getRegInfo();
7999 *TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
8000 ? &X86::GR64RegClass
8001 : &X86::GR32RegClass);
8003 // Insert a copy from RAX/EAX to TLSBaseAddrReg.
8004 MachineInstr *Next = I.getNextNode();
8005 MachineInstr *Copy =
8006 BuildMI(*I.getParent(), Next, I.getDebugLoc(),
8007 TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
8008 .addReg(is64Bit ? X86::RAX : X86::EAX);
8013 StringRef getPassName() const override {
8014 return "Local Dynamic TLS Access Clean-up";
8017 void getAnalysisUsage(AnalysisUsage &AU) const override {
8018 AU.setPreservesCFG();
8019 AU.addRequired<MachineDominatorTree>();
8020 MachineFunctionPass::getAnalysisUsage(AU);
8025 char LDTLSCleanup::ID = 0;
8027 llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }
8029 /// Constants defining how certain sequences should be outlined.
8031 /// \p MachineOutlinerDefault implies that the function is called with a call
8032 /// instruction, and a return must be emitted for the outlined function frame.
8036 /// I1 OUTLINED_FUNCTION:
8037 /// I2 --> call OUTLINED_FUNCTION I1
8042 /// * Call construction overhead: 1 (call instruction)
8043 /// * Frame construction overhead: 1 (return instruction)
8045 /// \p MachineOutlinerTailCall implies that the function is being tail called.
8046 /// A jump is emitted instead of a call, and the return is already present in
8047 /// the outlined sequence. That is,
8049 /// I1 OUTLINED_FUNCTION:
8050 /// I2 --> jmp OUTLINED_FUNCTION I1
8054 /// * Call construction overhead: 1 (jump instruction)
8055 /// * Frame construction overhead: 0 (don't need to return)
8057 enum MachineOutlinerClass {
8058 MachineOutlinerDefault,
8059 MachineOutlinerTailCall
8062 outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo(
8063 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
8064 unsigned SequenceSize =
8065 std::accumulate(RepeatedSequenceLocs[0].front(),
8066 std::next(RepeatedSequenceLocs[0].back()), 0,
8067 [](unsigned Sum, const MachineInstr &MI) {
8068 // FIXME: x86 doesn't implement getInstSizeInBytes, so
8069 // we can't tell the cost. Just assume each instruction counts as one byte.
8071 if (MI.isDebugInstr() || MI.isKill())
8076 // FIXME: Use real size in bytes for call and ret instructions.
8077 if (RepeatedSequenceLocs[0].back()->isTerminator()) {
8078 for (outliner::Candidate &C : RepeatedSequenceLocs)
8079 C.setCallInfo(MachineOutlinerTailCall, 1);
8081 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
8082 0, // Number of bytes to emit frame.
8083 MachineOutlinerTailCall // Type of frame.
8087 for (outliner::Candidate &C : RepeatedSequenceLocs)
8088 C.setCallInfo(MachineOutlinerDefault, 1);
8090 return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1,
8091 MachineOutlinerDefault);
8094 bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
8095 bool OutlineFromLinkOnceODRs) const {
8096 const Function &F = MF.getFunction();
8098 // Does the function use a red zone? If it does, then we can't risk messing with the stack.
8100 if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
8101 // It could have a red zone. If it does, then we don't want to touch it.
8102 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
8103 if (!X86FI || X86FI->getUsesRedZone())
8107 // If we *don't* want to outline from things that could potentially be deduped
8108 // then return false.
8109 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
8112 // This function is viable for outlining, so return true.
8117 X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
8118 MachineInstr &MI = *MIT;
8119 // Don't allow debug values to impact outlining type.
8120 if (MI.isDebugInstr() || MI.isIndirectDebugValue())
8121 return outliner::InstrType::Invisible;
8123 // At this point, KILL instructions don't really tell us much so we can go
8124 // ahead and skip over them.
8126 return outliner::InstrType::Invisible;
8128 // Is this a tail call? If yes, we can outline as a tail call.
8130 return outliner::InstrType::Legal;
8132 // Is this the terminator of a basic block?
8133 if (MI.isTerminator() || MI.isReturn()) {
8135 // Does its parent have any successors in its MachineFunction?
8136 if (MI.getParent()->succ_empty())
8137 return outliner::InstrType::Legal;
8139 // It does, so we can't tail call it.
8140 return outliner::InstrType::Illegal;
8143 // Don't outline anything that modifies or reads from the stack pointer.
8145 // FIXME: There are instructions which are being manually built without
8146 // explicit uses/defs so we also have to check the MCInstrDesc. We should be
8147 // able to remove the extra checks once those are fixed up. For example,
8148 // sometimes we might get something like %rax = POP64r 1. This won't be
8149 // caught by modifiesRegister or readsRegister even though the instruction
8150 // really ought to be formed so that modifiesRegister/readsRegister would catch it.
8152 if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
8153 MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
8154 MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
8155 return outliner::InstrType::Illegal;
8157 // Outlined calls change the instruction pointer, so don't read from it.
8158 if (MI.readsRegister(X86::RIP, &RI) ||
8159 MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
8160 MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
8161 return outliner::InstrType::Illegal;
8163 // Positions can't safely be outlined.
8164 if (MI.isPosition())
8165 return outliner::InstrType::Illegal;
8167 // Make sure none of the operands of this instruction do anything tricky.
8168 for (const MachineOperand &MOP : MI.operands())
8169 if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
8170 MOP.isTargetIndex())
8171 return outliner::InstrType::Illegal;
8173 return outliner::InstrType::Legal;
8176 void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB,
8177 MachineFunction &MF,
8178 const outliner::OutlinedFunction &OF)
8180 // If we're a tail call, we already have a return, so don't do anything.
8181 if (OF.FrameConstructionID == MachineOutlinerTailCall)
8184 // We're a normal call, so our sequence doesn't have a return instruction; add one.
8186 MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RETQ));
8187 MBB.insert(MBB.end(), retq);
8190 MachineBasicBlock::iterator
8191 X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
8192 MachineBasicBlock::iterator &It,
8193 MachineFunction &MF,
8194 const outliner::Candidate &C) const {
8195 // Is it a tail call?
8196 if (C.CallConstructionID == MachineOutlinerTailCall) {
8197 // Yes, just insert a JMP.
8199 BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64))
8200 .addGlobalAddress(M.getNamedValue(MF.getName())));
8202 // No, insert a call.
8204 BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32))
8205 .addGlobalAddress(M.getNamedValue(MF.getName())));
8211 #define GET_INSTRINFO_HELPERS
8212 #include "X86GenInstrInfo.inc"