1 //===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the X86 implementation of the TargetRegisterInfo class.
11 // This file is responsible for the frame pointer elimination optimization
14 //===----------------------------------------------------------------------===//
16 #include "X86RegisterInfo.h"
17 #include "X86FrameLowering.h"
18 #include "X86MachineFunctionInfo.h"
19 #include "X86Subtarget.h"
20 #include "llvm/ADT/BitVector.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/MachineFunctionPass.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/TargetFrameLowering.h"
27 #include "llvm/CodeGen/TargetInstrInfo.h"
28 #include "llvm/IR/Constants.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/Type.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Target/TargetOptions.h"
38 #define GET_REGINFO_TARGET_DESC
39 #include "X86GenRegisterInfo.inc"
// Hidden escape-hatch flag (default: true).  When set to false,
// hasBasePointer() below always reports false, disabling use of a dedicated
// base-pointer register for complex stack frames.
// NOTE(review): the 'static cl::opt<bool>' head of this definition is elided
// in this listing — confirm against the full file.
42 EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
43 cl::desc("Enable use of a base pointer for complex stack frames"));
// Constructor: forwards RIP/EIP and the DWARF register-numbering flavours to
// the TableGen'erated base class, installs the SEH/CodeView register mapping,
// and caches the 64-bit-ness plus the stack/frame/base pointer register
// choices used throughout this file.
45 X86RegisterInfo::X86RegisterInfo(const Triple &TT)
46 : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
47 X86_MC::getDwarfRegFlavour(TT, false),
48 X86_MC::getDwarfRegFlavour(TT, true),
49 (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
50 X86_MC::initLLVMToSEHAndCVRegMapping(this);
52 // Cache some information.
53 Is64Bit = TT.isArch64Bit();
54 IsWin64 = Is64Bit && TT.isOSWindows();
56 // Use a callee-saved register as the base pointer. These registers must
57 // not conflict with any ABI requirements. For example, in 32-bit mode PIC
58 // requires GOT in the EBX register before function calls via PLT GOT pointer.
// NOTE(review): lines are elided here; as written, Use64BitReg would pick
// RSP/RBP/RBX even for 32-bit triples.  Presumably an 'if (Is64Bit)' guard
// (with a 32-bit else-branch) encloses the assignments below — confirm
// against the full file.
61 // This matches the simplified 32-bit pointer code in the data layout
63 // FIXME: Should use the data layout?
64 bool Use64BitReg = TT.getEnvironment() != Triple::GNUX32;
65 StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
66 FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
67 BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
77 X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
78 // ExecutionDomainFix, BreakFalseDeps and PostRAScheduler require liveness.
83 X86RegisterInfo::getSEHRegNum(unsigned i) const {
84 return getEncodingValue(i);
87 const TargetRegisterClass *
88 X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
90 // The sub_8bit sub-register index is more constrained in 32-bit mode.
91 // It behaves just like the sub_8bit_hi index.
92 if (!Is64Bit && Idx == X86::sub_8bit)
93 Idx = X86::sub_8bit_hi;
95 // Forward to TableGen's default version.
96 return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
99 const TargetRegisterClass *
100 X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
101 const TargetRegisterClass *B,
102 unsigned SubIdx) const {
103 // The sub_8bit sub-register index is more constrained in 32-bit mode.
104 if (!Is64Bit && SubIdx == X86::sub_8bit) {
105 A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
109 return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
// getLargestLegalSuperClass - Walk RC's super-class chain and return the
// largest super-class it is legal to inflate RC to on this subtarget.
// Inflation is capped by AVX-512/VLX availability (so e.g. FR32 is not
// widened to FR32X without AVX-512) and by spill size (never return a
// super-class whose registers are a different size than RC's).
// NOTE(review): this listing elides the 'return RC;' for GR8_NOREX, the
// super-class iteration loop around the switch, and the 'return Super;'
// line terminating each case — confirm against the full file.
112 const TargetRegisterClass *
113 X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
114 const MachineFunction &MF) const {
115 // Don't allow super-classes of GR8_NOREX. This class is only used after
116 // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
117 // to the full GR8 register class in 64-bit mode, so we cannot allow the
118 // register class inflation.
120 // The GR8_NOREX class is always used in a way that won't be constrained to a
121 // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
123 if (RC == &X86::GR8_NOREXRegClass)
126 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
128 const TargetRegisterClass *Super = RC;
129 TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
131 switch (Super->getID()) {
132 case X86::FR32RegClassID:
133 case X86::FR64RegClassID:
134 // If AVX-512 isn't supported we should only inflate to these classes.
135 if (!Subtarget.hasAVX512() &&
136 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
139 case X86::VR128RegClassID:
140 case X86::VR256RegClassID:
141 // If VLX isn't supported we should only inflate to these classes.
142 if (!Subtarget.hasVLX() &&
143 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
146 case X86::VR128XRegClassID:
147 case X86::VR256XRegClassID:
148 // If VLX isn't supported we shouldn't inflate to these classes.
149 if (Subtarget.hasVLX() &&
150 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
153 case X86::FR32XRegClassID:
154 case X86::FR64XRegClassID:
155 // If AVX-512 isn't supported we shouldn't inflate to these classes.
156 if (Subtarget.hasAVX512() &&
157 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
160 case X86::GR8RegClassID:
161 case X86::GR16RegClassID:
162 case X86::GR32RegClassID:
163 case X86::GR64RegClassID:
164 case X86::RFP32RegClassID:
165 case X86::RFP64RegClassID:
166 case X86::RFP80RegClassID:
167 case X86::VR512RegClassID:
168 // Don't return a super-class that would shrink the spill size.
169 // That can happen with the vector and float classes.
170 if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
// getPointerRegClass - Return the register class usable for a pointer
// operand, selected by Kind: 0 = normal GPRs (LP64 -> GR64; x32/NaCl-style
// ILP32-on-64 targets may use the LOW32_ADDR_ACCESS classes whose high bits
// are known zero), 1 = GPRs minus the stack pointer, 2 = NOREX GPRs,
// 3 = NOREX GPRs minus the stack pointer, 4 = registers usable for tail
// calls.
// NOTE(review): the 'switch (Kind) {' line and the 'if (Is64Bit)' guard
// around the LOW32_ADDR_ACCESS branch are elided in this listing — confirm
// against the full file.
178 const TargetRegisterClass *
179 X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
180 unsigned Kind) const {
181 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
183 default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
184 case 0: // Normal GPRs.
185 if (Subtarget.isTarget64BitLP64())
186 return &X86::GR64RegClass;
187 // If the target is 64bit but we have been told to use 32bit addresses,
188 // we can still use 64-bit register as long as we know the high bits
190 // Reflect that in the returned register class.
192 // When the target also allows 64-bit frame pointer and we do have a
193 // frame, this is fine to use it for the address accesses as well.
194 const X86FrameLowering *TFI = getFrameLowering(MF);
195 return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
196 ? &X86::LOW32_ADDR_ACCESS_RBPRegClass
197 : &X86::LOW32_ADDR_ACCESSRegClass;
199 return &X86::GR32RegClass;
200 case 1: // Normal GPRs except the stack pointer (for encoding reasons).
201 if (Subtarget.isTarget64BitLP64())
202 return &X86::GR64_NOSPRegClass;
203 // NOSP does not contain RIP, so no special case here.
204 return &X86::GR32_NOSPRegClass;
205 case 2: // NOREX GPRs.
206 if (Subtarget.isTarget64BitLP64())
207 return &X86::GR64_NOREXRegClass;
208 return &X86::GR32_NOREXRegClass;
209 case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
210 if (Subtarget.isTarget64BitLP64())
211 return &X86::GR64_NOREX_NOSPRegClass;
212 // NOSP does not contain RIP, so no special case here.
213 return &X86::GR32_NOREX_NOSPRegClass;
214 case 4: // Available for tailcall (not callee-saved GPRs).
215 return getGPRsForTailCall(MF);
219 const TargetRegisterClass *
220 X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
221 const Function &F = MF.getFunction();
222 if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
223 return &X86::GR64_TCW64RegClass;
225 return &X86::GR64_TCRegClass;
227 bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
229 return &X86::GR32RegClass;
230 return &X86::GR32_TCRegClass;
233 const TargetRegisterClass *
234 X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
235 if (RC == &X86::CCRRegClass) {
237 return &X86::GR64RegClass;
239 return &X86::GR32RegClass;
// getRegPressureLimit - Return the register-pressure limit the scheduler
// should assume for RC in this function.  GPR limits are reduced by one
// (FPDiff) when a frame pointer is in use, since it consumes a register.
// NOTE(review): this listing elides the 'unsigned' return-type line, the
// default case, and the per-case 'return' constants except VR128's —
// confirm against the full file.
245 X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
246 MachineFunction &MF) const {
247 const X86FrameLowering *TFI = getFrameLowering(MF);
249 unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
250 switch (RC->getID()) {
253 case X86::GR32RegClassID:
255 case X86::GR64RegClassID:
257 case X86::VR128RegClassID:
258 return Is64Bit ? 10 : 4;
259 case X86::VR64RegClassID:
// getCalleeSavedRegs - Return the TableGen-generated callee-saved register
// list (CSR_*_SaveList) for this function, selected by calling convention
// and refined by subtarget features (SSE/AVX/AVX-512), Win64 vs. SysV, EH
// returns, and the SwiftError attribute.  Functions carrying the
// "no_caller_saved_registers" attribute are treated as X86_INTR because that
// convention already has the CSR list they need.
// NOTE(review): this listing elides the 'switch (CC) {' line, the feature
// guards ('if (HasAVX)', 'if (Is64Bit)', 'if (IsWin64)', ...) in front of
// many of the returns, and the closing braces — confirm against the full
// file before relying on the exact branch structure.
265 X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
266 assert(MF && "MachineFunction required");
268 const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
269 const Function &F = MF->getFunction();
270 bool HasSSE = Subtarget.hasSSE1();
271 bool HasAVX = Subtarget.hasAVX();
272 bool HasAVX512 = Subtarget.hasAVX512();
273 bool CallsEHReturn = MF->callsEHReturn();
275 CallingConv::ID CC = F.getCallingConv();
277 // If attribute NoCallerSavedRegisters exists then we set X86_INTR calling
278 // convention because it has the CSR list.
279 if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
280 CC = CallingConv::X86_INTR;
283 case CallingConv::GHC:
284 case CallingConv::HiPE:
285 return CSR_NoRegs_SaveList;
286 case CallingConv::AnyReg:
288 return CSR_64_AllRegs_AVX_SaveList;
289 return CSR_64_AllRegs_SaveList;
290 case CallingConv::PreserveMost:
291 return CSR_64_RT_MostRegs_SaveList;
292 case CallingConv::PreserveAll:
294 return CSR_64_RT_AllRegs_AVX_SaveList;
295 return CSR_64_RT_AllRegs_SaveList;
296 case CallingConv::CXX_FAST_TLS:
298 return MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR() ?
299 CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;
301 case CallingConv::Intel_OCL_BI: {
302 if (HasAVX512 && IsWin64)
303 return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
304 if (HasAVX512 && Is64Bit)
305 return CSR_64_Intel_OCL_BI_AVX512_SaveList;
306 if (HasAVX && IsWin64)
307 return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
308 if (HasAVX && Is64Bit)
309 return CSR_64_Intel_OCL_BI_AVX_SaveList;
310 if (!HasAVX && !IsWin64 && Is64Bit)
311 return CSR_64_Intel_OCL_BI_SaveList;
314 case CallingConv::HHVM:
315 return CSR_64_HHVM_SaveList;
316 case CallingConv::X86_RegCall:
319 return (HasSSE ? CSR_Win64_RegCall_SaveList :
320 CSR_Win64_RegCall_NoSSE_SaveList);
322 return (HasSSE ? CSR_SysV64_RegCall_SaveList :
323 CSR_SysV64_RegCall_NoSSE_SaveList);
326 return (HasSSE ? CSR_32_RegCall_SaveList :
327 CSR_32_RegCall_NoSSE_SaveList);
329 case CallingConv::Cold:
331 return CSR_64_MostRegs_SaveList;
333 case CallingConv::Win64:
335 return CSR_Win64_NoSSE_SaveList;
336 return CSR_Win64_SaveList;
337 case CallingConv::X86_64_SysV:
339 return CSR_64EHRet_SaveList;
340 return CSR_64_SaveList;
341 case CallingConv::X86_INTR:
344 return CSR_64_AllRegs_AVX512_SaveList;
346 return CSR_64_AllRegs_AVX_SaveList;
348 return CSR_64_AllRegs_SaveList;
349 return CSR_64_AllRegs_NoSSE_SaveList;
352 return CSR_32_AllRegs_AVX512_SaveList;
354 return CSR_32_AllRegs_AVX_SaveList;
356 return CSR_32_AllRegs_SSE_SaveList;
357 return CSR_32_AllRegs_SaveList;
// Default (C-like) conventions fall through to here.
364 bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
365 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
367 return IsWin64 ? CSR_Win64_SwiftError_SaveList
368 : CSR_64_SwiftError_SaveList;
371 return HasSSE ? CSR_Win64_SaveList : CSR_Win64_NoSSE_SaveList;
373 return CSR_64EHRet_SaveList;
374 return CSR_64_SaveList;
377 return CallsEHReturn ? CSR_32EHRet_SaveList : CSR_32_SaveList;
380 const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
381 const MachineFunction *MF) const {
382 assert(MF && "Invalid MachineFunction pointer.");
383 if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
384 MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
385 return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
// getCallPreservedMask - Return the TableGen-generated register mask
// (CSR_*_RegMask) describing which registers a call with convention CC
// preserves.  Mirrors the convention/feature selection in
// getCalleeSavedRegs() above, except that without access to function
// attributes for the *callee* it cannot consult per-function state such as
// callsEHReturn().
// NOTE(review): this listing elides the 'switch (CC) {' line, many feature
// guards in front of the returns, and the closing braces — confirm against
// the full file before relying on the exact branch structure.
390 X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
391 CallingConv::ID CC) const {
392 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
393 bool HasSSE = Subtarget.hasSSE1();
394 bool HasAVX = Subtarget.hasAVX();
395 bool HasAVX512 = Subtarget.hasAVX512();
398 case CallingConv::GHC:
399 case CallingConv::HiPE:
400 return CSR_NoRegs_RegMask;
401 case CallingConv::AnyReg:
403 return CSR_64_AllRegs_AVX_RegMask;
404 return CSR_64_AllRegs_RegMask;
405 case CallingConv::PreserveMost:
406 return CSR_64_RT_MostRegs_RegMask;
407 case CallingConv::PreserveAll:
409 return CSR_64_RT_AllRegs_AVX_RegMask;
410 return CSR_64_RT_AllRegs_RegMask;
411 case CallingConv::CXX_FAST_TLS:
413 return CSR_64_TLS_Darwin_RegMask;
415 case CallingConv::Intel_OCL_BI: {
416 if (HasAVX512 && IsWin64)
417 return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
418 if (HasAVX512 && Is64Bit)
419 return CSR_64_Intel_OCL_BI_AVX512_RegMask;
420 if (HasAVX && IsWin64)
421 return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
422 if (HasAVX && Is64Bit)
423 return CSR_64_Intel_OCL_BI_AVX_RegMask;
424 if (!HasAVX && !IsWin64 && Is64Bit)
425 return CSR_64_Intel_OCL_BI_RegMask;
428 case CallingConv::HHVM:
429 return CSR_64_HHVM_RegMask;
430 case CallingConv::X86_RegCall:
433 return (HasSSE ? CSR_Win64_RegCall_RegMask :
434 CSR_Win64_RegCall_NoSSE_RegMask);
436 return (HasSSE ? CSR_SysV64_RegCall_RegMask :
437 CSR_SysV64_RegCall_NoSSE_RegMask);
440 return (HasSSE ? CSR_32_RegCall_RegMask :
441 CSR_32_RegCall_NoSSE_RegMask);
443 case CallingConv::Cold:
445 return CSR_64_MostRegs_RegMask;
447 case CallingConv::Win64:
448 return CSR_Win64_RegMask;
449 case CallingConv::X86_64_SysV:
450 return CSR_64_RegMask;
451 case CallingConv::X86_INTR:
454 return CSR_64_AllRegs_AVX512_RegMask;
456 return CSR_64_AllRegs_AVX_RegMask;
458 return CSR_64_AllRegs_RegMask;
459 return CSR_64_AllRegs_NoSSE_RegMask;
462 return CSR_32_AllRegs_AVX512_RegMask;
464 return CSR_32_AllRegs_AVX_RegMask;
466 return CSR_32_AllRegs_SSE_RegMask;
467 return CSR_32_AllRegs_RegMask;
// Default (C-like) conventions fall through to here.
473 // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
476 const Function &F = MF.getFunction();
477 bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
478 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
480 return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;
481 return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
484 return CSR_32_RegMask;
488 X86RegisterInfo::getNoPreservedMask() const {
489 return CSR_NoRegs_RegMask;
492 const uint32_t *X86RegisterInfo::getDarwinTLSCallPreservedMask() const {
493 return CSR_64_TLS_Darwin_RegMask;
// getReservedRegs - Build the set of registers the register allocator must
// never allocate for this function: FPCW, SP/IP (and all aliases), SSP, the
// segment registers, the x87 stack, the frame pointer when one is used, the
// base pointer when one is needed, and — outside 64-bit mode or without
// AVX-512 — the registers that only exist with those extensions.
// NOTE(review): this listing elides the loop bodies ('++I)
// Reserved.set(*I);'), the report_fatal_error( head before the string at
// orig-528/529, the 'if (!Is64Bit) {' guard before the SIL/DIL group, and
// several closing braces — confirm against the full file.
496 BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
497 BitVector Reserved(getNumRegs());
498 const X86FrameLowering *TFI = getFrameLowering(MF);
500 // Set the floating point control register as reserved.
501 Reserved.set(X86::FPCW);
503 // Set the stack-pointer register and its aliases as reserved.
504 for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
508 // Set the Shadow Stack Pointer as reserved.
509 Reserved.set(X86::SSP);
511 // Set the instruction pointer register and its aliases as reserved.
512 for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
516 // Set the frame-pointer register and its aliases as reserved if needed.
517 if (TFI->hasFP(MF)) {
518 for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
523 // Set the base-pointer register and its aliases as reserved if needed.
524 if (hasBasePointer(MF)) {
525 CallingConv::ID CC = MF.getFunction().getCallingConv();
526 const uint32_t *RegMask = getCallPreservedMask(MF, CC);
527 if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
529 "Stack realignment in presence of dynamic allocas is not supported with"
530 "this calling convention.");
532 unsigned BasePtr = getX86SubSuperRegister(getBaseRegister(), 64);
533 for (MCSubRegIterator I(BasePtr, this, /*IncludeSelf=*/true);
538 // Mark the segment registers as reserved.
539 Reserved.set(X86::CS);
540 Reserved.set(X86::SS);
541 Reserved.set(X86::DS);
542 Reserved.set(X86::ES);
543 Reserved.set(X86::FS);
544 Reserved.set(X86::GS);
546 // Mark the floating point stack registers as reserved.
547 for (unsigned n = 0; n != 8; ++n)
548 Reserved.set(X86::ST0 + n);
550 // Reserve the registers that only exist in 64-bit mode.
552 // These 8-bit registers are part of the x86-64 extension even though their
553 // super-registers are old 32-bits.
554 Reserved.set(X86::SIL);
555 Reserved.set(X86::DIL);
556 Reserved.set(X86::BPL);
557 Reserved.set(X86::SPL);
558 Reserved.set(X86::SIH);
559 Reserved.set(X86::DIH);
560 Reserved.set(X86::BPH);
561 Reserved.set(X86::SPH);
563 for (unsigned n = 0; n != 8; ++n) {
// R8-R15 and XMM8-XMM15 (and all their aliases) do not exist in 32-bit mode.
565 for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
569 for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
// XMM16-XMM31 (and aliases) require both 64-bit mode and AVX-512.
573 if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
574 for (unsigned n = 16; n != 32; ++n) {
575 for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
580 assert(checkAllSuperRegsMarked(Reserved,
581 {X86::SIL, X86::DIL, X86::BPL, X86::SPL,
582 X86::SIH, X86::DIH, X86::BPH, X86::SPH}));
586 void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
587 // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
588 // because the calling convention defines the EFLAGS register as NOT
591 // Unfortunatelly the EFLAGS show up as live-out after branch folding. Adding
592 // an assert to track this and clear the register afterwards to avoid
593 // unnecessary crashes during release builds.
594 assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
595 "EFLAGS are not live-out from a patchpoint.");
597 // Also clean other registers that don't need preserving (IP).
598 for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
599 Mask[Reg / 32] &= ~(1U << (Reg % 32));
602 //===----------------------------------------------------------------------===//
603 // Stack Frame Processing methods
604 //===----------------------------------------------------------------------===//
606 static bool CantUseSP(const MachineFrameInfo &MFI) {
607 return MFI.hasVarSizedObjects() || MFI.hasOpaqueSPAdjustment();
610 bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
611 const MachineFrameInfo &MFI = MF.getFrameInfo();
613 if (!EnableBasePointer)
616 // When we need stack realignment, we can't address the stack from the frame
617 // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
618 // can't address variables from the stack pointer. MS inline asm can
619 // reference locals while also adjusting the stack pointer. When we can't
620 // use both the SP and the FP, we need a separate base pointer register.
621 bool CantUseFP = needsStackRealignment(MF);
622 return CantUseFP && CantUseSP(MFI);
625 bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
626 if (!TargetRegisterInfo::canRealignStack(MF))
629 const MachineFrameInfo &MFI = MF.getFrameInfo();
630 const MachineRegisterInfo *MRI = &MF.getRegInfo();
632 // Stack realignment requires a frame pointer. If we already started
633 // register allocation with frame pointer elimination, it is too late now.
634 if (!MRI->canReserveReg(FramePtr))
637 // If a base pointer is necessary. Check that it isn't too late to reserve
640 return MRI->canReserveReg(BasePtr);
644 bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
645 unsigned Reg, int &FrameIdx) const {
646 // Since X86 defines assignCalleeSavedSpillSlots which always return true
647 // this function neither used nor tested.
648 llvm_unreachable("Unused function on X86. Otherwise need a test case.");
651 // tryOptimizeLEAtoMOV - helper function that tries to replace a LEA instruction
652 // of the form 'lea (%esp), %ebx' --> 'mov %esp, %ebx'.
653 // TODO: In this case we should be really trying first to entirely eliminate
654 // this instruction which is a plain copy.
655 static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II) {
656 MachineInstr &MI = *II;
657 unsigned Opc = II->getOpcode();
658 // Check if this is a LEA of the form 'lea (%esp), %ebx'
659 if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||
660 MI.getOperand(2).getImm() != 1 ||
661 MI.getOperand(3).getReg() != X86::NoRegister ||
662 MI.getOperand(4).getImm() != 0 ||
663 MI.getOperand(5).getReg() != X86::NoRegister)
665 unsigned BasePtr = MI.getOperand(1).getReg();
666 // In X32 mode, ensure the base-pointer is a 32-bit operand, so the LEA will
667 // be replaced with a 32-bit operand MOV which will zero extend the upper
668 // 32-bits of the super register.
669 if (Opc == X86::LEA64_32r)
670 BasePtr = getX86SubSuperRegister(BasePtr, 32);
671 unsigned NewDestReg = MI.getOperand(0).getReg();
672 const X86InstrInfo *TII =
673 MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
674 TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
675 MI.getOperand(1).isKill());
676 MI.eraseFromParent();
// eliminateFrameIndex - Rewrite the abstract frame-index operand of MI into
// a concrete base-register + offset pair.  Special cases: LOCAL_ESCAPE takes
// a bare immediate; STACKMAP/PATCHPOINT use an FI + offset format; a
// zero-offset LEA may be turned into a plain MOV (tryOptimizeLEAtoMOV).
// NOTE(review): this listing elides the declarations/selection of 'BasePtr'
// and 'FIOffset' (orig 690-692), the branch heads around the two FIOffset
// computations, the early 'return's after the special cases, and the 'else'
// before the symbolic-offset branch — confirm against the full file.
681 X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
682 int SPAdj, unsigned FIOperandNum,
683 RegScavenger *RS) const {
684 MachineInstr &MI = *II;
685 MachineFunction &MF = *MI.getParent()->getParent();
686 const X86FrameLowering *TFI = getFrameLowering(MF);
687 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
689 // Determine base register and offset.
693 assert((!needsStackRealignment(MF) ||
694 MF.getFrameInfo().isFixedObjectIndex(FrameIndex)) &&
695 "Return instruction can only reference SP relative frame objects");
696 FIOffset = TFI->getFrameIndexReferenceSP(MF, FrameIndex, BasePtr, 0);
698 FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, BasePtr);
701 // LOCAL_ESCAPE uses a single offset, with no register. It only works in the
702 // simple FP case, and doesn't work with stack realignment. On 32-bit, the
703 // offset is from the traditional base pointer location. On 64-bit, the
704 // offset is from the SP at the end of the prologue, not the FP location. This
705 // matches the behavior of llvm.frameaddress.
706 unsigned Opc = MI.getOpcode();
707 if (Opc == TargetOpcode::LOCAL_ESCAPE) {
708 MachineOperand &FI = MI.getOperand(FIOperandNum);
709 FI.ChangeToImmediate(FIOffset);
713 // For LEA64_32r when BasePtr is 32-bits (X32) we can use full-size 64-bit
714 // register as source operand, semantic is the same and destination is
715 // 32-bits. It saves one byte per lea in code since 0x67 prefix is avoided.
716 // Don't change BasePtr since it is used later for stack adjustment.
717 unsigned MachineBasePtr = BasePtr;
718 if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
719 MachineBasePtr = getX86SubSuperRegister(BasePtr, 64);
721 // This must be part of a four operand memory reference. Replace the
722 // FrameIndex with base register. Add an offset to the offset.
723 MI.getOperand(FIOperandNum).ChangeToRegister(MachineBasePtr, false);
// When addressing off SP, pending call-frame adjustments (SPAdj) must be
// folded into the offset.
725 if (BasePtr == StackPtr)
728 // The frame index format for stackmaps and patchpoints is different from the
729 // X86 format. It only has a FI and an offset.
730 if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
731 assert(BasePtr == FramePtr && "Expected the FP as base register");
732 int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
733 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
737 if (MI.getOperand(FIOperandNum+3).isImm()) {
738 // Offset is a 32-bit integer.
739 int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
740 int Offset = FIOffset + Imm;
741 assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
742 "Requesting 64-bit offset in 32-bit immediate!");
743 if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
744 MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
746 // Offset is symbolic. This is extremely rare.
747 uint64_t Offset = FIOffset +
748 (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
749 MI.getOperand(FIOperandNum + 3).setOffset(Offset);
753 unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
754 const X86FrameLowering *TFI = getFrameLowering(MF);
755 return TFI->hasFP(MF) ? FramePtr : StackPtr;
759 X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
760 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
761 unsigned FrameReg = getFrameRegister(MF);
762 if (Subtarget.isTarget64BitILP32())
763 FrameReg = getX86SubSuperRegister(FrameReg, 32);