//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC) {}
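
// Darwin and Thumb-style targets use R7 as the frame pointer; other ARM-mode
// AAPCS targets use R11.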
static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  return STI.useR7AsFramePointer() ? ARM::R7 : ARM::R11;
}

const MCPhysReg *
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  bool UseSplitPush = STI.splitFramePushPop(*MF);
  const MCPhysReg *RegList =
      STI.isTargetDarwin()
          ? CSR_iOS_SaveList
          : (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList);

  const Function *F = MF->getFunction();
  if (F->getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved regs is empty, as all those regs are
    // used for passing STG regs around.
    return CSR_NoRegs_SaveList;
  } else if (F->hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow a
      // function conforming to the AAPCS to function as a handler.
      return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList;
    } else if (F->getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.isTargetDarwin() && STI.getTargetLowering()->supportSwiftError() &&
      F->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_iOS_SwiftError_SaveList;

  if (STI.isTargetDarwin() && F->getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;

  return RegList;
}

const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return CSR_NoRegs_RegMask;

  if (STI.isTargetDarwin() && STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_iOS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i32 argument (which must also be the register used to return a
  // single i32 return value).
  //
  // If the calling convention does not use the same register for both, or
  // otherwise does not want to enable this optimization, the function should
  // return NULL.
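  // (For AAPCS that register is R0, so the ThisReturn masks below preserve R0
  // on top of the normal callee-saved set.)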
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::SP);
  markSuperRegs(Reserved, ARM::PC);
  markSuperRegs(Reserved, ARM::FPSCR);
  markSuperRegs(Reserved, ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, getFramePointerReg(STI));
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    markSuperRegs(Reserved, ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasVFP3() || STI.hasD16()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    for (unsigned R = 0; R < 16; ++R)
      markSuperRegs(Reserved, ARM::D16 + R);
  }
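
  // A register pair is unusable if either half is reserved (e.g. R8_R9 when
  // R9 is reserved), so mark such pairs reserved as well.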
  const TargetRegisterClass *RC = &ARM::GPRPairRegClass;
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); I != E; ++I)
    for (MCSubRegIterator SI(*I, this); SI.isValid(); ++SI)
      if (Reserved.test(*SI))
        markSuperRegs(Reserved, *I);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass;  // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
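// For example, getPairedGPR(ARM::R1, /*Odd=*/false, RI) returns ARM::R0, and
// getPairedGPR(ARM::R0, /*Odd=*/true, RI) returns ARM::R1.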
static unsigned getPairedGPR(unsigned Reg, bool Odd, const MCRegisterInfo *RI) {
  for (MCSuperRegIterator Supers(Reg, RI); Supers.isValid(); ++Supers)
    if (ARM::GPRPairRegClass.contains(*Supers))
      return RI->getSubReg(*Supers, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
void
ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  default:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    return;
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
  unsigned Paired = Hint.second;
  if (Paired == 0)
    return;

  unsigned PairedPhys = 0;
  if (TargetRegisterInfo::isPhysicalRegister(Paired)) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
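  // (The low bit of a GPR's hardware encoding, R0 == 0 through R12 == 12,
  // gives its parity.)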
  for (unsigned I = 0, E = Order.size(); I != E; ++I) {
    unsigned Reg = Order[I];
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    unsigned Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
}

void
ARMBaseRegisterInfo::updateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one half of an even / odd register pair and it has now been
    // changed (e.g. coalesced) into a different register, the hint on the
    // other half of the pair must be updated to reflect the change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (TargetRegisterInfo::isVirtualRegister(NewReg))
        MRI->setRegAllocationHint(NewReg,
            Hint.first == (unsigned)ARMRI::RegPairOdd ? ARMRI::RegPairEven
                                                      : ARMRI::RegPairOdd,
            OtherReg);
    }
  }
}

bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // When outgoing call frames are so large that we adjust the stack pointer
  // around the call, we can no longer use the stack pointer to reach the
  // emergency spill slot.
  if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets only.
  // It's going to be better to use the SP or Base Pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a Base Pointer.
  if (AFI->isThumbFunction() && MFI.hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, the scavenger will still enable access to work, it just
    // adds offsets.
    if (AFI->isThumb2Function() && MFI.getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
  // 3. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  if (AFI->isThumb1OnlyFunction())
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(getFramePointerReg(MF.getSubtarget<ARMSubtarget>())))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         needsStackRealignment(MF);
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return getFramePointerReg(STI);
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from the constant pool to materialize the
/// specified immediate.
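/// The value is interned in the function's constant pool and loaded with an
/// ARM-mode LDRcp.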
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .add(predOps(Pred, PredReg))
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
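    // VFP address mode: the 8-bit offset field counts words, with add/sub kept
    // as a separate flag; e.g. a VLDRD at #-8 carries an offset field of 2 and
    // the sub flag, which decodes to -8 below.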
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrMode3:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrModeT1_s:
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }

  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
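/// The base register is defined with an ADDri (ARM), t2ADDri (Thumb2), or
/// tADDframe (Thumb1) of the frame index plus Offset.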
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             unsigned BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
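    // i.e. tLDRspi/tSTRspi reach 0-1020 off SP, while a low-register base only
    // gets a 5-bit immediate (0-124 after scaling).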
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale - 1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above; handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(&ARM::GPRRegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
  }
}

bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register, this should be OK because we shouldn't
  // need to split the register.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (NewRC->getSize() < 32 && DstRC->getSize() < 32 && SrcRC->getSize() < 32)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately we don't know yet whether we will be constrained.
  // The goal of this heuristic is to restrict how many expensive registers
  // we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
               << It->second << "\n");
  DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
               << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //   (1) addresses PR18825
  //   (2) generates better code in some test cases (like vldm-sched-a9.ll)
  //   (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line code
  // that uses a lot of NEON vectors, which isn't terribly common.
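  // For example, a basic block of 250 instructions gets SizeMultiplier == 2,
  // doubling the coalescing budget below.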
  unsigned SizeMultiplier = MBB->size() / 100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}