//===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of TargetFrameLowering class.
//
// On AArch64, stack frames are structured as follows:
//
// The stack grows downward.
//
// All of the individual frame areas on the frame below are optional, i.e. it's
// possible to create a function so that the particular area isn't present
// in the function.
//
// At function entry, the "frame" looks as follows:
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------| <- sp
// |                                   | Lower address
//
// After the prologue has run, the frame has the following general structure.
// Note that this doesn't depict the case where a red-zone is used. Also,
// technically the last frame area (VLAs) doesn't get created until in the
// main function body, after the prologue is run. However, it's depicted here
// for completeness.
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | (Win64 only) varargs from reg     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | callee-saved gpr registers        | <--.
// |                                   |    | On Darwin platforms these
// |- - - - - - - - - - - - - - - - - -|    | callee saves are swapped,
// | prev_lr                           |    | (frame record first)
// | prev_fp                           | <--'
// | async context if needed           |
// | (a.k.a. "frame record")           |
// |-----------------------------------| <- fp(=x29)
// |                                   |
// | callee-saved fp/simd/SVE regs     |
// |                                   |
// |-----------------------------------|
// |                                   |
// |        SVE stack objects          |
// |                                   |
// |-----------------------------------|
// |.empty.space.to.make.part.below....|
// |.aligned.in.case.it.needs.more.than| (size of this area is unknown at
// |.the.standard.16-byte.alignment....|  compile time; if present)
// |-----------------------------------|
// |                                   |
// | local variables of fixed size     |
// | including spill slots             |
// |-----------------------------------| <- bp(not defined by ABI,
// |.variable-sized.local.variables....|       LLVM chooses X19)
// |.(VLAs)............................| (size of this area is unknown at
// |...................................|  compile time)
// |-----------------------------------| <- sp
// |                                   | Lower address
//
// To access the data in a frame, a constant offset from one of the pointers
// (fp, bp, sp) to the data must be computable at compile time. The size of
// the areas with a dotted background cannot be computed at compile time if
// they are present, making it required to have all three of fp, bp and sp
// set up to be able to access all contents in the frame areas, assuming all
// of the frame areas are non-empty.
//
// For most functions, some of the frame areas are empty. For those functions,
// it may not be necessary to set up fp or bp:
// * A base pointer is definitely needed when there are both VLAs and local
//   variables with more-than-default alignment requirements.
// * A frame pointer is definitely needed when there are local variables with
//   more-than-default alignment requirements.
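//
// As a hedged illustration (register choices and offsets here are
// hypothetical, not necessarily what the compiler emits): with VLAs plus an
// over-aligned local present, each pointer anchors a different area:
//
//   ldr x0, [x29, #16]   // incoming stack argument, fp-relative
//   ldr x1, [x19, #32]   // fixed-size local, bp-relative (valid across VLAs)
//   mov x2, sp           // bottom of the frame, below the VLAs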
//
// For Darwin platforms the frame-record (fp, lr) is stored at the top of the
// callee-saved area, since the unwind encoding does not allow for encoding
// this dynamically and existing tools depend on this layout. For other
// platforms, the frame-record is stored at the bottom of the (gpr) callee-saved
// area to allow SVE stack objects (allocated directly below the callee-saves,
// if available) to be accessed directly from the framepointer.
// The SVE spill/fill instructions have VL-scaled addressing modes such
// as:
//    ldr z8, [fp, #-7 mul vl]
// For SVE the size of the vector length (VL) is not known at compile-time, so
// '#-7 mul vl' is an offset that can only be evaluated at runtime. With this
// layout, we don't need to add an unscaled offset to the framepointer before
// accessing the SVE object in the frame.
//
// In some cases when a base pointer is not strictly needed, it is generated
// anyway when offsets from the frame pointer to access local variables become
// so large that the offset can't be encoded in the immediate fields of loads
// or stores.
//
// Outgoing function arguments must be at the bottom of the stack frame when
// calling another function. If we do not have variable-sized stack objects, we
// can allocate a "reserved call frame" area at the bottom of the local
// variable area, large enough for all outgoing calls. If we do have VLAs, then
// the stack pointer must be decremented and incremented around each call to
// make space for the arguments below the VLAs.
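//
// For example (a sketch, not literal emitted code), the non-reserved case
// brackets each call:
//
//   sub sp, sp, #32      // make space for outgoing stack arguments
//   str x8, [sp]         // pass an argument on the stack
//   bl  callee
//   add sp, sp, #32      // reclaim the argument area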
//
// FIXME: also explain the redzone concept.
// (Briefly: where supported, a leaf function may use up to 128 bytes below
// sp -- the "red zone" -- without adjusting sp at all; see canUseRedZone().)
//
// An example of the prologue:
//
//     .globl __foo
//     .align 2
//  __foo:
// Ltmp0:
//     .cfi_startproc
//     .cfi_personality 155, ___gxx_personality_v0
// Leh_func_begin:
//     .cfi_lsda 16, Lexception33
//
//     stp  xa,bx, [sp, -#offset]!
//     ...
//     stp  x28, x27, [sp, #offset-32]
//     stp  fp, lr, [sp, #offset-16]
//     add  fp, sp, #offset - 16
//     sub  sp, sp, #1360
//
// The Stack:
//       +-------------------------------------------+
// 10000 | ........ | ........ | ........ | ........ |
// 10004 | ........ | ........ | ........ | ........ |
//       +-------------------------------------------+
// 10008 | ........ | ........ | ........ | ........ |
// 1000c | ........ | ........ | ........ | ........ |
//       +===========================================+
// 10010 |                X28 Register               |
// 10014 |                X28 Register               |
//       +-------------------------------------------+
// 10018 |                X27 Register               |
// 1001c |                X27 Register               |
//       +===========================================+
// 10020 |                Frame Pointer              |
// 10024 |                Frame Pointer              |
//       +-------------------------------------------+
// 10028 |                Link Register              |
// 1002c |                Link Register              |
//       +===========================================+
// 10030 | ........ | ........ | ........ | ........ |
// 10034 | ........ | ........ | ........ | ........ |
//       +-------------------------------------------+
// 10038 | ........ | ........ | ........ | ........ |
// 1003c | ........ | ........ | ........ | ........ |
//       +-------------------------------------------+
//
//     [sp] = 10030        ::    >>initial value<<
//     sp = 10020          ::  stp  fp, lr, [sp, #-16]!
//     fp = sp == 10020    ::  mov fp, sp
//     [sp] == 10020       ::  stp  x28, x27, [sp, #-16]!
//     sp == 10010         ::  >>final value<<
//
// The frame pointer (w29) points to address 10020. If we use an offset of
// '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
// for w27, and -32 for w28:
//
// Ltmp1:
//     .cfi_def_cfa w29, 16
// Ltmp2:
//     .cfi_offset w30, -8
// Ltmp3:
//     .cfi_offset w29, -16
// Ltmp4:
//     .cfi_offset w27, -24
// Ltmp5:
//     .cfi_offset w28, -32
//
//===----------------------------------------------------------------------===//

#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

static cl::opt<bool>
    ReverseCSRRestoreSeq("reverse-csr-restore-seq",
                         cl::desc("reverse the CSR restore sequence"),
                         cl::init(false), cl::Hidden);

static cl::opt<bool> StackTaggingMergeSetTag(
    "stack-tagging-merge-settag",
    cl::desc("merge settag instruction in function epilog"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> OrderFrameObjects("aarch64-order-frame-objects",
                                       cl::desc("sort stack allocations"),
                                       cl::init(true), cl::Hidden);

cl::opt<bool> EnableHomogeneousPrologEpilog(
    "homogeneous-prolog-epilog", cl::Hidden,
    cl::desc("Emit homogeneous prologue and epilogue for the size "
             "optimization (default = off)"));

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");

/// Returns how much of the incoming argument stack area (in bytes) we should
/// clean up in an epilogue. For the C calling convention this will be 0, for
/// guaranteed tail call conventions it can be positive (a normal return or a
/// tail call to a function that uses less stack space for arguments) or
/// negative (for a tail call to a function that needs more stack space than us
/// for arguments).
static int64_t getArgumentStackToRestore(MachineFunction &MF,
                                         MachineBasicBlock &MBB) {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  bool IsTailCallReturn = false;
  if (MBB.end() != MBBI) {
    unsigned RetOpcode = MBBI->getOpcode();
    IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
                       RetOpcode == AArch64::TCRETURNri ||
                       RetOpcode == AArch64::TCRETURNriBTI;
  }
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();

  int64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    MachineOperand &StackAdjust = MBBI->getOperand(1);

    // For a tail-call in a callee-pops-arguments environment, some or all of
    // the stack may actually be in use for the call's arguments, this is
    // calculated during LowerCall and consumed here...
    ArgumentPopSize = StackAdjust.getImm();
  } else {
    // ... otherwise the amount to pop is *all* of the argument space,
    // conveniently stored in the MachineFunctionInfo by
    // LowerFormalArguments. This will, of course, be zero for the C calling
    // convention.
    ArgumentPopSize = AFI->getArgumentStackToRestore();
  }

  return ArgumentPopSize;
}
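
// Worked example (hypothetical sizes): under a guaranteed-tail-call
// convention, a function entered with 32 bytes of stack arguments that
// tail-calls a callee needing only 16 restores the remaining 16 bytes here;
// if the callee instead needs 48, the result is -16 and the frame grows.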

static bool produceCompactUnwindFrame(MachineFunction &MF);
static bool needsWinCFI(const MachineFunction &MF);
static StackOffset getSVEStackSize(const MachineFunction &MF);
static bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF);

/// Returns true if a homogeneous prolog or epilog code can be emitted
/// for the size optimization. If possible, a frame helper call is injected.
/// When Exit block is given, this check is for epilog.
bool AArch64FrameLowering::homogeneousPrologEpilog(
    MachineFunction &MF, MachineBasicBlock *Exit) const {
  if (!MF.getFunction().hasMinSize())
    return false;
  if (!EnableHomogeneousPrologEpilog)
    return false;
  if (ReverseCSRRestoreSeq)
    return false;
  if (EnableRedZone)
    return false;

  // TODO: Windows is not supported yet.
  if (needsWinCFI(MF))
    return false;

  // TODO: SVE is not supported yet.
  if (getSVEStackSize(MF))
    return false;

  // Bail on stack adjustment needed on return for simplicity.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  if (MFI.hasVarSizedObjects() || RegInfo->hasStackRealignment(MF))
    return false;
  if (Exit && getArgumentStackToRestore(MF, *Exit))
    return false;

  return true;
}

/// Returns true if CSRs should be paired.
bool AArch64FrameLowering::producePairRegisters(MachineFunction &MF) const {
  return produceCompactUnwindFrame(MF) || homogeneousPrologEpilog(MF);
}

/// This is the biggest offset to the stack pointer we can encode in aarch64
/// instructions (without using a separate calculation and a temp register).
/// Note that the exception here are vector stores/loads which cannot encode any
/// displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()).
static const unsigned DefaultSafeSPDisplacement = 255;
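
// The unscaled addressing forms give this limit; e.g. (illustrative):
//
//   ldur x0, [sp, #255]   // encodable: 9-bit signed unscaled offset
//   ldur x0, [sp, #256]   // not encodable; expansion needs a scratch register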

/// Look at each instruction that references stack frames and return the stack
/// size limit beyond which some of these instructions will require a scratch
/// register during their expansion later.
static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
  // FIXME: For now, just conservatively guesstimate based on unscaled indexing
  // range. We'll end up allocating an unnecessary spill slot a lot, but
  // realistically that's not a big deal at this stage of the game.
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.isDebugInstr() || MI.isPseudo() ||
          MI.getOpcode() == AArch64::ADDXri ||
          MI.getOpcode() == AArch64::ADDSXri)
        continue;

      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isFI())
          continue;

        StackOffset Offset;
        if (isAArch64FrameOffsetLegal(MI, Offset, nullptr, nullptr, nullptr) ==
            AArch64FrameOffsetCannotUpdate)
          return 0;
      }
    }
  }
  return DefaultSafeSPDisplacement;
}

TargetStackID::Value
AArch64FrameLowering::getStackIDForScalableVectors() const {
  return TargetStackID::ScalableVector;
}

/// Returns the size of the fixed object area (allocated next to sp on entry)
/// On Win64 this may include a var args area and an UnwindHelp object for EH.
static unsigned getFixedObjectSize(const MachineFunction &MF,
                                   const AArch64FunctionInfo *AFI, bool IsWin64,
                                   bool IsFunclet) {
  if (!IsWin64 || IsFunclet) {
    return AFI->getTailCallReservedStack();
  } else {
    if (AFI->getTailCallReservedStack() != 0)
      report_fatal_error("cannot generate ABI-changing tail call for Win64");
    // Var args are stored here in the primary function.
    const unsigned VarArgsArea = AFI->getVarArgsGPRSize();
    // To support EH funclets we allocate an UnwindHelp object
    const unsigned UnwindHelpObject = (MF.hasEHFunclets() ? 8 : 0);
    return alignTo(VarArgsArea + UnwindHelpObject, 16);
  }
}
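
// E.g. (hypothetical Win64 vararg function): 24 bytes of spilled register
// varargs plus an 8-byte UnwindHelp object yields alignTo(24 + 8, 16) == 32
// bytes of fixed objects next to the incoming sp.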

/// Returns the size of the entire SVE stackframe (calleesaves + spills).
static StackOffset getSVEStackSize(const MachineFunction &MF) {
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  return StackOffset::getScalable((int64_t)AFI->getStackSizeSVE());
}

bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
  if (!EnableRedZone)
    return false;

  // Don't use the red zone if the function explicitly asks us not to.
  // This is typically used for kernel code.
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const unsigned RedZoneSize =
      Subtarget.getTargetLowering()->getRedZoneSize(MF.getFunction());
  if (!RedZoneSize)
    return false;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  uint64_t NumBytes = AFI->getLocalStackSize();

  return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize ||
           getSVEStackSize(MF));
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register.
bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();

  // Win64 EH requires a frame pointer if funclets are present, as the locals
  // are accessed off the frame pointer in both the parent function and the
  // funclets.
  if (MF.hasEHFunclets())
    return true;
  // Retain behavior of always omitting the FP for leaf functions when possible.
  if (MF.getTarget().Options.DisableFramePointerElim(MF))
    return true;
  if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
      MFI.hasStackMap() || MFI.hasPatchPoint() ||
      RegInfo->hasStackRealignment(MF))
    return true;
  // With large callframes around we may need to use FP to access the scavenging
  // emergency spillslot.
  //
  // Unfortunately some calls to hasFP() like machine verifier ->
  // getReservedReg() -> hasFP in the middle of global isel are too early
  // to know the max call frame size. Hopefully conservatively returning "true"
  // in those cases is fine.
  // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs.
  if (!MFI.isMaxCallFrameSizeComputed() ||
      MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement)
    return true;

  return false;
}

/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
/// not required, we reserve argument space for call sites in the function
/// immediately on entry to the current function. This eliminates the need for
/// add/sub sp brackets around call sites. Returns true if the call frame is
/// included as part of the stack frame.
bool
AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects();
}
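
// With a reserved call frame, the call-frame pseudos that call lowering emits
// become no-ops (sketch; the 16 is illustrative):
//
//   ADJCALLSTACKDOWN 16   // folded away: space was reserved in the prologue
//   bl callee
//   ADJCALLSTACKUP 16     // folded away likewise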

MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const AArch64InstrInfo *TII =
      static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
  DebugLoc DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  if (!hasReservedCallFrame(MF)) {
    int64_t Amount = I->getOperand(0).getImm();
    Amount = alignTo(Amount, getStackAlign());
    if (!IsDestroy)
      Amount = -Amount;

    // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it
    // doesn't have to pop anything), then the first operand will be zero too so
    // this adjustment is a no-op.
    if (CalleePopAmount == 0) {
      // FIXME: in-function stack adjustment for calls is limited to 24-bits
      // because there's no guaranteed temporary register available.
      //
      // ADD/SUB (immediate) has only LSL #0 and LSL #12 available.
      // 1) For offset <= 12-bit, we use LSL #0
      // 2) For 12-bit <= offset <= 24-bit, we use two instructions. One uses
      //    LSL #0, and the other uses LSL #12.
      //
      // Most call frames will be allocated at the start of a function so
      // this is OK, but it is a limitation that needs dealing with.
      assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
      emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP,
                      StackOffset::getFixed(Amount), TII);
    }
  } else if (CalleePopAmount != 0) {
    // If the calling convention demands that the callee pops arguments from the
    // stack, we want to add it back if we have a reserved call frame.
    assert(CalleePopAmount < 0xffffff && "call frame too large");
    emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(-(int64_t)CalleePopAmount), TII);
  }
  return MBB.erase(I);
}

void AArch64FrameLowering::emitCalleeSavedGPRLocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  for (const auto &Info : CSI) {
    if (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector)
      continue;

    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    unsigned DwarfReg = TRI.getDwarfRegNum(Info.getReg(), true);

    int64_t Offset =
        MFI.getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea();
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}

void AArch64FrameLowering::emitCalleeSavedSVELocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);
  AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();

  for (const auto &Info : CSI) {
    if (!(MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
      continue;

    // Not all unwinders may know about SVE registers, so assume the lowest
    // common denominator.
    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    unsigned Reg = Info.getReg();
    if (!static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg))
      continue;

    StackOffset Offset =
        StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) -
        StackOffset::getFixed(AFI.getCalleeSavedStackSize(MFI));

    unsigned CFIIndex = MF.addFrameInst(createCFAOffset(TRI, Reg, Offset));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}

static void insertCFISameValue(const MCInstrDesc &Desc, MachineFunction &MF,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator InsertPt,
                               unsigned DwarfReg) {
  unsigned CFIIndex =
      MF.addFrameInst(MCCFIInstruction::createSameValue(nullptr, DwarfReg));
  BuildMI(MBB, InsertPt, DebugLoc(), Desc).addCFIIndex(CFIIndex);
}

void AArch64FrameLowering::resetCFIToInitialState(
    MachineBasicBlock &MBB) const {

  MachineFunction &MF = *MBB.getParent();
  const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  const auto &TRI =
      static_cast<const AArch64RegisterInfo &>(*Subtarget.getRegisterInfo());
  const auto &MFI = *MF.getInfo<AArch64FunctionInfo>();

  const MCInstrDesc &CFIDesc = TII.get(TargetOpcode::CFI_INSTRUCTION);
  DebugLoc DL;

  // Reset the CFA to `SP + 0`.
  MachineBasicBlock::iterator InsertPt = MBB.begin();
  unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
      nullptr, TRI.getDwarfRegNum(AArch64::SP, true), 0));
  BuildMI(MBB, InsertPt, DL, CFIDesc).addCFIIndex(CFIIndex);

  // Flip the RA sign state.
  if (MFI.shouldSignReturnAddress(MF)) {
    CFIIndex = MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
    BuildMI(MBB, InsertPt, DL, CFIDesc).addCFIIndex(CFIIndex);
  }

  // Shadow call stack uses X18, reset it.
  if (needsShadowCallStackPrologueEpilogue(MF))
    insertCFISameValue(CFIDesc, MF, MBB, InsertPt,
                       TRI.getDwarfRegNum(AArch64::X18, true));

  // Emit .cfi_same_value for callee-saved registers.
  const std::vector<CalleeSavedInfo> &CSI =
      MF.getFrameInfo().getCalleeSavedInfo();
  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();
    if (!TRI.regNeedsCFI(Reg, Reg))
      continue;
    insertCFISameValue(CFIDesc, MF, MBB, InsertPt,
                       TRI.getDwarfRegNum(Reg, true));
  }
}

static void emitCalleeSavedRestores(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI,
                                    bool SVE) {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  for (const auto &Info : CSI) {
    if (SVE !=
        (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
      continue;

    unsigned Reg = Info.getReg();
    if (SVE &&
        !static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg))
      continue;

    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore(
        nullptr, TRI.getDwarfRegNum(Info.getReg(), true)));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameDestroy);
  }
}

void AArch64FrameLowering::emitCalleeSavedGPRRestores(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  emitCalleeSavedRestores(MBB, MBBI, false);
}

void AArch64FrameLowering::emitCalleeSavedSVERestores(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  emitCalleeSavedRestores(MBB, MBBI, true);
}

static MCRegister getRegisterOrZero(MCRegister Reg, bool HasSVE) {
  switch (Reg.id()) {
  default:
    // The called routine is expected to preserve r19-r28
    // r29 and r30 are used as frame pointer and link register resp.
    return MCRegister();

  // GPRs: report the canonical 64-bit register.
#define CASE(n)                                                                \
  case AArch64::W##n:                                                          \
  case AArch64::X##n:                                                          \
    return AArch64::X##n
  CASE(0);
  // ... CASE(1) through CASE(17) continue in the same pattern, one per line
  // (elided in this listing) ...
  CASE(18);
#undef CASE

  // FP/SIMD registers: report the widest form (a Z register when SVE is
  // available).
#define CASE(n)                                                                \
  case AArch64::B##n:                                                          \
  case AArch64::H##n:                                                          \
  case AArch64::S##n:                                                          \
  case AArch64::D##n:                                                          \
  case AArch64::Q##n:                                                          \
    return HasSVE ? AArch64::Z##n : AArch64::Q##n
  CASE(0);
  // ... CASE(1) through CASE(30) continue in the same pattern, one per line
  // (elided in this listing) ...
  CASE(31);
#undef CASE
  }
}

void AArch64FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
                                                MachineBasicBlock &MBB) const {
  // Insertion point.
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();

  // Fake a debug loc.
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  const MachineFunction &MF = *MBB.getParent();
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo &TRI = *STI.getRegisterInfo();

  BitVector GPRsToZero(TRI.getNumRegs());
  BitVector FPRsToZero(TRI.getNumRegs());
  bool HasSVE = STI.hasSVE();
  for (MCRegister Reg : RegsToZero.set_bits()) {
    if (TRI.isGeneralPurposeRegister(MF, Reg)) {
      // For GPRs, we only care to clear out the 64-bit register.
      if (MCRegister XReg = getRegisterOrZero(Reg, HasSVE))
        GPRsToZero.set(XReg);
    } else if (AArch64::FPR128RegClass.contains(Reg) ||
               AArch64::FPR64RegClass.contains(Reg) ||
               AArch64::FPR32RegClass.contains(Reg) ||
               AArch64::FPR16RegClass.contains(Reg) ||
               AArch64::FPR8RegClass.contains(Reg)) {
      // For FPRs, clear out the widest available form of the register.
      if (MCRegister XReg = getRegisterOrZero(Reg, HasSVE))
        FPRsToZero.set(XReg);
    }
  }

  const AArch64InstrInfo &TII = *STI.getInstrInfo();

  // Zero out GPRs.
  for (MCRegister Reg : GPRsToZero.set_bits())
    BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVi64imm), Reg).addImm(0);

  // Zero out FP/vector registers.
  for (MCRegister Reg : FPRsToZero.set_bits())
    if (HasSVE)
      BuildMI(MBB, MBBI, DL, TII.get(AArch64::DUP_ZI_D), Reg)
          .addImm(0)
          .addImm(0);
    else
      BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVIv2d_ns), Reg).addImm(0);

  if (HasSVE) {
    for (MCRegister PReg :
         {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
          AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
          AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
          AArch64::P15}) {
      if (RegsToZero[PReg])
        BuildMI(MBB, MBBI, DL, TII.get(AArch64::PFALSE), PReg);
    }
  }
}

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack pointer,
// but we would then have to make sure that we were in fact saving at least one
// callee-save register in the prologue, which is additional complexity that
// doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
  MachineFunction *MF = MBB->getParent();

  // If MBB is an entry block, use X9 as the scratch register
  if (&MF->front() == MBB)
    return AArch64::X9;

  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo();
  LivePhysRegs LiveRegs(TRI);
  LiveRegs.addLiveIns(*MBB);

  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = MF->getRegInfo().getCalleeSavedRegs();
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  // Prefer X9 since it was historically used for the prologue scratch reg.
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  if (LiveRegs.available(MRI, AArch64::X9))
    return AArch64::X9;

  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }
  return AArch64::NoRegister;
}

bool AArch64FrameLowering::canUseAsPrologue(
    const MachineBasicBlock &MBB) const {
  const MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  // Don't need a scratch register if we're not going to re-align the stack.
  if (!RegInfo->hasStackRealignment(*MF))
    return true;
  // Otherwise, we can use any block as long as it has a scratch register
  // available.
  return findScratchNonCalleeSaveRegister(TmpMBB) != AArch64::NoRegister;
}

static bool windowsRequiresStackProbe(MachineFunction &MF,
                                      uint64_t StackSizeInBytes) {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  if (!Subtarget.isTargetWindows())
    return false;
  const Function &F = MF.getFunction();
  // TODO: When implementing stack protectors, take that into account
  // for the probe threshold.
  unsigned StackProbeSize =
      F.getFnAttributeAsParsedInteger("stack-probe-size", 4096);
  return (StackSizeInBytes >= StackProbeSize) &&
         !F.hasFnAttribute("no-stack-arg-probe");
}

static bool needsWinCFI(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
         F.needsUnwindTableEntry();
}

bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
    MachineFunction &MF, uint64_t StackBumpBytes) const {
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (homogeneousPrologEpilog(MF))
    return false;

  if (AFI->getLocalStackSize() == 0)
    return false;

  // For WinCFI, if optimizing for size, prefer to not combine the stack bump
  // (to force a stp with predecrement) to match the packed unwind format,
  // provided that there actually are any callee saved registers to merge the
  // decrement with.
  // This is potentially marginally slower, but allows using the packed
  // unwind format for functions that both have a local area and callee saved
  // registers. Using the packed unwind format notably reduces the size of
  // the unwind info.
  if (needsWinCFI(MF) && AFI->getCalleeSavedStackSize() > 0 &&
      MF.getFunction().hasOptSize())
    return false;

  // 512 is the maximum immediate for stp/ldp that will be used for
  // callee-save save/restores
  if (StackBumpBytes >= 512 || windowsRequiresStackProbe(MF, StackBumpBytes))
    return false;

  if (MFI.hasVarSizedObjects())
    return false;

  if (RegInfo->hasStackRealignment(MF))
    return false;

  // This isn't strictly necessary, but it simplifies things a bit since the
  // current RedZone handling code assumes the SP is adjusted by the
  // callee-save save/restore code.
  if (canUseRedZone(MF))
    return false;

  // When there is an SVE area on the stack, always allocate the
  // callee-saves and spills/locals separately.
  if (getSVEStackSize(MF))
    return false;

  return true;
}

bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue(
    MachineBasicBlock &MBB, unsigned StackBumpBytes) const {
  if (!shouldCombineCSRLocalStackBump(*MBB.getParent(), StackBumpBytes))
    return false;

  if (MBB.empty())
    return true;

  // Disable combined SP bump if the last instruction is an MTE tag store. It
  // is almost always better to merge SP adjustment into those instructions.
  MachineBasicBlock::iterator LastI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastI != Begin) {
    --LastI;
    if (LastI->isTransient())
      continue;
    if (!LastI->getFlag(MachineInstr::FrameDestroy))
      break;
  }
  switch (LastI->getOpcode()) {
  case AArch64::STGloop:
  case AArch64::STZGloop:
  case AArch64::STGi:
  case AArch64::STZGi:
  case AArch64::ST2Gi:
  case AArch64::STZ2Gi:
    return false;
  default:
    return true;
  }
  llvm_unreachable("unreachable");
}

// Given a load or a store instruction, generate an appropriate unwinding SEH
// code on Windows.
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
                                             const TargetInstrInfo &TII,
                                             MachineInstr::MIFlag Flag) {
  unsigned Opc = MBBI->getOpcode();
  MachineBasicBlock *MBB = MBBI->getParent();
  MachineFunction &MF = *MBB->getParent();
  DebugLoc DL = MBBI->getDebugLoc();
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  int Imm = MBBI->getOperand(ImmIdx).getImm();
  MachineInstrBuilder MIB;
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  switch (Opc) {
  default:
    llvm_unreachable("No SEH Opcode for this instruction");
  case AArch64::LDPDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPDpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X))
              .addImm(Reg0)
              .addImm(Reg1)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::LDPXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPXpre: {
    Register Reg0 = MBBI->getOperand(1).getReg();
    Register Reg1 = MBBI->getOperand(2).getReg();
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    break;
  }
  case AArch64::LDRDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRDpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
              .addImm(Reg)
              .addImm(Imm)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::LDRXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRXpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg_X))
              .addImm(Reg)
              .addImm(Imm)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STPDi:
  case AArch64::LDPDi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP))
              .addImm(Reg0)
              .addImm(Reg1)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STPXi:
  case AArch64::LDPXi: {
    Register Reg0 = MBBI->getOperand(0).getReg();
    Register Reg1 = MBBI->getOperand(1).getReg();
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    break;
  }
  case AArch64::STRXui:
  case AArch64::LDRXui: {
    int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg))
              .addImm(Reg)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STRDui:
  case AArch64::LDRDui: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg))
              .addImm(Reg)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  }
  auto I = MBB->insertAfter(MBBI, MIB);
  I->setFlag(Flag);
  return I;
}

// Fix up the SEH opcode associated with the save/restore instruction.
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  MachineOperand *ImmOpnd = nullptr;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Fix the offset in the SEH instruction");
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
    break;
  }
  if (ImmOpnd)
    ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
}

// Convert callee-save register save/restore instruction to do stack pointer
// decrement/increment to allocate/deallocate the callee-save stack area by
// converting store/load to use pre/post increment version.
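//
// For example (illustrative), with a 16-byte callee-save area the first save
//
//   stp x29, x30, [sp]          // offset 0 within the callee-save area
//
// becomes a combined allocate-and-save
//
//   stp x29, x30, [sp, #-16]!   // pre-decrement sp by the area size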
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc,
    bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI,
    MachineInstr::MIFlag FrameFlag = MachineInstr::FrameSetup,
    int CFAOffset = 0) {
  unsigned NewOpc;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STPQi:
    NewOpc = AArch64::STPQpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    break;
  case AArch64::STRQui:
    NewOpc = AArch64::STRQpre;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDPQi:
    NewOpc = AArch64::LDPQpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    break;
  case AArch64::LDRQui:
    NewOpc = AArch64::LDRQpost;
    break;
  }

  // Get rid of the SEH code associated with the old instruction.
  if (NeedsWinCFI) {
    auto SEH = std::next(MBBI);
    if (AArch64InstrInfo::isSEHInstruction(*SEH))
      SEH->eraseFromParent();
  }

  TypeSize Scale = TypeSize::Fixed(1);
  unsigned Width;
  int64_t MinOffset, MaxOffset;
  bool Success = static_cast<const AArch64InstrInfo *>(TII)->getMemOpInfo(
      NewOpc, Scale, Width, MinOffset, MaxOffset);
  (void)Success;
  assert(Success && "unknown load/store opcode");

  // If the first store isn't right where we want SP then we can't fold the
  // update in so create a normal arithmetic instruction instead.
  MachineFunction &MF = *MBB.getParent();
  if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
      CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) {
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(CSStackSizeInc), TII, FrameFlag,
                    false, false, nullptr, EmitCFI,
                    StackOffset::getFixed(CFAOffset));

    return std::prev(MBBI);
  }

  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  MIB.addReg(AArch64::SP, RegState::Define);

  // Copy all operands other than the immediate offset.
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  assert(CSStackSizeInc % Scale == 0);
  MIB.addImm(CSStackSizeInc / (int)Scale);

  MIB.setMIFlags(MBBI->getFlags());
  MIB.setMemRefs(MBBI->memoperands());

  // Generate a new SEH code that corresponds to the new instruction.
  if (NeedsWinCFI) {
    *HasWinCFI = true;
    InsertSEH(*MIB, *TII, FrameFlag);
  }

  if (EmitCFI) {
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::cfiDefCfaOffset(nullptr, CFAOffset - CSStackSizeInc));
    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(FrameFlag);
  }

  return std::prev(MBB.erase(MBBI));
}

// Fixup callee-save register save/restore instructions to take into account
// combined SP bump by adding the local stack size to the stack offsets.
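//
// E.g. (illustrative): if a 32-byte local area is folded into the initial SP
// bump, a save previously at
//
//   stp x19, x20, [sp, #16]
//
// is rewritten to
//
//   stp x19, x20, [sp, #48]   // old offset + LocalStackSize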
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
                                              uint64_t LocalStackSize,
                                              bool NeedsWinCFI,
                                              bool *HasWinCFI) {
  if (AArch64InstrInfo::isSEHInstruction(MI))
    return;

  unsigned Opc = MI.getOpcode();
  unsigned Scale;
  switch (Opc) {
  case AArch64::STPXi:
  case AArch64::STRXui:
  case AArch64::STPDi:
  case AArch64::STRDui:
  case AArch64::LDPXi:
  case AArch64::LDRXui:
  case AArch64::LDPDi:
  case AArch64::LDRDui:
    Scale = 8;
    break;
  case AArch64::STPQi:
  case AArch64::STRQui:
  case AArch64::LDPQi:
  case AArch64::LDRQui:
    Scale = 16;
    break;
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  }

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes have scaled offsets.
  assert(LocalStackSize % Scale == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);

  if (NeedsWinCFI) {
    *HasWinCFI = true;
    auto MBBI = std::next(MachineBasicBlock::iterator(MI));
    assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
    assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
           "Expecting a SEH instruction");
    fixupSEHOpcode(MBBI, LocalStackSize);
  }
}

static bool isTargetWindows(const MachineFunction &MF) {
  return MF.getSubtarget<AArch64Subtarget>().isTargetWindows();
}

// Convenience function to determine whether I is an SVE callee save.
static bool IsSVECalleeSave(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  default:
    return false;
  case AArch64::STR_ZXI:
  case AArch64::STR_PXI:
  case AArch64::LDR_ZXI:
  case AArch64::LDR_PXI:
    return I->getFlag(MachineInstr::FrameSetup) ||
           I->getFlag(MachineInstr::FrameDestroy);
  }
}

static bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) {
  if (!(llvm::any_of(
            MF.getFrameInfo().getCalleeSavedInfo(),
            [](const auto &Info) { return Info.getReg() == AArch64::LR; }) &&
        MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack)))
    return false;

  if (!MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(18))
    report_fatal_error("Must reserve x18 to use shadow call stack");

  return true;
}

static void emitShadowCallStackPrologue(const TargetInstrInfo &TII,
                                        MachineFunction &MF,
                                        MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI,
                                        const DebugLoc &DL, bool NeedsWinCFI,
                                        bool NeedsUnwindInfo) {
  // Shadow call stack prolog: str x30, [x18], #8
  BuildMI(MBB, MBBI, DL, TII.get(AArch64::STRXpost))
      .addReg(AArch64::X18, RegState::Define)
      .addReg(AArch64::LR)
      .addReg(AArch64::X18)
      .addImm(8)
      .setMIFlag(MachineInstr::FrameSetup);

  // This instruction also makes x18 live-in to the entry block.
  MBB.addLiveIn(AArch64::X18);

  if (NeedsWinCFI)
    BuildMI(MBB, MBBI, DL, TII.get(AArch64::SEH_Nop))
        .setMIFlag(MachineInstr::FrameSetup);

  if (NeedsUnwindInfo) {
    // Emit a CFI instruction that causes 8 to be subtracted from the value of
    // x18 when unwinding past this frame.
    static const char CFIInst[] = {
        dwarf::DW_CFA_val_expression,
        18, // register
        2,  // length
        static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
        static_cast<char>(-8) & 0x7f, // addend (sleb128)
    };
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createEscape(
        nullptr, StringRef(CFIInst, sizeof(CFIInst))));
    BuildMI(MBB, MBBI, DL, TII.get(AArch64::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlag(MachineInstr::FrameSetup);
  }
}

static void emitShadowCallStackEpilogue(const TargetInstrInfo &TII,
                                        MachineFunction &MF,
                                        MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI,
                                        const DebugLoc &DL) {
  // Shadow call stack epilog: ldr x30, [x18, #-8]!
  BuildMI(MBB, MBBI, DL, TII.get(AArch64::LDRXpre))
      .addReg(AArch64::X18, RegState::Define)
      .addReg(AArch64::LR, RegState::Define)
      .addReg(AArch64::X18)
      .addImm(-8)
      .setMIFlag(MachineInstr::FrameDestroy);

  if (MF.getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo(MF)) {
    unsigned CFIIndex =
        MF.addFrameInst(MCCFIInstruction::createRestore(nullptr, 18));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameDestroy);
  }
}

// Define the current CFA rule to use the provided FP.
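// In DWARF terms this produces something like (offset value illustrative):
//
//   .cfi_def_cfa w29, 16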
static void emitDefineCFAWithFP(MachineFunction &MF, MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL, unsigned FixedObject) {
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *TRI = STI.getRegisterInfo();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();

  const int OffsetToFirstCalleeSaveFromFP =
      AFI->getCalleeSaveBaseToFrameRecordOffset() -
      AFI->getCalleeSavedStackSize();
  Register FramePtr = TRI->getFrameRegister(MF);
  unsigned Reg = TRI->getDwarfRegNum(FramePtr, true);
  unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
      nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP));
  BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex)
      .setMIFlags(MachineInstr::FrameSetup);
}

void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &F = MF.getFunction();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  bool EmitCFI = AFI->needsDwarfUnwindInfo(MF);
  bool EmitAsyncCFI = AFI->needsAsyncDwarfUnwindInfo(MF);
  bool HasFP = hasFP(MF);
  bool NeedsWinCFI = needsWinCFI(MF);
  bool HasWinCFI = false;
  auto Cleanup = make_scope_exit([&]() { MF.setHasWinCFI(HasWinCFI); });

  bool IsFunclet = MBB.isEHFuncletEntry();

  // At this point, we're going to decide whether or not the function uses a
  // redzone. In most cases, the function doesn't have a redzone so let's
  // assume that's false and set it to true in the case that there's a redzone.
  AFI->setHasRedZone(false);

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  const auto &MFnI = *MF.getInfo<AArch64FunctionInfo>();
  if (needsShadowCallStackPrologueEpilogue(MF))
    emitShadowCallStackPrologue(*TII, MF, MBB, MBBI, DL, NeedsWinCFI,
                                MFnI.needsDwarfUnwindInfo(MF));

  if (MFnI.shouldSignReturnAddress(MF)) {
    if (MFnI.shouldSignWithBKey()) {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITBKEY))
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // No SEH opcode for this one; it doesn't materialize into an
    // instruction on Windows.
    BuildMI(MBB, MBBI, DL,
            TII->get(MFnI.shouldSignWithBKey() ? AArch64::PACIBSP
                                               : AArch64::PACIASP))
        .setMIFlag(MachineInstr::FrameSetup);

    if (EmitCFI) {
      unsigned CFIIndex =
          MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    } else if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PACSignLR))
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }
  if (EmitCFI && MFnI.isMTETagged()) {
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITMTETAGGED))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // We signal the presence of a Swift extended frame to external tools by
  // storing FP with 0b0001 in bits 63:60. In normal userland operation a simple
  // ORR is sufficient, it is assumed a Swift kernel would initialize the TBI
  // bits so that is still true.
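  //
  // For instance, the "Always" mode below amounts to (illustrative):
  //
  //   orr x29, x29, #0x1000000000000000   // set bit 60 of fp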
  if (HasFP && AFI->hasSwiftAsyncContext()) {
    switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
    case SwiftAsyncFramePointerMode::DeploymentBased:
      if (Subtarget.swiftAsyncContextIsDynamicallySet()) {
        // The special symbol below is absolute and has a *value* that can be
        // combined with the frame pointer to signal an extended frame.
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::LOADgot), AArch64::X16)
            .addExternalSymbol("swift_async_extendedFramePointerFlags",
                               AArch64II::MO_GOT);
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXrs), AArch64::FP)
            .addUse(AArch64::FP)
            .addUse(AArch64::X16)
            .addImm(Subtarget.isTargetILP32() ? 32 : 0);
        break;
      }
      [[fallthrough]];

    case SwiftAsyncFramePointerMode::Always:
      // ORR x29, x29, #0x1000_0000_0000_0000
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXri), AArch64::FP)
          .addUse(AArch64::FP)
          .addImm(0x1100)
          .setMIFlag(MachineInstr::FrameSetup);
      break;

    case SwiftAsyncFramePointerMode::Never:
      break;
    }
  }

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Set tagged base pointer to the requested stack slot.
  // Ideally it should match SP value after prologue.
  std::optional<int> TBPI = AFI->getTaggedBasePointerIndex();
  if (TBPI)
    AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI));
  else
    AFI->setTaggedBasePointerOffset(MFI.getStackSize());

  const StackOffset &SVEStackSize = getSVEStackSize(MF);

  // getStackSize() includes all the locals in its size calculation. We don't
  // include these locals when computing the stack size of a funclet, as they
  // are allocated in the parent's stack frame and accessed via the frame
  // pointer from the funclet. We only save the callee saved registers in the
  // funclet, which are really the callee saved registers of the parent
  // function, including the funclet.
  int64_t NumBytes = IsFunclet ? getWinEHFuncletFrameSize(MF)
                               : MFI.getStackSize();
  if (!AFI->hasStackFrame() && !windowsRequiresStackProbe(MF, NumBytes)) {
    assert(!HasFP && "unexpected function without stack frame but with FP");
    assert(!SVEStackSize &&
           "unexpected function without stack frame but with SVE objects");
    // All of the stack allocation is for locals.
    AFI->setLocalStackSize(NumBytes);
    if (!NumBytes)
      return;
    // REDZONE: If the stack size is less than 128 bytes, we don't need
    // to actually allocate.
    if (canUseRedZone(MF)) {
      AFI->setHasRedZone(true);
      ++NumRedZoneFunctions;
    } else {
      emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                      StackOffset::getFixed(-NumBytes), TII,
                      MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
      if (EmitCFI) {
        // Label used to tie together the PROLOG_LABEL and the MachineMoves.
        MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
        // Encode the stack size of the leaf function.
        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::cfiDefCfaOffset(FrameLabel, NumBytes));
        BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex)
            .setMIFlags(MachineInstr::FrameSetup);
      }
    }

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
          .setMIFlag(MachineInstr::FrameSetup);
    }

    return;
  }

  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  unsigned FixedObject = getFixedObjectSize(MF, AFI, IsWin64, IsFunclet);

  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // All of the remaining stack allocations are for locals.
  AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  bool HomPrologEpilog = homogeneousPrologEpilog(MF);
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(-NumBytes), TII,
                    MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI,
                    EmitAsyncCFI);
    NumBytes = 0;
  } else if (HomPrologEpilog) {
    // Stack has been already adjusted.
    NumBytes -= PrologueSaveSize;
  } else if (PrologueSaveSize != 0) {
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
        MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI,
        EmitAsyncCFI);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Move past the saves of the callee-saved registers, fixing up the offsets
  // and pre-inc if we decided to combine the callee-save and local stack
  // pointer bump above.
  MachineBasicBlock::iterator End = MBB.end();
  while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup) &&
         !IsSVECalleeSave(MBBI)) {
    if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
    ++MBBI;
  }

  // For funclets the FP belongs to the containing function.
  if (!IsFunclet && HasFP) {
    // Only set up FP if we actually need to.
    int64_t FPOffset = AFI->getCalleeSaveBaseToFrameRecordOffset();
    if (CombineSPBump)
      FPOffset += AFI->getLocalStackSize();

    if (AFI->hasSwiftAsyncContext()) {
      // Before we update the live FP we have to ensure there's a valid (or
      // null) asynchronous context in its slot just before FP in the frame
      // record, so store it now.
      const auto &Attrs = MF.getFunction().getAttributes();
      bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
      if (HaveInitialContext)
        MBB.addLiveIn(AArch64::X22);
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::StoreSwiftAsyncContext))
          .addUse(HaveInitialContext ? AArch64::X22 : AArch64::XZR)
          .addUse(AArch64::SP)
          .addImm(FPOffset - 8)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    if (HomPrologEpilog) {
      auto Prolog = MBBI;
      --Prolog;
      assert(Prolog->getOpcode() == AArch64::HOM_Prolog);
      Prolog->addOperand(MachineOperand::CreateImm(FPOffset));
    } else {
      // Issue    sub fp, sp, FPOffset or
      //          mov fp, sp    when FPOffset is zero.
      // Note: All stores of callee-saved registers are marked as "FrameSetup".
      // This code marks the instruction(s) that set the FP also.
      emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP,
                      StackOffset::getFixed(FPOffset), TII,
                      MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
      if (NeedsWinCFI && HasWinCFI) {
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
            .setMIFlag(MachineInstr::FrameSetup);
        // After setting up the FP, the rest of the prolog doesn't need to be
        // included in the SEH unwind info.
        NeedsWinCFI = false;
      }
    }
    if (EmitAsyncCFI)
      emitDefineCFAWithFP(MF, MBB, MBBI, DL, FixedObject);
  }

  // Now emit the moves for whatever callee saved regs we have (including FP,
  // LR if those are saved). Frame instructions for SVE registers are emitted
  // later, after the instructions which actually save SVE regs.
  if (EmitAsyncCFI)
    emitCalleeSavedGPRLocations(MBB, MBBI);

  // Alignment is required for the parent frame, not the funclet
  const bool NeedsRealignment =
      NumBytes && !IsFunclet && RegInfo->hasStackRealignment(MF);
  int64_t RealignmentPadding =
      (NeedsRealignment && MFI.getMaxAlign() > Align(16))
          ? MFI.getMaxAlign().value() - 16
          : 0;

  if (windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) {
    uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
    if (NeedsWinCFI) {
      HasWinCFI = true;
      // alloc_l can hold at most 256MB, so assume that NumBytes doesn't
      // exceed this amount. We need to move at most 2^24 - 1 into x15.
      // This is at most two instructions, MOVZ followed by MOVK.
      // TODO: Fix to use multiple stack alloc unwind codes for stacks
      // exceeding 256MB in size.
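      // For example (illustrative): a 1MiB allocation gives
      // NumWords == 0x10000, which materializes as
      //   movz x15, #0x0000
      //   movk x15, #0x0001, lsl #16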
      if (NumBytes >= (1 << 28))
        report_fatal_error("Stack size cannot exceed 256MB for stack "
                           "unwinding purposes");

      uint32_t LowNumWords = NumWords & 0xFFFF;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X15)
          .addImm(LowNumWords)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
          .setMIFlag(MachineInstr::FrameSetup);
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
          .setMIFlag(MachineInstr::FrameSetup);
      if ((NumWords & 0xFFFF0000) != 0) {
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15)
            .addReg(AArch64::X15)
            .addImm((NumWords & 0xFFFF0000) >> 16) // High half
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16))
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }
    } else {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), AArch64::X15)
          .addImm(NumWords)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    const char *ChkStk = Subtarget.getChkStkName();
    switch (MF.getTarget().getCodeModel()) {
    case CodeModel::Tiny:
    case CodeModel::Small:
    case CodeModel::Medium:
    case CodeModel::Kernel:
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL))
          .addExternalSymbol(ChkStk)
          .addReg(AArch64::X15, RegState::Implicit)
          .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead)
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }
      break;
    case CodeModel::Large:
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVaddrEXT))
          .addReg(AArch64::X16, RegState::Define)
          .addExternalSymbol(ChkStk)
          .addExternalSymbol(ChkStk)
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }

      BuildMI(MBB, MBBI, DL, TII->get(getBLRCallOpcode(MF)))
          .addReg(AArch64::X16, RegState::Kill)
          .addReg(AArch64::X15, RegState::Implicit | RegState::Define)
          .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead)
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }
      break;
    }
1728 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBXrx64), AArch64::SP)
1729 .addReg(AArch64::SP, RegState::Kill)
1730 .addReg(AArch64::X15, RegState::Kill)
1731 .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 4))
1732 .setMIFlags(MachineInstr::FrameSetup);
1735 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
1736 .addImm(NumWords * 16)
1737 .setMIFlag(MachineInstr::FrameSetup);
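// The preceding SUB computes SP = SP - (x15 << 4), scaling the probed size
// in x15 (kept in 16-byte units) back to bytes in a single instruction;
// continuing the sketch above, x15 = 0x10000 lowers SP by exactly 1 MiB.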
1741 if (RealignmentPadding > 0) {
1742 if (RealignmentPadding >= 4096) {
1743 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm))
1744 .addReg(AArch64::X16, RegState::Define)
1745 .addImm(RealignmentPadding)
1746 .setMIFlags(MachineInstr::FrameSetup);
1747 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXrx64), AArch64::X15)
1748 .addReg(AArch64::SP)
1749 .addReg(AArch64::X16, RegState::Kill)
1750 .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 0))
1751 .setMIFlag(MachineInstr::FrameSetup);
1752 } else {
1753 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), AArch64::X15)
1754 .addReg(AArch64::SP)
1755 .addImm(RealignmentPadding)
1756 .addImm(0)
1757 .setMIFlag(MachineInstr::FrameSetup);
1758 }
1760 uint64_t AndMask = ~(MFI.getMaxAlign().value() - 1);
1761 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
1762 .addReg(AArch64::X15, RegState::Kill)
1763 .addImm(AArch64_AM::encodeLogicalImmediate(AndMask, 64));
1764 AFI->setStackRealigned(true);
1766 // No need for SEH instructions here; if we're realigning the stack,
1767 // we've set a frame pointer and already finished the SEH prologue.
1768 assert(!NeedsWinCFI);
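// Worked example (sketch): for MFI.getMaxAlign() == 64, AndMask = ~63 =
// 0xFFFFFFFFFFFFFFC0, so the AND rounds the padded address in x15 down to
// the next 64-byte boundary before it becomes the new SP.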
1772 StackOffset AllocateBefore = SVEStackSize, AllocateAfter = {};
1773 MachineBasicBlock::iterator CalleeSavesBegin = MBBI, CalleeSavesEnd = MBBI;
1775 // Process the SVE callee-saves to determine what space needs to be
1776 // allocated.
1777 if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
1778 // Find callee save instructions in frame.
1779 CalleeSavesBegin = MBBI;
1780 assert(IsSVECalleeSave(CalleeSavesBegin) && "Unexpected instruction");
1781 while (IsSVECalleeSave(MBBI) && MBBI != MBB.getFirstTerminator())
1782 ++MBBI;
1783 CalleeSavesEnd = MBBI;
1785 AllocateBefore = StackOffset::getScalable(CalleeSavedSize);
1786 AllocateAfter = SVEStackSize - AllocateBefore;
1789 // Allocate space for the callee saves (if any).
1790 emitFrameOffset(
1791 MBB, CalleeSavesBegin, DL, AArch64::SP, AArch64::SP, -AllocateBefore, TII,
1792 MachineInstr::FrameSetup, false, false, nullptr,
1793 EmitAsyncCFI && !HasFP && AllocateBefore,
1794 StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes));
1797 emitCalleeSavedSVELocations(MBB, CalleeSavesEnd);
1799 // Finally allocate remaining SVE stack space.
1800 emitFrameOffset(MBB, CalleeSavesEnd, DL, AArch64::SP, AArch64::SP,
1801 -AllocateAfter, TII, MachineInstr::FrameSetup, false, false,
1802 nullptr, EmitAsyncCFI && !HasFP && AllocateAfter,
1803 AllocateBefore + StackOffset::getFixed(
1804 (int64_t)MFI.getStackSize() - NumBytes));
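// Illustrative numbers (sketch): with SVEStackSize = 48 scalable bytes and
// a 16-byte scalable callee-save area, AllocateBefore = 16 and
// AllocateAfter = 32, so the SVE CSR stores sit between the two decrements.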
1806 // Allocate space for the rest of the frame.
1808 unsigned scratchSPReg = AArch64::SP;
1810 if (NeedsRealignment) {
1811 scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
1812 assert(scratchSPReg != AArch64::NoRegister);
1815 // If we're a leaf function, try using the red zone.
1816 if (!canUseRedZone(MF)) {
1817 // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
1818 // the correct value here, as NumBytes also includes padding bytes,
1819 // which shouldn't be counted here.
1820 emitFrameOffset(
1821 MBB, MBBI, DL, scratchSPReg, AArch64::SP,
1822 StackOffset::getFixed(-NumBytes), TII, MachineInstr::FrameSetup,
1823 false, NeedsWinCFI, &HasWinCFI, EmitAsyncCFI && !HasFP,
1825 StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes));
1827 if (NeedsRealignment) {
1828 assert(MFI.getMaxAlign() > Align(1));
1829 assert(scratchSPReg != AArch64::SP);
1831 // SUB X9, SP, NumBytes
1832 // -- X9 is temporary register, so shouldn't contain any live data here,
1833 // -- free to use. This is already produced by emitFrameOffset above.
1834 // AND SP, X9, 0b11111...0000
1835 uint64_t AndMask = ~(MFI.getMaxAlign().value() - 1);
1837 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
1838 .addReg(scratchSPReg, RegState::Kill)
1839 .addImm(AArch64_AM::encodeLogicalImmediate(AndMask, 64));
1840 AFI->setStackRealigned(true);
1842 // No need for SEH instructions here; if we're realigning the stack,
1843 // we've set a frame pointer and already finished the SEH prologue.
1844 assert(!NeedsWinCFI);
1848 // If we need a base pointer, set it up here. It's whatever the value of the
1849 // stack pointer is at this point. Any variable size objects will be allocated
1850 // after this, so we can still use the base pointer to reference locals.
1852 // FIXME: Clarify FrameSetup flags here.
1853 // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
1854 // needed.
1855 // For funclets the BP belongs to the containing function.
1856 if (!IsFunclet && RegInfo->hasBasePointer(MF)) {
1857 TII->copyPhysReg(MBB, MBBI, DL, RegInfo->getBaseRegister(), AArch64::SP,
1858 false);
1859 if (NeedsWinCFI) {
1860 HasWinCFI = true;
1861 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
1862 .setMIFlag(MachineInstr::FrameSetup);
1863 }
1864 }
1866 // The very last FrameSetup instruction indicates the end of prologue. Emit a
1867 // SEH opcode indicating the prologue end.
1868 if (NeedsWinCFI && HasWinCFI) {
1869 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
1870 .setMIFlag(MachineInstr::FrameSetup);
1873 // SEH funclets are passed the frame pointer in X1. If the parent
1874 // function uses the base register, then the base register is used
1875 // directly, and is not retrieved from X1.
1876 if (IsFunclet && F.hasPersonalityFn()) {
1877 EHPersonality Per = classifyEHPersonality(F.getPersonalityFn());
1878 if (isAsynchronousEHPersonality(Per)) {
1879 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), AArch64::FP)
1880 .addReg(AArch64::X1)
1881 .setMIFlag(MachineInstr::FrameSetup);
1882 MBB.addLiveIn(AArch64::X1);
1886 if (EmitCFI && !EmitAsyncCFI) {
1887 if (HasFP) {
1888 emitDefineCFAWithFP(MF, MBB, MBBI, DL, FixedObject);
1889 } else {
1890 StackOffset TotalSize =
1891 SVEStackSize + StackOffset::getFixed((int64_t)MFI.getStackSize());
1892 unsigned CFIIndex = MF.addFrameInst(createDefCFA(
1893 *RegInfo, /*FrameReg=*/AArch64::SP, /*Reg=*/AArch64::SP, TotalSize,
1894 /*LastAdjustmentWasScalable=*/false));
1895 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
1896 .addCFIIndex(CFIIndex)
1897 .setMIFlags(MachineInstr::FrameSetup);
1899 emitCalleeSavedGPRLocations(MBB, MBBI);
1900 emitCalleeSavedSVELocations(MBB, MBBI);
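// For reference, a typical small non-Darwin frame built by this prologue
// looks like (illustrative only; the exact sequence depends on the options
// handled above):
//   stp x29, x30, [sp, #-16]!   // allocate CSR area, save the frame record
//   mov x29, sp                 // establish FP
//   sub sp, sp, #LocalSize      // allocate fixed-size locals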
1904 static void InsertReturnAddressAuth(MachineFunction &MF, MachineBasicBlock &MBB,
1905 bool NeedsWinCFI, bool *HasWinCFI) {
1906 const auto &MFI = *MF.getInfo<AArch64FunctionInfo>();
1907 if (!MFI.shouldSignReturnAddress(MF))
1908 return;
1909 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1910 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1912 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
1913 DebugLoc DL;
1914 if (MBBI != MBB.end())
1915 DL = MBBI->getDebugLoc();
1917 // The AUTIASP instruction assembles to a hint instruction before v8.3a so
1918 // this instruction can safely be used for any v8a architecture.
1919 // From v8.3a onwards there are optimised authenticate LR and return
1920 // instructions, namely RETA{A,B}, that can be used instead. In this case the
1921 // DW_CFA_AARCH64_negate_ra_state can't be emitted.
1922 if (Subtarget.hasPAuth() &&
1923 !MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack) &&
1924 MBBI != MBB.end() && MBBI->getOpcode() == AArch64::RET_ReallyLR &&
1926 BuildMI(MBB, MBBI, DL,
1927 TII->get(MFI.shouldSignWithBKey() ? AArch64::RETAB : AArch64::RETAA))
1928 .copyImplicitOps(*MBBI);
1929 MBB.erase(MBBI);
1930 } else {
1931 BuildMI(
1932 MBB, MBBI, DL,
1933 TII->get(MFI.shouldSignWithBKey() ? AArch64::AUTIBSP : AArch64::AUTIASP))
1934 .setMIFlag(MachineInstr::FrameDestroy);
1936 unsigned CFIIndex =
1937 MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
1938 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
1939 .addCFIIndex(CFIIndex)
1940 .setMIFlags(MachineInstr::FrameDestroy);
1941 if (NeedsWinCFI) {
1942 *HasWinCFI = true;
1943 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PACSignLR))
1944 .setMIFlag(MachineInstr::FrameDestroy);
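// The two shapes this function produces, for reference (illustrative):
//   v8.3a+ with a plain RET:   retaa / retab      (authenticate and return)
//   otherwise:                 autiasp / autibsp  (authenticate LR), then ret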
1949 static bool isFuncletReturnInstr(const MachineInstr &MI) {
1950 switch (MI.getOpcode()) {
1951 default:
1952 return false;
1953 case AArch64::CATCHRET:
1954 case AArch64::CLEANUPRET:
1955 return true;
1956 }
1959 void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
1960 MachineBasicBlock &MBB) const {
1961 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
1962 MachineFrameInfo &MFI = MF.getFrameInfo();
1963 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1964 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1966 bool NeedsWinCFI = needsWinCFI(MF);
1967 bool EmitCFI =
1968 MF.getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo(MF);
1969 bool HasWinCFI = false;
1970 bool IsFunclet = false;
1971 auto WinCFI = make_scope_exit([&]() { assert(HasWinCFI == MF.hasWinCFI()); });
1972 DebugLoc DL;
1973 if (MBB.end() != MBBI) {
1974 DL = MBBI->getDebugLoc();
1975 IsFunclet = isFuncletReturnInstr(*MBBI);
1978 auto FinishingTouches = make_scope_exit([&]() {
1979 InsertReturnAddressAuth(MF, MBB, NeedsWinCFI, &HasWinCFI);
1980 if (needsShadowCallStackPrologueEpilogue(MF))
1981 emitShadowCallStackEpilogue(*TII, MF, MBB, MBB.getFirstTerminator(), DL);
1983 emitCalleeSavedGPRRestores(MBB, MBB.getFirstTerminator());
1984 if (NeedsWinCFI && HasWinCFI) {
1985 BuildMI(MBB, MBB.getFirstTerminator(), DL,
1986 TII->get(AArch64::SEH_EpilogEnd))
1987 .setMIFlag(MachineInstr::FrameDestroy);
1990 int64_t NumBytes = IsFunclet ? getWinEHFuncletFrameSize(MF)
1991 : MFI.getStackSize();
1992 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
1994 // All calls are tail calls in GHC calling conv, and functions have no
1995 // prologue/epilogue.
1996 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
1997 return;
1999 // How much of the stack used by incoming arguments this function is expected
2000 // to restore in this particular epilogue.
2001 int64_t ArgumentStackToRestore = getArgumentStackToRestore(MF, MBB);
2002 bool IsWin64 =
2003 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
2004 unsigned FixedObject = getFixedObjectSize(MF, AFI, IsWin64, IsFunclet);
2006 int64_t AfterCSRPopSize = ArgumentStackToRestore;
2007 auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
2008 // We cannot rely on the local stack size set in emitPrologue if the function
2009 // has funclets, as funclets have different local stack size requirements, and
2010 // the current value set in emitPrologue may be that of the containing
2011 // function.
2012 if (MF.hasEHFunclets())
2013 AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
2014 if (homogeneousPrologEpilog(MF, &MBB)) {
2015 assert(!NeedsWinCFI);
2016 auto LastPopI = MBB.getFirstTerminator();
2017 if (LastPopI != MBB.begin()) {
2018 auto HomogeneousEpilog = std::prev(LastPopI);
2019 if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
2020 LastPopI = HomogeneousEpilog;
2023 // Adjust local stack
2024 emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
2025 StackOffset::getFixed(AFI->getLocalStackSize()), TII,
2026 MachineInstr::FrameDestroy, false, NeedsWinCFI);
2028 // SP has already been adjusted while restoring the callee-saved regs.
2029 // We've already bailed out of the cases that adjust SP for arguments.
2030 assert(AfterCSRPopSize == 0);
2031 return;
2033 bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(MBB, NumBytes);
2034 // Assume we can't combine the last pop with the sp restore.
2036 bool CombineAfterCSRBump = false;
2037 if (!CombineSPBump && PrologueSaveSize != 0) {
2038 MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
2039 while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
2040 AArch64InstrInfo::isSEHInstruction(*Pop))
2041 Pop = std::prev(Pop);
2042 // Converting the last ldp to a post-index ldp is valid only if the last
2043 // ldp's offset is 0.
2044 const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
2045 // If the offset is 0 and the AfterCSR pop is not actually trying to
2046 // allocate more stack for arguments (in space that an untimely interrupt
2047 // may clobber), convert it to a post-index ldp.
2048 if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
2049 convertCalleeSaveRestoreToSPPrePostIncDec(
2050 MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, EmitCFI,
2051 MachineInstr::FrameDestroy, PrologueSaveSize);
2052 } else {
2053 // If not, make sure to emit an add after the last ldp.
2054 // We're doing this by transferring the size to be restored from the
2055 // adjustment *before* the CSR pops to the adjustment *after* the CSR
2056 // pops.
2057 AfterCSRPopSize += PrologueSaveSize;
2058 CombineAfterCSRBump = true;
2062 // Move past the restores of the callee-saved registers.
2063 // If we plan on combining the sp bump of the local stack size and the callee
2064 // save stack size, we might need to adjust the CSR save and restore offsets.
2065 MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
2066 MachineBasicBlock::iterator Begin = MBB.begin();
2067 while (LastPopI != Begin) {
2068 --LastPopI;
2069 if (!LastPopI->getFlag(MachineInstr::FrameDestroy) ||
2070 IsSVECalleeSave(LastPopI)) {
2071 ++LastPopI;
2072 break;
2073 } else if (CombineSPBump)
2074 fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize(),
2075 NeedsWinCFI, &HasWinCFI);
2078 if (MF.hasWinCFI()) {
2079 // If the prologue didn't contain any SEH opcodes and didn't set the
2080 // MF.hasWinCFI() flag, assume the epilogue won't either, and skip the
2081 // EpilogStart - to avoid generating CFI for functions that don't need it.
2082 // (And as we didn't generate any prologue at all, it would be asymmetrical
2083 // to the epilogue.) By the end of the function, we assert that
2084 // HasWinCFI is equal to MF.hasWinCFI(), to verify this assumption.
2085 HasWinCFI = true;
2086 BuildMI(MBB, LastPopI, DL, TII->get(AArch64::SEH_EpilogStart))
2087 .setMIFlag(MachineInstr::FrameDestroy);
2090 if (hasFP(MF) && AFI->hasSwiftAsyncContext()) {
2091 switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
2092 case SwiftAsyncFramePointerMode::DeploymentBased:
2093 // Avoid the reload as it is GOT relative, and instead fall back to the
2094 // hardcoded value below. This allows a mismatch between the OS and
2095 // application without immediately terminating on the difference.
2096 [[fallthrough]];
2097 case SwiftAsyncFramePointerMode::Always:
2098 // We need to reset FP to its untagged state on return. Bit 60 is
2099 // currently used to show the presence of an extended frame.
2101 // BIC x29, x29, #0x1000_0000_0000_0000
2102 BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::ANDXri),
2104 .addUse(AArch64::FP)
2105 .addImm(AArch64_AM::encodeLogicalImmediate(0xefffffffffffffff, 64))
2106 .setMIFlag(MachineInstr::FrameDestroy);
2107 break;
2109 case SwiftAsyncFramePointerMode::Never:
2110 break;
2111 }
2112 }
2114 const StackOffset &SVEStackSize = getSVEStackSize(MF);
2116 // If there is a single SP update, insert it before the ret and we're done.
2117 if (CombineSPBump) {
2118 assert(!SVEStackSize && "Cannot combine SP bump with SVE");
2120 // When we are about to restore the CSRs, the CFA register is SP again.
2121 if (EmitCFI && hasFP(MF)) {
2122 const AArch64RegisterInfo &RegInfo = *Subtarget.getRegisterInfo();
2123 unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
2124 unsigned CFIIndex =
2125 MF.addFrameInst(MCCFIInstruction::cfiDefCfa(nullptr, Reg, NumBytes));
2126 BuildMI(MBB, LastPopI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
2127 .addCFIIndex(CFIIndex)
2128 .setMIFlags(MachineInstr::FrameDestroy);
2131 emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
2132 StackOffset::getFixed(NumBytes + (int64_t)AfterCSRPopSize),
2133 TII, MachineInstr::FrameDestroy, false, NeedsWinCFI,
2134 &HasWinCFI, EmitCFI, StackOffset::getFixed(NumBytes));
2135 return;
2136 }
2138 NumBytes -= PrologueSaveSize;
2139 assert(NumBytes >= 0 && "Negative stack allocation size!?");
2141 // Process the SVE callee-saves to determine what space needs to be
2142 // deallocated.
2143 StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
2144 MachineBasicBlock::iterator RestoreBegin = LastPopI, RestoreEnd = LastPopI;
2145 if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
2146 RestoreBegin = std::prev(RestoreEnd);
2147 while (RestoreBegin != MBB.begin() &&
2148 IsSVECalleeSave(std::prev(RestoreBegin)))
2149 --RestoreBegin;
2151 assert(IsSVECalleeSave(RestoreBegin) &&
2152 IsSVECalleeSave(std::prev(RestoreEnd)) && "Unexpected instruction");
2154 StackOffset CalleeSavedSizeAsOffset =
2155 StackOffset::getScalable(CalleeSavedSize);
2156 DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
2157 DeallocateAfter = CalleeSavedSizeAsOffset;
2160 // Deallocate the SVE area.
2162 // If we have stack realignment or variable sized objects on the stack,
2163 // restore the stack pointer from the frame pointer prior to SVE CSR
2165 if (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) {
2166 if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
2167 // Set SP to start of SVE callee-save area from which they can
2168 // be reloaded. The code below will deallocate the stack
2169 // space by moving FP -> SP.
2170 emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::FP,
2171 StackOffset::getScalable(-CalleeSavedSize), TII,
2172 MachineInstr::FrameDestroy);
2175 if (AFI->getSVECalleeSavedStackSize()) {
2176 // Deallocate the non-SVE locals first before we can deallocate (and
2177 // restore callee saves) from the SVE area.
2178 emitFrameOffset(
2179 MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
2180 StackOffset::getFixed(NumBytes), TII, MachineInstr::FrameDestroy,
2181 false, false, nullptr, EmitCFI && !hasFP(MF),
2182 SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize));
2183 NumBytes = 0;
2184 }
2186 emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
2187 DeallocateBefore, TII, MachineInstr::FrameDestroy, false,
2188 false, nullptr, EmitCFI && !hasFP(MF),
2190 StackOffset::getFixed(NumBytes + PrologueSaveSize));
2192 emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
2193 DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
2194 false, nullptr, EmitCFI && !hasFP(MF),
2196 StackOffset::getFixed(NumBytes + PrologueSaveSize));
2199 emitCalleeSavedSVERestores(MBB, RestoreEnd);
2202 if (!hasFP(MF)) {
2203 bool RedZone = canUseRedZone(MF);
2204 // If this was a redzone leaf function, we don't need to restore the
2205 // stack pointer (but we may need to pop stack args for fastcc).
2206 if (RedZone && AfterCSRPopSize == 0)
2207 return;
2209 // Pop the local variables off the stack. If there are no callee-saved
2210 // registers, it means we are actually positioned at the terminator and can
2211 // combine stack increment for the locals and the stack increment for
2212 // callee-popped arguments into (possibly) a single instruction and be done.
2213 bool NoCalleeSaveRestore = PrologueSaveSize == 0;
2214 int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
2215 if (NoCalleeSaveRestore)
2216 StackRestoreBytes += AfterCSRPopSize;
2218 emitFrameOffset(
2219 MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
2220 StackOffset::getFixed(StackRestoreBytes), TII,
2221 MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI, EmitCFI,
2222 StackOffset::getFixed((RedZone ? 0 : NumBytes) + PrologueSaveSize));
2224 // If we were able to combine the local stack pop with the argument pop,
2225 // then we're done.
2226 if (NoCalleeSaveRestore || AfterCSRPopSize == 0) {
2227 return;
2228 }
2230 NumBytes = 0;
2231 }
2233 // Restore the original stack pointer.
2234 // FIXME: Rather than doing the math here, we should instead just use
2235 // non-post-indexed loads for the restores if we aren't actually going to
2236 // be able to save any instructions.
2237 if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) {
2238 emitFrameOffset(
2239 MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
2240 StackOffset::getFixed(-AFI->getCalleeSaveBaseToFrameRecordOffset()),
2241 TII, MachineInstr::FrameDestroy, false, NeedsWinCFI);
2242 } else if (NumBytes)
2243 emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
2244 StackOffset::getFixed(NumBytes), TII,
2245 MachineInstr::FrameDestroy, false, NeedsWinCFI);
2247 // When we are about to restore the CSRs, the CFA register is SP again.
2248 if (EmitCFI && hasFP(MF)) {
2249 const AArch64RegisterInfo &RegInfo = *Subtarget.getRegisterInfo();
2250 unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
2251 unsigned CFIIndex = MF.addFrameInst(
2252 MCCFIInstruction::cfiDefCfa(nullptr, Reg, PrologueSaveSize));
2253 BuildMI(MBB, LastPopI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
2254 .addCFIIndex(CFIIndex)
2255 .setMIFlags(MachineInstr::FrameDestroy);
2258 // This must be placed after the callee-save restore code because that code
2259 // assumes the SP is at the same location as it was after the callee-save save
2260 // code in the prologue.
2261 if (AfterCSRPopSize) {
2262 assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
2263 "interrupt may have clobbered");
2265 emitFrameOffset(
2266 MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
2267 StackOffset::getFixed(AfterCSRPopSize), TII, MachineInstr::FrameDestroy,
2268 false, NeedsWinCFI, &HasWinCFI, EmitCFI,
2269 StackOffset::getFixed(CombineAfterCSRBump ? PrologueSaveSize : 0));
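// For reference, the matching epilogue for the small frame sketched in
// emitPrologue is (illustrative only):
//   add sp, sp, #LocalSize      // deallocate locals
//   ldp x29, x30, [sp], #16     // restore frame record, pop CSR area
//   ret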
2273 bool AArch64FrameLowering::enableCFIFixup(MachineFunction &MF) const {
2274 return TargetFrameLowering::enableCFIFixup(MF) &&
2275 MF.getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo(MF);
2278 /// getFrameIndexReference - Provide a base+offset reference to an FI slot for
2279 /// debug info. It's the same as what we use for resolving the code-gen
2280 /// references for now. FIXME: This can go wrong when references are
2281 /// SP-relative and simple call frames aren't used.
2282 StackOffset
2283 AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
2284 Register &FrameReg) const {
2285 return resolveFrameIndexReference(
2286 MF, FI, FrameReg,
2287 /*PreferFP=*/
2288 MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress),
2289 /*ForSimm=*/false);
2292 StackOffset
2293 AArch64FrameLowering::getNonLocalFrameIndexReference(const MachineFunction &MF,
2294 int FI) const {
2295 return StackOffset::getFixed(getSEHFrameIndexOffset(MF, FI));
2298 static StackOffset getFPOffset(const MachineFunction &MF,
2299 int64_t ObjectOffset) {
2300 const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
2301 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
2302 bool IsWin64 =
2303 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
2304 unsigned FixedObject =
2305 getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false);
2306 int64_t CalleeSaveSize = AFI->getCalleeSavedStackSize(MF.getFrameInfo());
2307 int64_t FPAdjust =
2308 CalleeSaveSize - AFI->getCalleeSaveBaseToFrameRecordOffset();
2309 return StackOffset::getFixed(ObjectOffset + FixedObject + FPAdjust);
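// Worked example (sketch, arbitrary numbers): with FixedObject = 16, a
// 64-byte callee-save area and the frame record 48 bytes into it, FPAdjust
// is 64 - 48 = 16, so ObjectOffset 8 resolves to 8 + 16 + 16 = FP + 40.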
2312 static StackOffset getStackOffset(const MachineFunction &MF,
2313 int64_t ObjectOffset) {
2314 const auto &MFI = MF.getFrameInfo();
2315 return StackOffset::getFixed(ObjectOffset + (int64_t)MFI.getStackSize());
2318 // TODO: This function currently does not work for scalable vectors.
2319 int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF,
2320 int FI) const {
2321 const auto *RegInfo = static_cast<const AArch64RegisterInfo *>(
2322 MF.getSubtarget().getRegisterInfo());
2323 int ObjectOffset = MF.getFrameInfo().getObjectOffset(FI);
2324 return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
2325 ? getFPOffset(MF, ObjectOffset).getFixed()
2326 : getStackOffset(MF, ObjectOffset).getFixed();
2329 StackOffset AArch64FrameLowering::resolveFrameIndexReference(
2330 const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP,
2331 bool ForSimm) const {
2332 const auto &MFI = MF.getFrameInfo();
2333 int64_t ObjectOffset = MFI.getObjectOffset(FI);
2334 bool isFixed = MFI.isFixedObjectIndex(FI);
2335 bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
2336 return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
2337 PreferFP, ForSimm);
2338 }
2340 StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
2341 const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE,
2342 Register &FrameReg, bool PreferFP, bool ForSimm) const {
2343 const auto &MFI = MF.getFrameInfo();
2344 const auto *RegInfo = static_cast<const AArch64RegisterInfo *>(
2345 MF.getSubtarget().getRegisterInfo());
2346 const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
2347 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
2349 int64_t FPOffset = getFPOffset(MF, ObjectOffset).getFixed();
2350 int64_t Offset = getStackOffset(MF, ObjectOffset).getFixed();
2351 bool isCSR =
2352 !isFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize(MFI));
2354 const StackOffset &SVEStackSize = getSVEStackSize(MF);
2356 // Use frame pointer to reference fixed objects. Use it for locals if
2357 // there are VLAs or a dynamically realigned SP (and thus the SP isn't
2358 // reliable as a base). Make sure useFPForScavengingIndex() does the
2359 // right thing for the emergency spill slot.
2360 bool UseFP = false;
2361 if (AFI->hasStackFrame() && !isSVE) {
2362 // We shouldn't prefer using the FP to access fixed-sized stack objects when
2363 // there are scalable (SVE) objects in between the FP and the fixed-sized
2365 PreferFP &= !SVEStackSize;
2367 // Note: Keeping the following as multiple 'if' statements rather than
2368 // merging to a single expression for readability.
2370 // Argument access should always use the FP.
2371 if (isFixed) {
2372 UseFP = hasFP(MF);
2373 } else if (isCSR && RegInfo->hasStackRealignment(MF)) {
2374 // References to the CSR area must use FP if we're re-aligning the stack
2375 // since the dynamically-sized alignment padding is between the SP/BP and
2377 assert(hasFP(MF) && "Re-aligned stack must have frame pointer");
2378 UseFP = true;
2379 } else if (hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
2380 // If the FPOffset is negative and we're producing a signed immediate, we
2381 // have to keep in mind that the available offset range for negative
2382 // offsets is smaller than for positive ones. If an offset is available
2383 // via the FP and the SP, use whichever is closest.
2384 bool FPOffsetFits = !ForSimm || FPOffset >= -256;
2385 PreferFP |= Offset > -FPOffset && !SVEStackSize;
2387 if (MFI.hasVarSizedObjects()) {
2388 // If we have variable sized objects, we can use either FP or BP, as the
2389 // SP offset is unknown. We can use the base pointer if we have one and
2390 // FP is not preferred. If not, we're stuck with using FP.
2391 bool CanUseBP = RegInfo->hasBasePointer(MF);
2392 if (FPOffsetFits && CanUseBP) // Both are ok. Pick the best.
2393 UseFP = PreferFP;
2394 else if (!CanUseBP) // Can't use BP. Forced to use FP.
2395 UseFP = true;
2396 // else we can use BP and FP, but the offset from FP won't fit.
2397 // That will make us scavenge registers which we can probably avoid by
2398 // using BP. If it won't fit for BP either, we'll scavenge anyway.
2399 } else if (FPOffset >= 0) {
2400 // Use SP or FP, whichever gives us the best chance of the offset
2401 // being in range for direct access. If the FPOffset is positive,
2402 // that'll always be best, as the SP will be even further away.
2403 UseFP = true;
2404 } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
2405 // Funclets access the locals contained in the parent's stack frame
2406 // via the frame pointer, so we have to use the FP in the parent
2407 // function.
2409 assert(
2410 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()) &&
2411 "Funclets should only be present on Win64");
2412 UseFP = true;
2414 // We have the choice between FP and (SP or BP).
2415 if (FPOffsetFits && PreferFP) // If FP is the best fit, use it.
2416 UseFP = true;
2421 assert(
2422 ((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
2423 "In the presence of dynamic stack pointer realignment, "
2424 "non-argument/CSR objects cannot be accessed through the frame pointer");
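// Example of the trade-off above (sketch): a local at FPOffset = -272 that
// is also reachable as SP + 48 fails FPOffsetFits for ForSimm (below -256),
// so the SP/BP form is chosen even where FP access would otherwise win.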
2426 if (isSVE) {
2427 StackOffset FPOffset =
2428 StackOffset::get(-AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset);
2429 StackOffset SPOffset =
2430 SVEStackSize +
2431 StackOffset::get(MFI.getStackSize() - AFI->getCalleeSavedStackSize(),
2432 ObjectOffset);
2433 // Always use the FP for SVE spills if available and beneficial.
2434 if (hasFP(MF) && (SPOffset.getFixed() ||
2435 FPOffset.getScalable() < SPOffset.getScalable() ||
2436 RegInfo->hasStackRealignment(MF))) {
2437 FrameReg = RegInfo->getFrameRegister(MF);
2438 return FPOffset;
2439 }
2441 FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
2442 : (unsigned)AArch64::SP;
2443 return SPOffset;
2444 }
2446 StackOffset ScalableOffset = {};
2447 if (UseFP && !(isFixed || isCSR))
2448 ScalableOffset = -SVEStackSize;
2449 if (!UseFP && (isFixed || isCSR))
2450 ScalableOffset = SVEStackSize;
2452 if (UseFP) {
2453 FrameReg = RegInfo->getFrameRegister(MF);
2454 return StackOffset::getFixed(FPOffset) + ScalableOffset;
2455 }
2457 // Use the base pointer if we have one.
2458 if (RegInfo->hasBasePointer(MF))
2459 FrameReg = RegInfo->getBaseRegister();
2460 else {
2461 assert(!MFI.hasVarSizedObjects() &&
2462 "Can't use SP when we have var sized objects.");
2463 FrameReg = AArch64::SP;
2464 // If we're using the red zone for this function, the SP won't actually
2465 // be adjusted, so the offsets will be negative. They're also all
2466 // within range of the signed 9-bit immediate instructions.
2467 if (canUseRedZone(MF))
2468 Offset -= AFI->getLocalStackSize();
2471 return StackOffset::getFixed(Offset) + ScalableOffset;
2474 static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
2475 // Do not set a kill flag on values that are also marked as live-in. This
2476 // happens with the @llvm.returnaddress intrinsic and with arguments passed in
2477 // callee saved registers.
2478 // Omitting the kill flags is conservatively correct even if the live-in
2479 // is not used after all.
2480 bool IsLiveIn = MF.getRegInfo().isLiveIn(Reg);
2481 return getKillRegState(!IsLiveIn);
2484 static bool produceCompactUnwindFrame(MachineFunction &MF) {
2485 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
2486 AttributeList Attrs = MF.getFunction().getAttributes();
2487 return Subtarget.isTargetMachO() &&
2488 !(Subtarget.getTargetLowering()->supportSwiftError() &&
2489 Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
2490 MF.getFunction().getCallingConv() != CallingConv::SwiftTail;
2493 static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
2494 bool NeedsWinCFI, bool IsFirst,
2495 const TargetRegisterInfo *TRI) {
2496 // If we are generating register pairs for a Windows function that requires
2497 // EH support, then pair consecutive registers only. There are no unwind
2498 // opcodes for saves/restores of non-consecutive register pairs.
2499 // The unwind opcodes are save_regp, save_regp_x, save_fregp, save_fregp_x,
2500 // save_lrpair.
2501 // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
2503 if (Reg2 == AArch64::FP)
2504 return true;
2505 if (!NeedsWinCFI)
2506 return false;
2507 if (TRI->getEncodingValue(Reg2) == TRI->getEncodingValue(Reg1) + 1)
2508 return false;
2509 // If pairing a GPR with LR, the pair can be described by the save_lrpair
2510 // opcode. If this is the first register pair, it would end up with a
2511 // predecrement, but there's no save_lrpair_x opcode, so we can only do this
2512 // if LR is paired with something else than the first register.
2513 // The save_lrpair opcode requires the first register to be an odd one.
2514 if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
2515 (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
2516 return false;
2518 return true;
2519 }
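// Examples (sketch): (x19,x20) stays paired (consecutive encodings);
// (x19,x22) is split; (x21,lr) can use save_lrpair, but only when it is not
// the first pair and the first register is one of x19/x21/x23/x25/x27.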
2520 /// Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp instruction.
2521 /// WindowsCFI requires that only consecutive registers can be paired.
2522 /// LR and FP need to be allocated together when the frame needs to save
2523 /// the frame-record. This means any other register pairing with LR is invalid.
2524 static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2,
2525 bool UsesWinAAPCS, bool NeedsWinCFI,
2526 bool NeedsFrameRecord, bool IsFirst,
2527 const TargetRegisterInfo *TRI) {
2528 if (UsesWinAAPCS)
2529 return invalidateWindowsRegisterPairing(Reg1, Reg2, NeedsWinCFI, IsFirst,
2530 TRI);
2532 // If we need to store the frame record, don't pair any register
2533 // with LR other than FP.
2534 if (NeedsFrameRecord)
2535 return Reg2 == AArch64::LR;
2537 return false;
2538 }
2542 struct RegPairInfo {
2543 unsigned Reg1 = AArch64::NoRegister;
2544 unsigned Reg2 = AArch64::NoRegister;
2545 int FrameIdx;
2546 int Offset;
2547 enum RegType { GPR, FPR64, FPR128, PPR, ZPR } Type;
2549 RegPairInfo() = default;
2551 bool isPaired() const { return Reg2 != AArch64::NoRegister; }
2553 unsigned getScale() const {
2554 switch (Type) {
2555 case PPR:
2556 return 2;
2557 case GPR:
2558 case FPR64:
2559 return 8;
2560 case FPR128:
2561 case ZPR:
2562 return 16;
2563 }
2564 llvm_unreachable("Unsupported type");
2565 }
2567 bool isScalable() const { return Type == PPR || Type == ZPR; }
2570 } // end anonymous namespace
2572 static void computeCalleeSaveRegisterPairs(
2573 MachineFunction &MF, ArrayRef<CalleeSavedInfo> CSI,
2574 const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs,
2575 bool NeedsFrameRecord) {
2580 bool IsWindows = isTargetWindows(MF);
2581 bool NeedsWinCFI = needsWinCFI(MF);
2582 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
2583 MachineFrameInfo &MFI = MF.getFrameInfo();
2584 CallingConv::ID CC = MF.getFunction().getCallingConv();
2585 unsigned Count = CSI.size();
2587 // MachO's compact unwind format relies on all registers being stored in
2588 // pairs.
2589 assert((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost ||
2590 CC == CallingConv::PreserveAll || CC == CallingConv::CXX_FAST_TLS ||
2591 CC == CallingConv::Win64 || (Count & 1) == 0) &&
2592 "Odd number of callee-saved regs to spill!");
2593 int ByteOffset = AFI->getCalleeSavedStackSize();
2594 int StackFillDir = -1;
2595 int RegInc = 1;
2596 unsigned FirstReg = 0;
2597 if (NeedsWinCFI) {
2598 // For WinCFI, fill the stack from the bottom up.
2599 ByteOffset = 0;
2600 StackFillDir = 1;
2601 // As the CSI array is reversed to match PrologEpilogInserter, iterate
2602 // backwards, to pair up registers starting from lower numbered registers.
2603 RegInc = -1;
2604 FirstReg = Count - 1;
2605 }
2606 int ScalableByteOffset = AFI->getSVECalleeSavedStackSize();
2607 bool NeedGapToAlignStack = AFI->hasCalleeSaveStackFreeSpace();
2609 // When iterating backwards, the loop condition relies on unsigned wraparound.
2610 for (unsigned i = FirstReg; i < Count; i += RegInc) {
2611 RegPairInfo RPI;
2612 RPI.Reg1 = CSI[i].getReg();
2614 if (AArch64::GPR64RegClass.contains(RPI.Reg1))
2615 RPI.Type = RegPairInfo::GPR;
2616 else if (AArch64::FPR64RegClass.contains(RPI.Reg1))
2617 RPI.Type = RegPairInfo::FPR64;
2618 else if (AArch64::FPR128RegClass.contains(RPI.Reg1))
2619 RPI.Type = RegPairInfo::FPR128;
2620 else if (AArch64::ZPRRegClass.contains(RPI.Reg1))
2621 RPI.Type = RegPairInfo::ZPR;
2622 else if (AArch64::PPRRegClass.contains(RPI.Reg1))
2623 RPI.Type = RegPairInfo::PPR;
2625 llvm_unreachable("Unsupported register class.");
2627 // Add the next reg to the pair if it is in the same register class.
2628 if (unsigned(i + RegInc) < Count) {
2629 Register NextReg = CSI[i + RegInc].getReg();
2630 bool IsFirst = i == FirstReg;
2631 switch (RPI.Type) {
2632 case RegPairInfo::GPR:
2633 if (AArch64::GPR64RegClass.contains(NextReg) &&
2634 !invalidateRegisterPairing(RPI.Reg1, NextReg, IsWindows,
2635 NeedsWinCFI, NeedsFrameRecord, IsFirst,
2636 TRI))
2637 RPI.Reg2 = NextReg;
2638 break;
2639 case RegPairInfo::FPR64:
2640 if (AArch64::FPR64RegClass.contains(NextReg) &&
2641 !invalidateWindowsRegisterPairing(RPI.Reg1, NextReg, NeedsWinCFI,
2642 IsFirst, TRI))
2643 RPI.Reg2 = NextReg;
2644 break;
2645 case RegPairInfo::FPR128:
2646 if (AArch64::FPR128RegClass.contains(NextReg))
2647 RPI.Reg2 = NextReg;
2648 break;
2649 case RegPairInfo::PPR:
2650 case RegPairInfo::ZPR:
2651 break;
2652 }
2653 }
2655 // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
2656 // list to come in sorted by frame index so that we can issue the store
2657 // pair instructions directly. Assert if we see anything otherwise.
2659 // The order of the registers in the list is controlled by
2660 // getCalleeSavedRegs(), so they will always be in-order, as well.
2661 assert((!RPI.isPaired() ||
2662 (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
2663 "Out of order callee saved regs!");
2665 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
2666 RPI.Reg1 == AArch64::LR) &&
2667 "FrameRecord must be allocated together with LR");
2669 // Windows AAPCS has FP and LR reversed.
2670 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
2671 RPI.Reg2 == AArch64::LR) &&
2672 "FrameRecord must be allocated together with LR");
2674 // MachO's compact unwind format relies on all registers being stored in
2675 // adjacent register pairs.
2676 assert((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost ||
2677 CC == CallingConv::PreserveAll || CC == CallingConv::CXX_FAST_TLS ||
2678 CC == CallingConv::Win64 ||
2680 ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
2681 RPI.Reg1 + 1 == RPI.Reg2))) &&
2682 "Callee-save registers not saved as adjacent register pair!");
2684 RPI.FrameIdx = CSI[i].getFrameIdx();
2685 if (NeedsWinCFI &&
2686 RPI.isPaired()) // RPI.FrameIdx must be the lower index of the pair
2687 RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
2689 int Scale = RPI.getScale();
2691 int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
2692 assert(OffsetPre % Scale == 0);
2694 if (RPI.isScalable())
2695 ScalableByteOffset += StackFillDir * Scale;
2697 ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
2699 // Swift's async context is directly before FP, so allocate an extra
2700 // slot for it.
2701 if (NeedsFrameRecord && AFI->hasSwiftAsyncContext() &&
2702 RPI.Reg2 == AArch64::FP)
2703 ByteOffset += StackFillDir * 8;
2705 assert(!(RPI.isScalable() && RPI.isPaired()) &&
2706 "Paired spill/fill instructions don't exist for SVE vectors");
2708 // Round up size of non-pair to pair size if we need to pad the
2709 // callee-save area to ensure 16-byte alignment.
2710 if (NeedGapToAlignStack && !NeedsWinCFI &&
2711 !RPI.isScalable() && RPI.Type != RegPairInfo::FPR128 &&
2712 !RPI.isPaired() && ByteOffset % 16 != 0) {
2713 ByteOffset += 8 * StackFillDir;
2714 assert(MFI.getObjectAlign(RPI.FrameIdx) <= Align(16));
2715 // A stack frame with a gap looks like this, bottom up:
2716 // d9, d8. x21, gap, x20, x19.
2717 // Set extra alignment on the x21 object to create the gap above it.
2718 MFI.setObjectAlignment(RPI.FrameIdx, Align(16));
2719 NeedGapToAlignStack = false;
2722 int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
2723 assert(OffsetPost % Scale == 0);
2724 // If filling top down (default), we want the offset after incrementing it.
2725 // If filling bottom up (WinCFI) we need the original offset.
2726 int Offset = NeedsWinCFI ? OffsetPre : OffsetPost;
2728 // The FP, LR pair goes 8 bytes into our expanded 24-byte slot so that the
2729 // Swift context can directly precede FP.
2730 if (NeedsFrameRecord && AFI->hasSwiftAsyncContext() &&
2731 RPI.Reg2 == AArch64::FP)
2732 Offset += 8;
2733 RPI.Offset = Offset / Scale;
2735 assert(((!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
2736 (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
2737 "Offset out of bounds for LDP/STP immediate");
2739 // Save the offset to frame record so that the FP register can point to the
2740 // innermost frame record (spilled FP and LR registers).
2741 if (NeedsFrameRecord && ((!IsWindows && RPI.Reg1 == AArch64::LR &&
2742 RPI.Reg2 == AArch64::FP) ||
2743 (IsWindows && RPI.Reg1 == AArch64::FP &&
2744 RPI.Reg2 == AArch64::LR)))
2745 AFI->setCalleeSaveBaseToFrameRecordOffset(Offset);
2747 RegPairs.push_back(RPI);
2748 if (RPI.isPaired())
2749 i += RegInc;
2750 }
2751 if (NeedsWinCFI) {
2752 // If we need an alignment gap in the stack, align the topmost stack
2753 // object. A stack frame with a gap looks like this, bottom up:
2754 // x19, d8. d9, gap.
2755 // Set extra alignment on the topmost stack object (the first element in
2756 // CSI, which goes top down), to create the gap above it.
2757 if (AFI->hasCalleeSaveStackFreeSpace())
2758 MFI.setObjectAlignment(CSI[0].getFrameIdx(), Align(16));
2759 // We iterated bottom up over the registers; flip RegPairs back to top
2760 // down order.
2761 std::reverse(RegPairs.begin(), RegPairs.end());
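// Worked example (sketch): a GPR pair stored 32 bytes into the CSR area has
// Scale = 8, so RPI.Offset = 32 / 8 = 4, matching the addImm(+4) convention
// shown for "stp fp, lr, [sp, #32]" in spillCalleeSavedRegisters below.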
2765 bool AArch64FrameLowering::spillCalleeSavedRegisters(
2766 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2767 ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2768 MachineFunction &MF = *MBB.getParent();
2769 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2770 bool NeedsWinCFI = needsWinCFI(MF);
2771 DebugLoc DL;
2772 SmallVector<RegPairInfo, 8> RegPairs;
2774 computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs, hasFP(MF));
2776 const MachineRegisterInfo &MRI = MF.getRegInfo();
2777 if (homogeneousPrologEpilog(MF)) {
2778 auto MIB = BuildMI(MBB, MI, DL, TII.get(AArch64::HOM_Prolog))
2779 .setMIFlag(MachineInstr::FrameSetup);
2781 for (auto &RPI : RegPairs) {
2782 MIB.addReg(RPI.Reg1);
2783 MIB.addReg(RPI.Reg2);
2785 // Update register live in.
2786 if (!MRI.isReserved(RPI.Reg1))
2787 MBB.addLiveIn(RPI.Reg1);
2788 if (!MRI.isReserved(RPI.Reg2))
2789 MBB.addLiveIn(RPI.Reg2);
2790 }
2791 return true;
2792 }
2793 for (const RegPairInfo &RPI : llvm::reverse(RegPairs)) {
2794 unsigned Reg1 = RPI.Reg1;
2795 unsigned Reg2 = RPI.Reg2;
2796 unsigned StrOpc;
2798 // Issue sequence of spills for cs regs. The first spill may be converted
2799 // to a pre-decrement store later by emitPrologue if the callee-save stack
2800 // area allocation can't be combined with the local stack area allocation.
2802 // stp x22, x21, [sp, #0] // addImm(+0)
2803 // stp x20, x19, [sp, #16] // addImm(+2)
2804 // stp fp, lr, [sp, #32] // addImm(+4)
2805 // Rationale: This sequence saves uop updates compared to a sequence of
2806 // pre-increment spills like stp xi,xj,[sp,#-16]!
2807 // Note: Similar rationale and sequence for restores in epilog.
2808 unsigned Size;
2809 Align Alignment;
2810 switch (RPI.Type) {
2811 case RegPairInfo::GPR:
2812 StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
2813 Size = 8;
2814 Alignment = Align(8);
2815 break;
2816 case RegPairInfo::FPR64:
2817 StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
2818 Size = 8;
2819 Alignment = Align(8);
2820 break;
2821 case RegPairInfo::FPR128:
2822 StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
2823 Size = 16;
2824 Alignment = Align(16);
2825 break;
2826 case RegPairInfo::ZPR:
2827 StrOpc = AArch64::STR_ZXI;
2828 Size = 16;
2829 Alignment = Align(16);
2830 break;
2831 case RegPairInfo::PPR:
2832 StrOpc = AArch64::STR_PXI;
2833 Size = 2;
2834 Alignment = Align(2);
2835 break;
2836 }
2837 LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
2838 if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
2839 dbgs() << ") -> fi#(" << RPI.FrameIdx;
2840 if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
2843 assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
2844 "Windows unwinding requires a consecutive (FP,LR) pair");
2845 // Windows unwind codes require consecutive registers if registers are
2846 // paired. Make the switch here, so that the code below will save (x,x+1)
2847 // and not (x+1,x).
2848 unsigned FrameIdxReg1 = RPI.FrameIdx;
2849 unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
2850 if (NeedsWinCFI && RPI.isPaired()) {
2851 std::swap(Reg1, Reg2);
2852 std::swap(FrameIdxReg1, FrameIdxReg2);
2854 MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
2855 if (!MRI.isReserved(Reg1))
2856 MBB.addLiveIn(Reg1);
2857 if (RPI.isPaired()) {
2858 if (!MRI.isReserved(Reg2))
2859 MBB.addLiveIn(Reg2);
2860 MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
2861 MIB.addMemOperand(MF.getMachineMemOperand(
2862 MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
2863 MachineMemOperand::MOStore, Size, Alignment));
2865 MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
2866 .addReg(AArch64::SP)
2867 .addImm(RPI.Offset) // [sp, #offset*scale],
2868 // where factor*scale is implicit
2869 .setMIFlag(MachineInstr::FrameSetup);
2870 MIB.addMemOperand(MF.getMachineMemOperand(
2871 MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
2872 MachineMemOperand::MOStore, Size, Alignment));
2873 if (NeedsWinCFI)
2874 InsertSEH(MIB, TII, MachineInstr::FrameSetup);
2876 // Update the StackIDs of the SVE stack slots.
2877 MachineFrameInfo &MFI = MF.getFrameInfo();
2878 if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR)
2879 MFI.setStackID(RPI.FrameIdx, TargetStackID::ScalableVector);
2880 }
2881 return true;
2882 }
2885 bool AArch64FrameLowering::restoreCalleeSavedRegisters(
2886 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
2887 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2888 MachineFunction &MF = *MBB.getParent();
2889 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2891 SmallVector<RegPairInfo, 8> RegPairs;
2892 bool NeedsWinCFI = needsWinCFI(MF);
2893 DebugLoc DL;
2894 if (MBBI != MBB.end())
2895 DL = MBBI->getDebugLoc();
2897 computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs, hasFP(MF));
2899 auto EmitMI = [&](const RegPairInfo &RPI) -> MachineBasicBlock::iterator {
2900 unsigned Reg1 = RPI.Reg1;
2901 unsigned Reg2 = RPI.Reg2;
2902 unsigned LdrOpc;
2903 // Issue sequence of restores for cs regs. The last restore may be converted
2904 // to a post-increment load later by emitEpilogue if the callee-save stack
2905 // area allocation can't be combined with the local stack area allocation.
2907 // ldp fp, lr, [sp, #32] // addImm(+4)
2908 // ldp x20, x19, [sp, #16] // addImm(+2)
2909 // ldp x22, x21, [sp, #0] // addImm(+0)
2910 // Note: see comment in spillCalleeSavedRegisters()
2911 unsigned Size;
2912 Align Alignment;
2913 switch (RPI.Type) {
2915 case RegPairInfo::GPR:
2916 LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
2917 Size = 8;
2918 Alignment = Align(8);
2919 break;
2920 case RegPairInfo::FPR64:
2921 LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
2922 Size = 8;
2923 Alignment = Align(8);
2924 break;
2925 case RegPairInfo::FPR128:
2926 LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
2927 Size = 16;
2928 Alignment = Align(16);
2929 break;
2930 case RegPairInfo::ZPR:
2931 LdrOpc = AArch64::LDR_ZXI;
2932 Size = 16;
2933 Alignment = Align(16);
2934 break;
2935 case RegPairInfo::PPR:
2936 LdrOpc = AArch64::LDR_PXI;
2937 Size = 2;
2938 Alignment = Align(2);
2939 break;
2940 }
2941 LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
2942 if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
2943 dbgs() << ") -> fi#(" << RPI.FrameIdx;
2944 if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
2947 // Windows unwind codes require consecutive registers if registers are
2948 // paired. Make the switch here, so that the code below will save (x,x+1)
2949 // and not (x+1,x).
2950 unsigned FrameIdxReg1 = RPI.FrameIdx;
2951 unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
2952 if (NeedsWinCFI && RPI.isPaired()) {
2953 std::swap(Reg1, Reg2);
2954 std::swap(FrameIdxReg1, FrameIdxReg2);
2956 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(LdrOpc));
2957 if (RPI.isPaired()) {
2958 MIB.addReg(Reg2, getDefRegState(true));
2959 MIB.addMemOperand(MF.getMachineMemOperand(
2960 MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
2961 MachineMemOperand::MOLoad, Size, Alignment));
2963 MIB.addReg(Reg1, getDefRegState(true))
2964 .addReg(AArch64::SP)
2965 .addImm(RPI.Offset) // [sp, #offset*scale]
2966 // where factor*scale is implicit
2967 .setMIFlag(MachineInstr::FrameDestroy);
2968 MIB.addMemOperand(MF.getMachineMemOperand(
2969 MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
2970 MachineMemOperand::MOLoad, Size, Alignment));
2971 if (NeedsWinCFI)
2972 InsertSEH(MIB, TII, MachineInstr::FrameDestroy);
2974 return MIB->getIterator();
2977 // SVE objects are always restored in reverse order.
2978 for (const RegPairInfo &RPI : reverse(RegPairs))
2979 if (RPI.isScalable())
2980 EmitMI(RPI);
2982 if (homogeneousPrologEpilog(MF, &MBB)) {
2983 auto MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::HOM_Epilog))
2984 .setMIFlag(MachineInstr::FrameDestroy);
2985 for (auto &RPI : RegPairs) {
2986 MIB.addReg(RPI.Reg1, RegState::Define);
2987 MIB.addReg(RPI.Reg2, RegState::Define);
2988 }
2989 return true;
2990 }
2992 if (ReverseCSRRestoreSeq) {
2993 MachineBasicBlock::iterator First = MBB.end();
2994 for (const RegPairInfo &RPI : reverse(RegPairs)) {
2995 if (RPI.isScalable())
2997 MachineBasicBlock::iterator It = EmitMI(RPI);
2998 if (First == MBB.end())
2999 First = It;
3000 }
3001 if (First != MBB.end())
3002 MBB.splice(MBBI, &MBB, First);
3003 } else {
3004 for (const RegPairInfo &RPI : RegPairs) {
3005 if (RPI.isScalable())
3006 continue;
3008 (void)EmitMI(RPI);
3009 }
3010 }
3012 return true;
3013 }
3014 void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
3015 BitVector &SavedRegs,
3016 RegScavenger *RS) const {
3017 // All calls are tail calls in GHC calling conv, and functions have no
3018 // prologue/epilogue.
3019 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
3020 return;
3022 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
3023 const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
3024 MF.getSubtarget().getRegisterInfo());
3025 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
3026 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
3027 unsigned UnspilledCSGPR = AArch64::NoRegister;
3028 unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
3030 MachineFrameInfo &MFI = MF.getFrameInfo();
3031 const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
3033 unsigned BasePointerReg = RegInfo->hasBasePointer(MF)
3034 ? RegInfo->getBaseRegister()
3035 : (unsigned)AArch64::NoRegister;
3037 unsigned ExtraCSSpill = 0;
3038 // Figure out which callee-saved registers to save/restore.
3039 for (unsigned i = 0; CSRegs[i]; ++i) {
3040 const unsigned Reg = CSRegs[i];
3042 // Add the base pointer register to SavedRegs if it is callee-save.
3043 if (Reg == BasePointerReg)
3044 SavedRegs.set(Reg);
3046 bool RegUsed = SavedRegs.test(Reg);
3047 unsigned PairedReg = AArch64::NoRegister;
3048 if (AArch64::GPR64RegClass.contains(Reg) ||
3049 AArch64::FPR64RegClass.contains(Reg) ||
3050 AArch64::FPR128RegClass.contains(Reg))
3051 PairedReg = CSRegs[i ^ 1];
3053 if (!RegUsed) {
3054 if (AArch64::GPR64RegClass.contains(Reg) &&
3055 !RegInfo->isReservedReg(MF, Reg)) {
3056 UnspilledCSGPR = Reg;
3057 UnspilledCSGPRPaired = PairedReg;
3058 }
3059 continue;
3060 }
3062 // MachO's compact unwind format relies on all registers being stored in
3063 // pairs.
3064 // FIXME: the usual format is actually better if unwinding isn't needed.
3065 if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
3066 !SavedRegs.test(PairedReg)) {
3067 SavedRegs.set(PairedReg);
3068 if (AArch64::GPR64RegClass.contains(PairedReg) &&
3069 !RegInfo->isReservedReg(MF, PairedReg))
3070 ExtraCSSpill = PairedReg;
3074 if (MF.getFunction().getCallingConv() == CallingConv::Win64 &&
3075 !Subtarget.isTargetWindows()) {
3076 // For Windows calling convention on a non-windows OS, where X18 is treated
3077 // as reserved, back up X18 when entering non-windows code (marked with the
3078 // Windows calling convention) and restore when returning regardless of
3079 // whether the individual function uses it - it might call other functions
3080 // that clobber it.
3081 SavedRegs.set(AArch64::X18);
3084 // Calculates the callee saved stack size.
3085 unsigned CSStackSize = 0;
3086 unsigned SVECSStackSize = 0;
3087 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
3088 const MachineRegisterInfo &MRI = MF.getRegInfo();
3089 for (unsigned Reg : SavedRegs.set_bits()) {
3090 auto RegSize = TRI->getRegSizeInBits(Reg, MRI) / 8;
3091 if (AArch64::PPRRegClass.contains(Reg) ||
3092 AArch64::ZPRRegClass.contains(Reg))
3093 SVECSStackSize += RegSize;
3095 CSStackSize += RegSize;
3098 // Save number of saved regs, so we can easily update CSStackSize later.
3099 unsigned NumSavedRegs = SavedRegs.count();
3101 // The frame record needs to be created by saving the appropriate registers
3102 uint64_t EstimatedStackSize = MFI.estimateStackSize(MF);
3103 if (hasFP(MF) ||
3104 windowsRequiresStackProbe(MF, EstimatedStackSize + CSStackSize + 16)) {
3105 SavedRegs.set(AArch64::FP);
3106 SavedRegs.set(AArch64::LR);
3109 LLVM_DEBUG(dbgs() << "*** determineCalleeSaves\nSaved CSRs:";
3110 for (unsigned Reg
3111 : SavedRegs.set_bits()) dbgs()
3112 << ' ' << printReg(Reg, RegInfo);
3113 dbgs() << "\n";);
3115 // If any callee-saved registers are used, the frame cannot be eliminated.
3116 int64_t SVEStackSize =
3117 alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
3118 bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;
3120 // The CSR spill slots have not been allocated yet, so estimateStackSize
3121 // won't include them.
3122 unsigned EstimatedStackSizeLimit = estimateRSStackSizeLimit(MF);
3124 // We may address some of the stack above the canonical frame address, either
3125 // for our own arguments or during a call. Include that in calculating whether
3126 // we have complicated addressing concerns.
3127 int64_t CalleeStackUsed = 0;
3128 for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) {
3129 int64_t FixedOff = MFI.getObjectOffset(I);
3130 if (FixedOff > CalleeStackUsed) CalleeStackUsed = FixedOff;
3133 // Conservatively always assume BigStack when there are SVE spills.
3134 bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
3135 CalleeStackUsed) > EstimatedStackSizeLimit;
3136 if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
3137 AFI->setHasStackFrame(true);
3139 // Estimate if we might need to scavenge a register at some point in order
3140 // to materialize a stack offset. If so, either spill one additional
3141 // callee-saved register or reserve a special spill slot to facilitate
3142 // register scavenging. If we already spilled an extra callee-saved register
3143 // above to keep the number of spills even, we don't need to do anything else
3144 // here.
3145 if (BigStack) {
3146 if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
3147 LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
3148 << " to get a scratch register.\n");
3149 SavedRegs.set(UnspilledCSGPR);
3150 // MachO's compact unwind format relies on all registers being stored in
3151 // pairs, so if we need to spill one extra for BigStack, then we need to
3152 // store the pair.
3153 if (producePairRegisters(MF))
3154 SavedRegs.set(UnspilledCSGPRPaired);
3155 ExtraCSSpill = UnspilledCSGPR;
3158 // If we didn't find an extra callee-saved register to spill, create
3159 // an emergency spill slot.
3160 if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) {
3161 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
3162 const TargetRegisterClass &RC = AArch64::GPR64RegClass;
3163 unsigned Size = TRI->getSpillSize(RC);
3164 Align Alignment = TRI->getSpillAlign(RC);
3165 int FI = MFI.CreateStackObject(Size, Alignment, false);
3166 RS->addScavengingFrameIndex(FI);
3167 LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
3168 << " as the emergency spill slot.\n");
3172 // Adding the size of additional 64bit GPR saves.
3173 CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs);
3175 // A Swift asynchronous context extends the frame record with a pointer
3176 // directly before FP.
3177 if (hasFP(MF) && AFI->hasSwiftAsyncContext())
3178 CSStackSize += 8;
3180 uint64_t AlignedCSStackSize = alignTo(CSStackSize, 16);
3181 LLVM_DEBUG(dbgs() << "Estimated stack frame size: "
3182 << EstimatedStackSize + AlignedCSStackSize
3183 << " bytes.\n");
3185 assert((!MFI.isCalleeSavedInfoValid() ||
3186 AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
3187 "Should not invalidate callee saved info");
3189 // Round up to register pair alignment to avoid additional SP adjustment
3190 // instructions.
3191 AFI->setCalleeSavedStackSize(AlignedCSStackSize);
3192 AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
3193 AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16));
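// Numeric sketch of the bookkeeping above: spilling {fp, lr, x19, x20}
// gives CSStackSize = 32; a Swift async context raises it to 40, and
// AlignedCSStackSize = alignTo(40, 16) = 48, so the CSR area reports 8
// bytes of free space for stack slot scavenging.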
3196 bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
3197 MachineFunction &MF, const TargetRegisterInfo *RegInfo,
3198 std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
3199 unsigned &MaxCSFrameIndex) const {
3200 bool NeedsWinCFI = needsWinCFI(MF);
3201 // To match the canonical windows frame layout, reverse the list of
3202 // callee saved registers to get them laid out by PrologEpilogInserter
3203 // in the right order. (PrologEpilogInserter allocates stack objects top
3204 // down. Windows canonical prologs store higher numbered registers at
3205 // the top, thus have the CSI array start from the highest registers.)
3206 if (NeedsWinCFI)
3207 std::reverse(CSI.begin(), CSI.end());
3209 if (CSI.empty())
3210 return true; // Early exit if no callee saved registers are modified!
3212 // Now that we know which registers need to be saved and restored, allocate
3213 // stack slots for them.
3214 MachineFrameInfo &MFI = MF.getFrameInfo();
3215 auto *AFI = MF.getInfo<AArch64FunctionInfo>();
3217 bool UsesWinAAPCS = isTargetWindows(MF);
3218 if (UsesWinAAPCS && hasFP(MF) && AFI->hasSwiftAsyncContext()) {
3219 int FrameIdx = MFI.CreateStackObject(8, Align(16), true);
3220 AFI->setSwiftAsyncContextFrameIdx(FrameIdx);
3221 if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3222 if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
  for (auto &CS : CSI) {
    Register Reg = CS.getReg();
    const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

    unsigned Size = RegInfo->getSpillSize(*RC);
    Align Alignment(RegInfo->getSpillAlign(*RC));
    int FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
    CS.setFrameIdx(FrameIdx);

    if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;

    // Grab 8 bytes below FP for the extended asynchronous frame info.
    if (hasFP(MF) && AFI->hasSwiftAsyncContext() && !UsesWinAAPCS &&
        Reg == AArch64::FP) {
      FrameIdx = MFI.CreateStackObject(8, Alignment, true);
      AFI->setSwiftAsyncContextFrameIdx(FrameIdx);
      if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
      if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
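      // Illustrative layout (not taken from this file): once FP is set up,
      // the extra slot sits directly below the frame record:
      //   [fp, #8]  : saved lr
      //   [fp, #0]  : saved fp
      //   [fp, #-8] : Swift asynchronous context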
    }
  }
  return true;
}

bool AArch64FrameLowering::enableStackSlotScavenging(
    const MachineFunction &MF) const {
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  return AFI->hasCalleeSaveStackFreeSpace();
}

/// Returns true if there are any SVE callee saves.
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI,
                                      int &Min, int &Max) {
  Min = std::numeric_limits<int>::max();
  Max = std::numeric_limits<int>::min();

  if (!MFI.isCalleeSavedInfoValid())
    return false;

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  for (auto &CS : CSI) {
    if (AArch64::ZPRRegClass.contains(CS.getReg()) ||
        AArch64::PPRRegClass.contains(CS.getReg())) {
      assert((Max == std::numeric_limits<int>::min() ||
              Max + 1 == CS.getFrameIdx()) &&
             "SVE CalleeSaves are not consecutive");

      Min = std::min(Min, CS.getFrameIdx());
      Max = std::max(Max, CS.getFrameIdx());
    }
  }
  return Min != std::numeric_limits<int>::max();
}
// Process all the SVE stack objects and determine offsets for each
// object. If AssignOffsets is true, the offsets get assigned.
// Fills in the first and last callee-saved frame indices into
// Min/MaxCSFrameIndex, respectively.
// Returns the size of the stack.
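// For example (illustrative): two spilled Z registers of 16 scalable bytes
// each receive offsets -16 and -32; the values are later scaled by the
// runtime vector length when frame indices are resolved.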
static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
                                              int &MinCSFrameIndex,
                                              int &MaxCSFrameIndex,
                                              bool AssignOffsets) {
#ifndef NDEBUG
  // First process all fixed stack objects.
  for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
    assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
           "SVE vectors should never be passed on the stack by value, only by "
           "reference.");
#endif

  auto Assign = [&MFI](int FI, int64_t Offset) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FI << ") at SP[" << Offset << "]\n");
    MFI.setObjectOffset(FI, Offset);
  };

  int64_t Offset = 0;

  // Then process all callee saved slots.
  if (getSVECalleeSaveSlotRange(MFI, MinCSFrameIndex, MaxCSFrameIndex)) {
    // Assign offsets to the callee save slots.
    for (int I = MinCSFrameIndex; I <= MaxCSFrameIndex; ++I) {
      Offset += MFI.getObjectSize(I);
      Offset = alignTo(Offset, MFI.getObjectAlign(I));
      if (AssignOffsets)
        Assign(I, -Offset);
    }
  }

  // Ensure that the callee-save area is aligned to 16 bytes.
  Offset = alignTo(Offset, Align(16U));

  // Create a buffer of SVE objects to allocate and sort it.
  SmallVector<int, 8> ObjectsToAllocate;
  // If we have a stack protector, and we've previously decided that we have SVE
  // objects on the stack and thus need it to go in the SVE stack area, then it
  // needs to go first.
  int StackProtectorFI = -1;
  if (MFI.hasStackProtectorIndex()) {
    StackProtectorFI = MFI.getStackProtectorIndex();
    if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
      ObjectsToAllocate.push_back(StackProtectorFI);
  }
  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    unsigned StackID = MFI.getStackID(I);
    if (StackID != TargetStackID::ScalableVector)
      continue;
    if (I == StackProtectorFI)
      continue;
    if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex)
      continue;
    if (MFI.isDeadObjectIndex(I))
      continue;

    ObjectsToAllocate.push_back(I);
  }

  // Allocate all SVE locals and spills.
  for (unsigned FI : ObjectsToAllocate) {
    Align Alignment = MFI.getObjectAlign(FI);
    // FIXME: Given that the length of SVE vectors is not necessarily a power of
    // two, we'd need to align every object dynamically at runtime if the
    // alignment is larger than 16. This is not yet supported.
    if (Alignment > Align(16))
      report_fatal_error(
          "Alignment of scalable vectors > 16 bytes is not yet supported");

    Offset = alignTo(Offset + MFI.getObjectSize(FI), Alignment);
    if (AssignOffsets)
      Assign(FI, -Offset);
  }

  return Offset;
}
int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
    MachineFrameInfo &MFI) const {
  int MinCSFrameIndex, MaxCSFrameIndex;
  return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
                                        false);
}

int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
    MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex) const {
  return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
                                        true);
}
void AArch64FrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown &&
         "Upwards growing stack unsupported");

  int MinCSFrameIndex, MaxCSFrameIndex;
  int64_t SVEStackSize =
      assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);

  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  AFI->setStackSizeSVE(alignTo(SVEStackSize, 16U));
  AFI->setMinMaxSVECSFrameIndex(MinCSFrameIndex, MaxCSFrameIndex);

  // If this function isn't doing Win64-style C++ EH, we don't need to do
  // anything.
  if (!MF.hasEHFunclets())
    return;
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();

  MachineBasicBlock &MBB = MF.front();
  auto MBBI = MBB.begin();
  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
    ++MBBI;

  // Create an UnwindHelp object.
  // The UnwindHelp object is allocated at the start of the fixed object area.
  int64_t FixedObject =
      getFixedObjectSize(MF, AFI, /*IsWin64*/ true, /*IsFunclet*/ false);
  int UnwindHelpFI = MFI.CreateFixedObject(/*Size*/ 8,
                                           /*SPOffset*/ -FixedObject,
                                           /*IsImmutable=*/false);
  EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;

  // We need to store -2 into the UnwindHelp object at the start of the
  // function.
  DebugLoc DL;
  RS->enterBasicBlockEnd(MBB);
  RS->backward(std::prev(MBBI));
  Register DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
  assert(DstReg && "There must be a free register after frame setup");
  BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVi64imm), DstReg).addImm(-2);
  BuildMI(MBB, MBBI, DL, TII.get(AArch64::STURXi))
      .addReg(DstReg, getKillRegState(true))
      .addFrameIndex(UnwindHelpFI)
      .addImm(0);
}
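// Illustrative lowering (not from this file; the register is whatever the
// scavenger finds free): after frame index elimination, the two instructions
// built above typically become
//   mov  x8, #-2
//   stur x8, [fp/sp, #<UnwindHelp offset>]
// placed right after the frame-setup sequence of the entry block.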
namespace {
struct TagStoreInstr {
  MachineInstr *MI;
  int64_t Offset, Size;
  explicit TagStoreInstr(MachineInstr *MI, int64_t Offset, int64_t Size)
      : MI(MI), Offset(Offset), Size(Size) {}
};

class TagStoreEdit {
  MachineFunction *MF;
  MachineBasicBlock *MBB;
  MachineRegisterInfo *MRI;
  // Tag store instructions that are being replaced.
  SmallVector<TagStoreInstr, 8> TagStores;
  // Combined memref arguments of the above instructions.
  SmallVector<MachineMemOperand *, 8> CombinedMemRefs;

  // Replace allocation tags in [FrameReg + FrameRegOffset, FrameReg +
  // FrameRegOffset + Size) with the address tag of SP.
  Register FrameReg;
  StackOffset FrameRegOffset;
  int64_t Size;
  // If not std::nullopt, move FrameReg to (FrameReg + FrameRegUpdate) at the
  // end.
  std::optional<int64_t> FrameRegUpdate;
  // MIFlags for any FrameReg updating instructions.
  unsigned FrameRegUpdateFlags;

  // Use zeroing instruction variants.
  bool ZeroData;
  DebugLoc DL;

  void emitUnrolled(MachineBasicBlock::iterator InsertI);
  void emitLoop(MachineBasicBlock::iterator InsertI);

public:
  TagStoreEdit(MachineBasicBlock *MBB, bool ZeroData)
      : MBB(MBB), ZeroData(ZeroData) {
    MF = MBB->getParent();
    MRI = &MF->getRegInfo();
  }
  // Add an instruction to be replaced. Instructions must be added in
  // ascending order of Offset, and have to be adjacent.
  void addInstruction(TagStoreInstr I) {
    assert((TagStores.empty() ||
            TagStores.back().Offset + TagStores.back().Size == I.Offset) &&
           "Non-adjacent tag store instructions.");
    TagStores.push_back(I);
  }
  void clear() { TagStores.clear(); }
  // Emit equivalent code at the given location, and erase the current set of
  // instructions. May skip if the replacement is not profitable. May invalidate
  // the input iterator and replace it with a valid one.
  void emitCode(MachineBasicBlock::iterator &InsertI,
                const AArch64FrameLowering *TFI, bool TryMergeSPUpdate);
};
void TagStoreEdit::emitUnrolled(MachineBasicBlock::iterator InsertI) {
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();

  const int64_t kMinOffset = -256 * 16;
  const int64_t kMaxOffset = 255 * 16;

  Register BaseReg = FrameReg;
  int64_t BaseRegOffsetBytes = FrameRegOffset.getFixed();
  if (BaseRegOffsetBytes < kMinOffset ||
      BaseRegOffsetBytes + (Size - Size % 32) > kMaxOffset ||
      // BaseReg can be FP, which is not necessarily aligned to 16 bytes. In
      // that case, BaseRegOffsetBytes will not be aligned to 16 bytes, which
      // is required for the offset of ST2G.
      BaseRegOffsetBytes % 16 != 0) {
    Register ScratchReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
    emitFrameOffset(*MBB, InsertI, DL, ScratchReg, BaseReg,
                    StackOffset::getFixed(BaseRegOffsetBytes), TII);
    BaseReg = ScratchReg;
    BaseRegOffsetBytes = 0;
  }

  MachineInstr *LastI = nullptr;
  while (Size) {
    int64_t InstrSize = (Size > 16) ? 32 : 16;
    unsigned Opcode =
        InstrSize == 16
            ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
            : (ZeroData ? AArch64::STZ2Gi : AArch64::ST2Gi);
    assert(BaseRegOffsetBytes % 16 == 0);
    MachineInstr *I = BuildMI(*MBB, InsertI, DL, TII->get(Opcode))
                          .addReg(AArch64::SP)
                          .addReg(BaseReg)
                          .addImm(BaseRegOffsetBytes / 16)
                          .setMemRefs(CombinedMemRefs);
    // A store to [BaseReg, #0] should go last for an opportunity to fold the
    // final SP adjustment in the epilogue.
    if (BaseRegOffsetBytes == 0)
      LastI = I;
    BaseRegOffsetBytes += InstrSize;
    Size -= InstrSize;
  }

  if (LastI)
    MBB->splice(InsertI, MBB, LastI);
}
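// Illustrative output (assumed values): for Size == 80 with an aligned base
// in x0, emitUnrolled produces roughly
//   st2g sp, [x0, #32]
//   stg  sp, [x0, #64]
//   st2g sp, [x0, #0]   ; the #0 store is spliced last (see above)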
void TagStoreEdit::emitLoop(MachineBasicBlock::iterator InsertI) {
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();

  Register BaseReg = FrameRegUpdate
                         ? FrameReg
                         : MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  Register SizeReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);

  emitFrameOffset(*MBB, InsertI, DL, BaseReg, FrameReg, FrameRegOffset, TII);

  int64_t LoopSize = Size;
  // If the loop size is not a multiple of 32, split off one 16-byte store at
  // the end to fold BaseReg update into.
  if (FrameRegUpdate && *FrameRegUpdate)
    LoopSize -= LoopSize % 32;
  MachineInstr *LoopI = BuildMI(*MBB, InsertI, DL,
                                TII->get(ZeroData ? AArch64::STZGloop_wback
                                                  : AArch64::STGloop_wback))
                            .addDef(SizeReg)
                            .addDef(BaseReg)
                            .addImm(LoopSize)
                            .addReg(BaseReg)
                            .setMemRefs(CombinedMemRefs);
  if (FrameRegUpdate)
    LoopI->setFlags(FrameRegUpdateFlags);

  int64_t ExtraBaseRegUpdate =
      FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.getFixed() - Size) : 0;
  if (LoopSize < Size) {
    assert(FrameRegUpdate);
    assert(Size - LoopSize == 16);
    // Tag 16 more bytes at BaseReg and update BaseReg.
    BuildMI(*MBB, InsertI, DL,
            TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex))
        .addDef(BaseReg)
        .addReg(BaseReg)
        .addReg(BaseReg)
        .addImm(1 + ExtraBaseRegUpdate / 16)
        .setMemRefs(CombinedMemRefs)
        .setMIFlags(FrameRegUpdateFlags);
  } else if (ExtraBaseRegUpdate) {
    // Update BaseReg.
    BuildMI(*MBB, InsertI, DL,
            TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri : AArch64::SUBXri))
        .addDef(BaseReg)
        .addReg(BaseReg)
        .addImm(std::abs(ExtraBaseRegUpdate))
        .addImm(0)
        .setMIFlags(FrameRegUpdateFlags);
  }
}
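// Illustrative expansion (assumed values): tagging 256 bytes via
// STGloop_wback later expands to a loop along the lines of
//   mov  x9, #256
// .Lloop:
//   st2g x8, [x8], #32
//   subs x9, x9, #32
//   b.ne .Lloop
// with any trailing 16 bytes handled by the post-indexed STG emitted above.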
// Check if *II is a register update that can be merged into an STGloop that
// ends at (Reg + Size). On success, *TotalOffset is set to the update amount;
// the part of it beyond Size becomes the post-loop adjustment to Reg.
bool canMergeRegUpdate(MachineBasicBlock::iterator II, unsigned Reg,
                       int64_t Size, int64_t *TotalOffset) {
  MachineInstr &MI = *II;
  if ((MI.getOpcode() == AArch64::ADDXri ||
       MI.getOpcode() == AArch64::SUBXri) &&
      MI.getOperand(0).getReg() == Reg && MI.getOperand(1).getReg() == Reg) {
    unsigned Shift = AArch64_AM::getShiftValue(MI.getOperand(3).getImm());
    int64_t Offset = MI.getOperand(2).getImm() << Shift;
    if (MI.getOpcode() == AArch64::SUBXri)
      Offset = -Offset;
    int64_t AbsPostOffset = std::abs(Offset - Size);
    const int64_t kMaxOffset =
        0xFFF; // Max encoding for unshifted ADDXri / SUBXri
    if (AbsPostOffset <= kMaxOffset && AbsPostOffset % 16 == 0) {
      *TotalOffset = Offset;
      return true;
    }
  }
  return false;
}
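// For example (illustrative): in an epilogue of the form
//   stg sp, [sp, #0]
//   add sp, sp, #144
// the ADD qualifies, so the SP adjustment can be folded into the tagging
// loop's write-back instead of being emitted separately.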
void mergeMemRefs(const SmallVectorImpl<TagStoreInstr> &TSE,
                  SmallVectorImpl<MachineMemOperand *> &MemRefs) {
  MemRefs.clear();
  for (auto &TS : TSE) {
    MachineInstr *MI = TS.MI;
    // An instruction without memory operands may access anything. Be
    // conservative and return an empty list.
    if (MI->memoperands_empty()) {
      MemRefs.clear();
      return;
    }
    MemRefs.append(MI->memoperands_begin(), MI->memoperands_end());
  }
}
void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI,
                            const AArch64FrameLowering *TFI,
                            bool TryMergeSPUpdate) {
  if (TagStores.empty())
    return;
  TagStoreInstr &FirstTagStore = TagStores[0];
  TagStoreInstr &LastTagStore = TagStores[TagStores.size() - 1];
  Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
  DL = TagStores[0].MI->getDebugLoc();

  Register Reg;
  FrameRegOffset = TFI->resolveFrameOffsetReference(
      *MF, FirstTagStore.Offset, false /*isFixed*/, false /*isSVE*/, Reg,
      /*PreferFP=*/false, /*ForSimm=*/true);
  FrameReg = Reg;
  FrameRegUpdate = std::nullopt;

  mergeMemRefs(TagStores, CombinedMemRefs);

  LLVM_DEBUG(dbgs() << "Replacing adjacent STG instructions:\n";
             for (const auto &Instr
                  : TagStores) { dbgs() << "  " << *Instr.MI; });

  // Size threshold where a loop becomes shorter than a linear sequence of
  // tagging instructions.
  const int kSetTagLoopThreshold = 176;
  if (Size < kSetTagLoopThreshold) {
    if (TagStores.size() < 2)
      return;
    emitUnrolled(InsertI);
  } else {
    MachineInstr *UpdateInstr = nullptr;
    int64_t TotalOffset = 0;
    if (TryMergeSPUpdate) {
      // See if we can merge base register update into the STGloop.
      // This is done in AArch64LoadStoreOptimizer for "normal" stores,
      // but STGloop is way too unusual for that, and also it only
      // realistically happens in function epilogue. Also, STGloop is expanded
      // before that pass.
      if (InsertI != MBB->end() &&
          canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.getFixed() + Size,
                            &TotalOffset)) {
        UpdateInstr = &*InsertI++;
        LLVM_DEBUG(dbgs() << "Folding SP update into loop:\n  "
                          << *UpdateInstr);
      }
    }

    if (!UpdateInstr && TagStores.size() < 2)
      return;

    if (UpdateInstr) {
      FrameRegUpdate = TotalOffset;
      FrameRegUpdateFlags = UpdateInstr->getFlags();
    }
    emitLoop(InsertI);
    if (UpdateInstr)
      UpdateInstr->eraseFromParent();
  }

  for (auto &TS : TagStores)
    TS.MI->eraseFromParent();
  clear();
}
bool isMergeableStackTaggingInstruction(MachineInstr &MI, int64_t &Offset,
                                        int64_t &Size, bool &ZeroData) {
  MachineFunction &MF = *MI.getParent()->getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned Opcode = MI.getOpcode();
  ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
              Opcode == AArch64::STZ2Gi);

  if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
    if (!MI.getOperand(0).isDead() || !MI.getOperand(1).isDead())
      return false;
    if (!MI.getOperand(2).isImm() || !MI.getOperand(3).isFI())
      return false;
    Offset = MFI.getObjectOffset(MI.getOperand(3).getIndex());
    Size = MI.getOperand(2).getImm();
    return true;
  }

  if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
    Size = 16;
  else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
    Size = 32;
  else
    return false;

  if (MI.getOperand(0).getReg() != AArch64::SP || !MI.getOperand(1).isFI())
    return false;

  Offset = MFI.getObjectOffset(MI.getOperand(1).getIndex()) +
           16 * MI.getOperand(2).getImm();
  return true;
}
// Detect a run of memory tagging instructions for adjacent stack frame slots,
// and replace them with a shorter instruction sequence:
// * replace STG + STG with ST2G
// * replace STGloop + STGloop with STGloop
// This code needs to run when stack slot offsets are already known, but before
// FrameIndex operands in STG instructions are eliminated.
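// For example (illustrative, assuming fi#0 and fi#1 are adjacent 16-byte
// slots):
//   stg sp, [fi#0]
//   stg sp, [fi#1]
// is rewritten into a single st2g covering both slots, with the frame offset
// resolved to an SP- or FP-relative address.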
MachineBasicBlock::iterator tryMergeAdjacentSTG(MachineBasicBlock::iterator II,
                                                const AArch64FrameLowering *TFI,
                                                RegScavenger *RS) {
  bool FirstZeroData;
  int64_t Size, Offset;
  MachineInstr &MI = *II;
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator NextI = ++II;
  if (&MI == &MBB->instr_back())
    return II;
  if (!isMergeableStackTaggingInstruction(MI, Offset, Size, FirstZeroData))
    return II;

  SmallVector<TagStoreInstr, 4> Instrs;
  Instrs.emplace_back(&MI, Offset, Size);

  constexpr int kScanLimit = 10;
  int Count = 0;
  for (MachineBasicBlock::iterator E = MBB->end();
       NextI != E && Count < kScanLimit; ++NextI) {
    MachineInstr &MI = *NextI;
    bool ZeroData;
    int64_t Size, Offset;
    // Collect instructions that update memory tags with a FrameIndex operand
    // and (when applicable) constant size, and whose output registers are dead
    // (the latter is almost always the case in practice). Since these
    // instructions effectively have no inputs or outputs, we are free to skip
    // any non-aliasing instructions in between without tracking used registers.
    if (isMergeableStackTaggingInstruction(MI, Offset, Size, ZeroData)) {
      if (ZeroData != FirstZeroData)
        break;
      Instrs.emplace_back(&MI, Offset, Size);
      continue;
    }

    // Only count non-transient, non-tagging instructions toward the scan
    // limit.
    if (!MI.isTransient())
      ++Count;

    // Just in case, stop before the epilogue code starts.
    if (MI.getFlag(MachineInstr::FrameSetup) ||
        MI.getFlag(MachineInstr::FrameDestroy))
      break;

    // Reject anything that may alias the collected instructions.
    if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects())
      break;
  }

  // New code will be inserted after the last tagging instruction we've found.
  MachineBasicBlock::iterator InsertI = Instrs.back().MI;
  InsertI++;

  llvm::stable_sort(Instrs,
                    [](const TagStoreInstr &Left, const TagStoreInstr &Right) {
                      return Left.Offset < Right.Offset;
                    });

  // Make sure that we don't have any overlapping stores.
  int64_t CurOffset = Instrs[0].Offset;
  for (auto &Instr : Instrs) {
    if (CurOffset > Instr.Offset)
      return NextI;
    CurOffset = Instr.Offset + Instr.Size;
  }

  // Find contiguous runs of tagged memory and emit shorter instruction
  // sequences for them when possible.
  TagStoreEdit TSE(MBB, FirstZeroData);
  std::optional<int64_t> EndOffset;
  for (auto &Instr : Instrs) {
    if (EndOffset && *EndOffset != Instr.Offset) {
      // Found a gap.
      TSE.emitCode(InsertI, TFI, /*TryMergeSPUpdate = */ false);
      TSE.clear();
    }

    TSE.addInstruction(Instr);
    EndOffset = Instr.Offset + Instr.Size;
  }

  const MachineFunction *MF = MBB->getParent();
  // Multiple FP/SP updates in a loop cannot be described by CFI instructions.
  TSE.emitCode(
      InsertI, TFI, /*TryMergeSPUpdate = */
      !MF->getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo(*MF));

  return InsertI;
}
} // namespace
void AArch64FrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS = nullptr) const {
  if (StackTaggingMergeSetTag)
    for (auto &BB : MF)
      for (MachineBasicBlock::iterator II = BB.begin(); II != BB.end();)
        II = tryMergeAdjacentSTG(II, this, RS);
}
/// For Win64 AArch64 EH, the offset to the Unwind object is from the SP
/// before the update. This is easily retrieved as it is exactly the offset
/// that is set in processFunctionBeforeFrameFinalized.
StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP(
    const MachineFunction &MF, int FI, Register &FrameReg,
    bool IgnoreSPUpdates) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (IgnoreSPUpdates) {
    LLVM_DEBUG(dbgs() << "Offset from the SP for " << FI << " is "
                      << MFI.getObjectOffset(FI) << "\n");
    FrameReg = AArch64::SP;
    return StackOffset::getFixed(MFI.getObjectOffset(FI));
  }

  // Go to common code if we cannot provide sp + offset.
  if (MFI.hasVarSizedObjects() ||
      MF.getInfo<AArch64FunctionInfo>()->getStackSizeSVE() ||
      MF.getSubtarget().getRegisterInfo()->hasStackRealignment(MF))
    return getFrameIndexReference(MF, FI, FrameReg);

  FrameReg = AArch64::SP;
  return getStackOffset(MF, MFI.getObjectOffset(FI));
}
/// The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve
/// the parent's frame pointer.
unsigned AArch64FrameLowering::getWinEHParentFrameOffset(
    const MachineFunction &MF) const {
  return 0;
}
/// Funclets only need to account for space for the callee saved registers,
/// as the locals are accounted for in the parent's stack frame.
unsigned AArch64FrameLowering::getWinEHFuncletFrameSize(
    const MachineFunction &MF) const {
  // This is the size of the pushed CSRs.
  unsigned CSSize =
      MF.getInfo<AArch64FunctionInfo>()->getCalleeSavedStackSize();
  // This is the amount of stack a funclet needs to allocate.
  return alignTo(CSSize + MF.getFrameInfo().getMaxCallFrameSize(),
                 getStackAlign());
}
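// Worked example (assumed values): with 64 bytes of pushed CSRs and a
// maximum call frame of 40 bytes, the funclet frame size is
// alignTo(64 + 40, 16) == 112 bytes.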
namespace {
struct FrameObject {
  bool IsValid = false;
  // Index of the object in MFI.
  int ObjectIndex = 0;
  // Group ID this object belongs to.
  int GroupIndex = -1;
  // This object should be placed first (closest to SP).
  bool ObjectFirst = false;
  // This object's group (which always contains the object with
  // ObjectFirst==true) should be placed first.
  bool GroupFirst = false;
};

class GroupBuilder {
  SmallVector<int, 8> CurrentMembers;
  int NextGroupIndex = 0;
  std::vector<FrameObject> &Objects;

public:
  GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
  void AddMember(int Index) { CurrentMembers.push_back(Index); }
  void EndCurrentGroup() {
    if (CurrentMembers.size() > 1) {
      // Create a new group with the current member list. This might remove them
      // from their pre-existing groups. That's OK, dealing with overlapping
      // groups is too hard and unlikely to make a difference.
      LLVM_DEBUG(dbgs() << "group:");
      for (int Index : CurrentMembers) {
        Objects[Index].GroupIndex = NextGroupIndex;
        LLVM_DEBUG(dbgs() << " " << Index);
      }
      LLVM_DEBUG(dbgs() << "\n");
      NextGroupIndex++;
    }
    CurrentMembers.clear();
  }
};
bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
  // Objects at a lower index are closer to FP; objects at a higher index are
  // closer to SP.
  //
  // For consistency in our comparison, all invalid objects are placed
  // at the end. This also allows us to stop walking when we hit the
  // first invalid item after it's all sorted.
  //
  // The "first" object goes first (closest to SP), followed by the members of
  // the "first" group.
  //
  // The rest are sorted by the group index to keep the groups together.
  // Higher numbered groups are more likely to be around longer (i.e. untagged
  // in the function epilogue and not at some earlier point). Place them closer
  // to SP.
  //
  // If all else equal, sort by the object index to keep the objects in the
  // original order.
  return std::make_tuple(!A.IsValid, A.ObjectFirst, A.GroupFirst, A.GroupIndex,
                         A.ObjectIndex) <
         std::make_tuple(!B.IsValid, B.ObjectFirst, B.GroupFirst, B.GroupIndex,
                         B.ObjectIndex);
}
} // namespace
void AArch64FrameLowering::orderFrameObjects(
    const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
  if (!OrderFrameObjects || ObjectsToAllocate.empty())
    return;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  std::vector<FrameObject> FrameObjects(MFI.getObjectIndexEnd());
  for (auto &Obj : ObjectsToAllocate) {
    FrameObjects[Obj].IsValid = true;
    FrameObjects[Obj].ObjectIndex = Obj;
  }

  // Identify stack slots that are tagged at the same time.
  GroupBuilder GB(FrameObjects);
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      int OpIndex;
      switch (MI.getOpcode()) {
      case AArch64::STGloop:
      case AArch64::STZGloop:
        OpIndex = 3;
        break;
      case AArch64::STGi:
      case AArch64::STZGi:
      case AArch64::ST2Gi:
      case AArch64::STZ2Gi:
        OpIndex = 1;
        break;
      default:
        OpIndex = -1;
      }

      int TaggedFI = -1;
      if (OpIndex >= 0) {
        const MachineOperand &MO = MI.getOperand(OpIndex);
        if (MO.isFI()) {
          int FI = MO.getIndex();
          if (FI >= 0 && FI < MFI.getObjectIndexEnd() &&
              FrameObjects[FI].IsValid)
            TaggedFI = FI;
        }
      }

      // If this is a stack tagging instruction for a slot that is not part of
      // a group yet, either start a new group or add it to the current one.
      if (TaggedFI >= 0)
        GB.AddMember(TaggedFI);
      else
        GB.EndCurrentGroup();
    }
    // Groups should never span multiple basic blocks.
    GB.EndCurrentGroup();
  }

  // If the function's tagged base pointer is pinned to a stack slot, we want
  // to put that slot first when possible. This will likely place it at SP + 0,
  // and save one instruction when generating the base pointer because IRG does
  // not allow an immediate offset.
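  // For example (illustrative): with the pinned slot at SP + 0 the tagged
  // base pointer is materialized as
  //   irg x0, sp
  // whereas a slot at SP + 16 would need
  //   add x0, sp, #16
  //   irg x0, x0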
  const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
  std::optional<int> TBPI = AFI.getTaggedBasePointerIndex();
  if (TBPI) {
    FrameObjects[*TBPI].ObjectFirst = true;
    FrameObjects[*TBPI].GroupFirst = true;
    int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
    if (FirstGroupIndex >= 0)
      for (FrameObject &Object : FrameObjects)
        if (Object.GroupIndex == FirstGroupIndex)
          Object.GroupFirst = true;
  }

  llvm::stable_sort(FrameObjects, FrameObjectCompare);

  int i = 0;
  for (auto &Obj : FrameObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  LLVM_DEBUG(dbgs() << "Final frame order:\n"; for (auto &Obj
                                                    : FrameObjects) {
    if (!Obj.IsValid)
      break;
    dbgs() << "  " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
    if (Obj.ObjectFirst)
      dbgs() << ", first";
    if (Obj.GroupFirst)
      dbgs() << ", group-first";
    dbgs() << "\n";
  });
}