//===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//
#include "AArch64CallLowering.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/MachineValueType.h"
#define DEBUG_TYPE "aarch64-call-lowering"

using namespace llvm;

AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
    : CallLowering(&TLI) {}

namespace {
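/// Handler for values flowing into the current function: formal arguments and
/// values returned from calls. Stack-passed values are loaded from fixed frame
/// objects, and the total incoming stack usage is recorded in StackUsed.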
struct IncomingArgHandler : public CallLowering::ValueHandler {
  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    switch (VA.getLocInfo()) {
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    }
  }
  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }
  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  bool isIncomingArgumentHandler() const override { return true; }

  uint64_t StackUsed;
};
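/// Incoming-value handler for formal arguments: physical argument registers
/// are marked as live-in to the function and to the current basic block.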
struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
      : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};
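/// Incoming-value handler for call results: physical return registers are
/// added as implicit defs of the call instruction held in MIB.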
struct CallReturnHandler : public IncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : IncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};
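/// Handler for values leaving the current function: call arguments and return
/// values. Values are copied into physical registers (added as implicit uses
/// of MIB) or stored relative to SP, or to fixed frame objects for tail calls,
/// and the outgoing stack usage is recorded in StackSize.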
struct OutgoingArgHandler : public CallLowering::ValueHandler {
  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     MachineInstrBuilder MIB, CCAssignFn *AssignFn,
                     CCAssignFn *AssignFnVarArg, bool IsTailCall = false,
                     int FPDiff = 0)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
        AssignFnVarArg(AssignFnVarArg), IsTailCall(IsTailCall), FPDiff(FPDiff),
        StackSize(0), SPReg(0) {}

  bool isIncomingArgumentHandler() const override { return false; }
  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, 64);
    LLT s64 = LLT::scalar(64);

    if (IsTailCall) {
      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }
  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }
  void assignValueToAddress(const CallLowering::ArgInfo &Arg, Register Addr,
                            uint64_t Size, MachinePointerInfo &MPO,
                            CCValAssign &VA) override {
    unsigned MaxSize = Size * 8;
    // For varargs, we always want to extend them to 8 bytes, in which case
    // we disable setting a max.
    if (!Arg.IsFixed)
      MaxSize = 0;

    Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
                           ? extendRegister(Arg.Regs[0], VA, MaxSize)
                           : Arg.Regs[0];

    // If we extended we might need to adjust the MMO's Size.
    const LLT RegTy = MRI.getType(ValVReg);
    if (RegTy.getSizeInBytes() > Size)
      Size = RegTy.getSizeInBytes();

    assignValueToAddress(ValVReg, Addr, Size, MPO, VA);
  }
  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    bool Res;
    if (Info.IsFixed)
      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
    else
      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);

    StackSize = State.getNextStackOffset();
    return Res;
  }
  MachineInstrBuilder MIB;
  CCAssignFn *AssignFnVarArg;
  bool IsTailCall;

  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  uint64_t StackSize;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;
};
} // namespace
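/// Returns true if the callee is expected to pop its own stack arguments,
/// which on AArch64 only happens for fastcc functions compiled with
/// -tailcallopt (GuaranteedTailCallOpt).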
static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
  return CallConv == CallingConv::Fast && TailCallOpt;
}
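/// Break OrigArg into one ArgInfo per value type it occupies, replacing
/// aggregate/array types with the underlying EVT-derived types and marking
/// consecutive-register blocks when the calling convention requires them.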
void AArch64CallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, MachineRegisterInfo &MRI,
    CallingConv::ID CallConv) const {
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  if (SplitVTs.size() == 0)
    return;

  if (SplitVTs.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
                           OrigArg.Flags[0], OrigArg.IsFixed);
    return;
  }

  // Create one ArgInfo for each virtual register in the original ArgInfo.
  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
      OrigArg.Ty, CallConv, false);
  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
    Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
                           OrigArg.IsFixed);
    if (NeedsRegBlock)
      SplitArgs.back().Flags[0].setInConsecutiveRegs();
  }

  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
}
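// Lower an IR return: build a (not-yet-inserted) RET_ReallyLR, extend or pad
// the return value as the calling convention requires, copy it into the
// physical return registers, then insert the return instruction.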
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                      const Value *Val,
                                      ArrayRef<Register> VRegs,
                                      Register SwiftErrorVReg) const {
  auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
         "Return value without a vreg");
  bool Success = true;
  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();

    MachineRegisterInfo &MRI = MF.getRegInfo();
    const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
    CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
    auto &DL = F.getParent()->getDataLayout();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> SplitArgs;
    CallingConv::ID CC = F.getCallingConv();
    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) > 1) {
        LLVM_DEBUG(dbgs() << "Can't handle extended arg types which need split");
        return false;
      }

      Register CurVReg = VRegs[i];
      ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);

      // i1 is a special case because SDAG i1 true is naturally zero extended
      // when widened using ANYEXT. We need to do it explicitly here.
      if (MRI.getType(CurVReg).getSizeInBits() == 1) {
        CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
      } else {
        // Some types will need extending as specified by the CC.
        MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
        if (EVT(NewVT) != SplitEVTs[i]) {
          unsigned ExtendOp = TargetOpcode::G_ANYEXT;
          if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                             Attribute::SExt))
            ExtendOp = TargetOpcode::G_SEXT;
          else if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                                  Attribute::ZExt))
            ExtendOp = TargetOpcode::G_ZEXT;

          LLT NewLLT(NewVT);
          LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
          CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
          // Instead of an extend, we might have a vector type which needs
          // padding with more elements, e.g. <2 x half> -> <4 x half>.
          if (NewVT.isVector()) {
            if (OldLLT.isVector()) {
              if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
                // We don't handle VA types which are not exactly twice the
                // size, but can easily be done in future.
                if (NewLLT.getNumElements() != OldLLT.getNumElements() * 2) {
                  LLVM_DEBUG(dbgs() << "Outgoing vector ret has too many elts");
                  return false;
                }
                auto Undef = MIRBuilder.buildUndef({OldLLT});
                CurVReg =
                    MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef}).getReg(0);
              } else {
                // Just do a vector extend.
                CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
                              .getReg(0);
              }
            } else if (NewLLT.getNumElements() == 2) {
              // We need to pad a <1 x S> type to <2 x S>. Since we don't have
              // <1 x S> vector types in GISel we use a build_vector instead
              // of a vector merge/concat.
              auto Undef = MIRBuilder.buildUndef({OldLLT});
              CurVReg =
                  MIRBuilder
                      .buildBuildVector({NewLLT}, {CurVReg, Undef.getReg(0)})
                      .getReg(0);
            } else {
              LLVM_DEBUG(dbgs() << "Could not handle ret ty");
              return false;
            }
          } else {
            // A scalar extend.
            CurVReg =
                MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg}).getReg(0);
          }
        }
      }
      if (CurVReg != CurArgInfo.Regs[0]) {
        CurArgInfo.Regs[0] = CurVReg;
        // Reset the arg flags after modifying CurVReg.
        setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      }
      splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, CC);
    }

    OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
    Success = handleAssignments(MIRBuilder, SplitArgs, Handler);
  }

  if (SwiftErrorVReg) {
    MIB.addUse(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
  }

  MIRBuilder.insertInstr(MIB);
  return Success;
}
/// Helper function to compute forwarded registers for musttail calls. Computes
/// the forwarded registers, sets MBB liveness, and emits COPY instructions that
/// can be used to save + restore registers later.
static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
                                             CCAssignFn *AssignFn) {
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasMustTailInVarArgFunc())
    return;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  const Function &F = MF.getFunction();
  assert(F.isVarArg() && "Expected F to be vararg?");

  // Compute the set of forwarded registers. The rest are scratch.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), /*IsVarArg=*/true, MF, ArgLocs,
                 F.getContext());
  SmallVector<MVT, 2> RegParmTypes;
  RegParmTypes.push_back(MVT::i64);
  RegParmTypes.push_back(MVT::f128);

  // Later on, we can use this vector to restore the registers if necessary.
  SmallVectorImpl<ForwardedRegister> &Forwards =
      FuncInfo->getForwardedMustTailRegParms();
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, AssignFn);

  // Conservatively forward X8, since it might be used for an aggregate
  // return.
  if (!CCInfo.isAllocated(AArch64::X8)) {
    unsigned X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
    Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
  }

  // Add the forwards to the MachineBasicBlock and MachineFunction.
  for (const auto &F : Forwards) {
    MBB.addLiveIn(F.PReg);
    MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
  }
}
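// Decide whether this function must be lowered by SelectionDAG instead of
// GlobalISel: any scalable-vector return type or argument forces the fallback.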
bool AArch64CallLowering::fallBackToDAGISel(const Function &F) const {
  if (isa<ScalableVectorType>(F.getReturnType()))
    return true;
  return llvm::any_of(F.args(), [](const Argument &A) {
    return isa<ScalableVectorType>(A.getType());
  });
}
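// Lower the incoming formal arguments: assign each one to registers or fixed
// stack slots per the calling convention, and record the stack usage needed
// later for varargs handling and tail-call lowering.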
bool AArch64CallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();

  SmallVector<ArgInfo, 8> SplitArgs;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()).isZero())
      continue;

    ArgInfo OrigArg{VRegs[i], Arg.getType()};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);

    splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv());
    ++i;
  }
  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);

  FormalArgHandler Handler(MIRBuilder, MRI, AssignFn);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  uint64_t StackOffset = Handler.StackUsed;
  if (F.isVarArg()) {
    auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
    if (!Subtarget.isTargetDarwin()) {
      // FIXME: we need to reimplement saveVarArgsRegisters from
      // AArch64ISelLowering.
      return false;
    }

    // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
    StackOffset = alignTo(Handler.StackUsed, Subtarget.isTargetILP32() ? 4 : 8);

    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
  }
  if (doesCalleeRestoreStack(F.getCallingConv(),
                             MF.getTarget().Options.GuaranteedTailCallOpt)) {
    // We have a non-standard ABI, so why not make full use of the stack that
    // we're going to pop? It must be aligned to 16 B in any case.
    StackOffset = alignTo(StackOffset, 16);

    // If we're expected to restore the stack (e.g. fastcc), then we'll be
    // adding a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackOffset);

    // Our own callers will guarantee that the space is free by giving an
    // aligned value to CALLSEQ_START.
  }

  // When we tail call, we need to check if the callee's arguments
  // will fit on the caller's stack. So, whenever we lower formal arguments,
  // we should keep track of this information, since we might lower a tail call
  // in this function later.
  FuncInfo->setBytesInStackArgArea(StackOffset);

  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  if (Subtarget.hasCustomCallingConv())
    Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);

  handleMustTailForwardedRegisters(MIRBuilder, AssignFn);

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}
/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return CC == CallingConv::Fast;
}
/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::PreserveMost:
  case CallingConv::Swift:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}
/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn
/// for the given calling convention.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}
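// Returns true if the caller and the callee agree on how arguments are passed
// and on which registers the call preserves, which is a prerequisite for
// lowering the call as a tail call.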
bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;
  // Check if the caller and callee will handle arguments in the same way.
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *CalleeAssignFnFixed;
  CCAssignFn *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);

  CCAssignFn *CallerAssignFnFixed;
  CCAssignFn *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  if (!resultsCompatible(Info, MF, InArgs, *CalleeAssignFnFixed,
                         *CalleeAssignFnVarArg, *CallerAssignFnFixed,
                         *CallerAssignFnVarArg))
    return false;

  // Make sure that the caller and callee preserve all of the same registers.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
    TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
    TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
  }

  return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
}
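// Returns true if the outgoing call arguments can be lowered within the
// caller's existing frame: they must fit in the caller's stack argument area,
// and any argument passed in a callee-saved register must simply forward the
// caller's own incoming value of that register.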
bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  // If there are no outgoing arguments, then we are done.
  if (OutArgs.empty())
    return true;

  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();

  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  // We have outgoing arguments. Make sure that we can tail call with them.
  SmallVector<CCValAssign, 16> OutLocs;
  CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());

  if (!analyzeArgInfo(OutInfo, OutArgs, *AssignFnFixed, *AssignFnVarArg)) {
    LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
    return false;
  }

  // Make sure that they can fit on the caller's stack.
  const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
    return false;
  }
  // Verify that the parameters in callee-saved registers match.
  // TODO: Port this over to CallLowering as general code once swiftself is
  // supported.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned i = 0; i < OutLocs.size(); ++i) {
    auto &ArgLoc = OutLocs[i];
    // If it's not a register, it's fine.
    if (!ArgLoc.isRegLoc()) {
      if (Info.IsVarArg) {
        // Be conservative and disallow variadic memory operands to match
        // SDAG's behaviour.
        // FIXME: If the caller's calling convention is C, then we can
        // potentially use its argument area. However, for cases like fastcc,
        // we can't do anything.
        LLVM_DEBUG(
            dbgs()
            << "... Cannot tail call vararg function with stack arguments\n");
        return false;
      }
      continue;
    }
    Register Reg = ArgLoc.getLocReg();

    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;

    LLVM_DEBUG(
        dbgs()
        << "... Call has an argument passed in a callee-saved register.\n");

    // Check if it was copied from.
    ArgInfo &OutInfo = OutArgs[i];

    if (OutInfo.Regs.size() > 1) {
      LLVM_DEBUG(
          dbgs() << "... Cannot handle arguments in multiple registers.\n");
      return false;
    }

    // Check if we copy the register, walking through copies from virtual
    // registers. Note that getDefIgnoringCopies does not ignore copies from
    // physical registers.
    MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
    if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
      LLVM_DEBUG(
          dbgs()
          << "... Parameter was not copied into a VReg, cannot tail call.\n");
      return false;
    }

    // Got a copy. Verify that it's the same as the register we want.
    Register CopyRHS = RegDef->getOperand(1).getReg();
    if (CopyRHS != Reg) {
      LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
                           "VReg, cannot tail call.\n");
      return false;
    }
  }

  return true;
}
bool AArch64CallLowering::isEligibleForTailCallOptimization(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &InArgs,
    SmallVectorImpl<ArgInfo> &OutArgs) const {

  // Must pass all target-independent checks in order to tail call optimize.
  if (!Info.IsTailCall)
    return false;

  CallingConv::ID CalleeCC = Info.CallConv;
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &CallerF = MF.getFunction();

  LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");
  if (Info.SwiftErrorVReg) {
    // TODO: We should handle this.
    // Note that this is also handled by the check for no outgoing arguments.
    // Proactively disabling this though, because the swifterror handling in
    // lowerCall inserts a COPY *after* the location of the call.
    LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
    return false;
  }

  if (!mayTailCallThisCC(CalleeCC)) {
    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
    return false;
  }

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible (see
  // X86).
  //
  // FIXME: In AArch64ISelLowering, this isn't worked around. Can/should we try
  // to?
  //
  // On Windows, "inreg" attributes signify non-aggregate indirect returns.
  // In this case, it is necessary to save/restore X0 in the callee. Tail
  // call opt interferes with this. So we disable tail call opt when the
  // caller has an argument with "inreg" attribute.
  //
  // FIXME: Check whether the callee also has an "inreg" argument.
  //
  // When the caller has a swifterror argument, we don't want to tail call
  // because we would have to move into the swifterror register before the
  // tail call.
  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
                         "inreg, or swifterror arguments\n");
    return false;
  }
  // Externally-defined functions with weak linkage should not be
  // tail-called on AArch64 when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (Info.Callee.isGlobal()) {
    const GlobalValue *GV = Info.Callee.getGlobal();
    const Triple &TT = MF.getTarget().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
         TT.isOSBinFormatMachO())) {
      LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
                           "with weak linkage for this OS.\n");
      return false;
    }
  }
  // If we have -tailcallopt, then we're done.
  if (MF.getTarget().Options.GuaranteedTailCallOpt)
    return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();

  // We don't have -tailcallopt, so we're allowed to change the ABI (sibcall).
  // Try to find cases where we can do that.

  // I want anyone implementing a new calling convention to think long and hard
  // about this assert.
  assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
         "Unexpected variadic calling convention");

  // Verify that the incoming and outgoing arguments from the callee are
  // safe to tail call.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(
      dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}
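/// Pick the machine opcode for the call: BL or BLR for a normal call, and one
/// of the TCRETURN pseudos for a tail call (the BTI-safe variant when
/// branch-target-enforcement is active and the callee is in a register).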
static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall) {
  if (!IsTailCall)
    return IsIndirect ? getBLRCallOpcode(CallerF) : (unsigned)AArch64::BL;

  if (!IsIndirect)
    return AArch64::TCRETURNdi;

  // When BTI is enabled, we need to use TCRETURNriBTI to make sure that we use
  // x16 or x17.
  if (CallerF.getFunction().hasFnAttribute("branch-target-enforcement"))
    return AArch64::TCRETURNriBTI;

  return AArch64::TCRETURNri;
}
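// Lower a call we have already decided to emit as a tail call: marshal the
// outgoing arguments into the caller's existing argument area (adjusting the
// stack only under -tailcallopt) and emit a TCRETURN* pseudo in place of a
// normal call.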
bool AArch64CallLowering::lowerTailCall(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();

  // True when we're tail calling, but without -tailcallopt.
  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;

  // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
  // register class. Until we can do that, we should fall back here.
  if (F.hasFnAttribute("branch-target-enforcement")) {
    LLVM_DEBUG(
        dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
    return false;
  }
  // Find out which ABI gets to decide where things go.
  CallingConv::ID CalleeCC = Info.CallConv;
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  MachineInstrBuilder CallSeqStart;
  if (!IsSibCall)
    CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.add(Info.Callee);

  // Byte offset for the tail call. When we are sibcalling, this will always
  // be 0.
  MIB.addImm(0);
  // Tell the call which registers are clobbered.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);
  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0.
  int FPDiff = 0;

  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
  // by -tailcallopt. For sibcalls, the memory operands for the call are
  // already available in the caller's incoming argument space.
  unsigned NumBytes = 0;
  if (!IsSibCall) {
    // We aren't sibcalling, so we need to compute FPDiff. We need to do this
    // before handling assignments, because FPDiff must be known for memory
    // arguments.
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
    SmallVector<CCValAssign, 16> OutLocs;
    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
    analyzeArgInfo(OutInfo, OutArgs, *AssignFnFixed, *AssignFnVarArg);

    // The callee will pop the argument stack as a tail call. Thus, we must
    // keep it 16-byte aligned.
    NumBytes = alignTo(OutInfo.getNextStackOffset(), 16);

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started at
    // a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }
  const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();

  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
                             AssignFnVarArg, true, FPDiff);
  if (!handleAssignments(MIRBuilder, OutArgs, Handler))
    return false;

  if (Info.IsVarArg && Info.IsMustTailCall) {
    // Now we know what's being passed to the function. Add uses to the call for
    // the forwarded registers that we *aren't* passing as parameters. This will
    // preserve the copies we build earlier.
    for (const auto &F : Forwards) {
      Register ForwardedReg = F.PReg;
      // If the register is already passed, or aliases a register which is
      // already being passed, then skip it.
      if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
            if (!Use.isReg())
              return false;
            return TRI->regsOverlap(Use.getReg(), ForwardedReg);
          }))
        continue;

      // We aren't passing it already, so we should add it to the call.
      MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
      MIB.addReg(ForwardedReg, RegState::Implicit);
    }
  }
  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
  // sequence start and end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(NumBytes).addImm(0);
    // End the call sequence *before* emitting the call. Normally, we would
    // tidy the frame up after the call. However, here, we've laid out the
    // parameters so that when SP is reset, they will be in the correct
    // location.
    MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(NumBytes).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific instruction,
  // it must have a register class matching the constraint of that instruction.
  if (Info.Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
        0));

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}
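// Lower a (possibly tail) call: split the arguments per the calling
// convention, try the tail-call path first, otherwise emit the
// ADJCALLSTACKDOWN/UP sequence around a BL/BLR, marshal the arguments, and
// copy any returned values back into virtual registers.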
bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                    CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs) {
    splitToValueTypes(OrigArg, OutArgs, DL, MRI, Info.CallConv);
    // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
    if (OrigArg.Ty->isIntegerTy(1))
      OutArgs.back().Flags[0].setZExt();
  }

  SmallVector<ArgInfo, 8> InArgs;
  if (!Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, MRI, F.getCallingConv());

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    // There are types of incoming/outgoing arguments we can't handle yet, so
    // it doesn't make sense to actually die here like in ISelLowering. Instead,
    // fall back to SelectionDAG and let it try to handle this.
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);
  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MachineInstrBuilder CallSeqStart;
  CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false);

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.add(Info.Callee);

  // Tell the call which registers are clobbered.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);
  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
                             AssignFnVarArg, false);
  if (!handleAssignments(MIRBuilder, OutArgs, Handler))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Info.Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
        0));
  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (!Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
    if (!handleAssignments(MIRBuilder, InArgs, Handler))
      return false;
  }

  if (Info.SwiftErrorVReg) {
    MIB.addDef(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
  }

  uint64_t CalleePopBytes =
      doesCalleeRestoreStack(Info.CallConv,
                             MF.getTarget().Options.GuaranteedTailCallOpt)
          ? alignTo(Handler.StackSize, 16)
          : 0;

  CallSeqStart.addImm(Handler.StackSize).addImm(0);
  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
      .addImm(Handler.StackSize)
      .addImm(CalleePopBytes);

  return true;
}