//=== X86CallingConv.cpp - X86 Custom Calling Convention Impl -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of custom routines for the X86
// Calling Convention that aren't done by tablegen.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/IR/CallingConv.h"
22 bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
\r
23 CCValAssign::LocInfo &LocInfo,
\r
24 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
\r
25 // List of GPR registers that are available to store values in regcall
\r
26 // calling convention.
\r
27 static const MCPhysReg RegList[] = {X86::EAX, X86::ECX, X86::EDX, X86::EDI,
\r
30 // The vector will save all the available registers for allocation.
\r
31 SmallVector<unsigned, 5> AvailableRegs;
\r
33 // searching for the available registers.
\r
34 for (auto Reg : RegList) {
\r
35 if (!State.isAllocated(Reg))
\r
36 AvailableRegs.push_back(Reg);
\r
39 const size_t RequiredGprsUponSplit = 2;
\r
40 if (AvailableRegs.size() < RequiredGprsUponSplit)
\r
41 return false; // Not enough free registers - continue the search.
\r
43 // Allocating the available registers.
\r
44 for (unsigned I = 0; I < RequiredGprsUponSplit; I++) {
\r
46 // Marking the register as located.
\r
47 unsigned Reg = State.AllocateReg(AvailableRegs[I]);
\r
49 // Since we previously made sure that 2 registers are available
\r
50 // we expect that a real register number will be returned.
\r
51 assert(Reg && "Expecting a register will be available");
\r
53 // Assign the value to the allocated register
\r
54 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
\r
57 // Successful in allocating regsiters - stop scanning next rules.
\r
61 static ArrayRef<MCPhysReg> CC_X86_VectorCallGetSSEs(const MVT &ValVT) {
\r
62 if (ValVT.is512BitVector()) {
\r
63 static const MCPhysReg RegListZMM[] = {X86::ZMM0, X86::ZMM1, X86::ZMM2,
\r
64 X86::ZMM3, X86::ZMM4, X86::ZMM5};
\r
65 return makeArrayRef(std::begin(RegListZMM), std::end(RegListZMM));
\r
68 if (ValVT.is256BitVector()) {
\r
69 static const MCPhysReg RegListYMM[] = {X86::YMM0, X86::YMM1, X86::YMM2,
\r
70 X86::YMM3, X86::YMM4, X86::YMM5};
\r
71 return makeArrayRef(std::begin(RegListYMM), std::end(RegListYMM));
\r
74 static const MCPhysReg RegListXMM[] = {X86::XMM0, X86::XMM1, X86::XMM2,
\r
75 X86::XMM3, X86::XMM4, X86::XMM5};
\r
76 return makeArrayRef(std::begin(RegListXMM), std::end(RegListXMM));
\r
79 static ArrayRef<MCPhysReg> CC_X86_64_VectorCallGetGPRs() {
\r
80 static const MCPhysReg RegListGPR[] = {X86::RCX, X86::RDX, X86::R8, X86::R9};
\r
81 return makeArrayRef(std::begin(RegListGPR), std::end(RegListGPR));
\r
84 static bool CC_X86_VectorCallAssignRegister(unsigned &ValNo, MVT &ValVT,
\r
86 CCValAssign::LocInfo &LocInfo,
\r
87 ISD::ArgFlagsTy &ArgFlags,
\r
90 ArrayRef<MCPhysReg> RegList = CC_X86_VectorCallGetSSEs(ValVT);
\r
91 bool Is64bit = static_cast<const X86Subtarget &>(
\r
92 State.getMachineFunction().getSubtarget())
\r
95 for (auto Reg : RegList) {
\r
96 // If the register is not marked as allocated - assign to it.
\r
97 if (!State.isAllocated(Reg)) {
\r
98 unsigned AssigedReg = State.AllocateReg(Reg);
\r
99 assert(AssigedReg == Reg && "Expecting a valid register allocation");
\r
101 CCValAssign::getReg(ValNo, ValVT, AssigedReg, LocVT, LocInfo));
\r
104 // If the register is marked as shadow allocated - assign to it.
\r
105 if (Is64bit && State.IsShadowAllocatedReg(Reg)) {
\r
106 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
\r
111 llvm_unreachable("Clang should ensure that hva marked vectors will have "
\r
112 "an available register.");
\r
116 bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
\r
117 CCValAssign::LocInfo &LocInfo,
\r
118 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
\r
119 // On the second pass, go through the HVAs only.
\r
120 if (ArgFlags.isSecArgPass()) {
\r
121 if (ArgFlags.isHva())
\r
122 return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
\r
127 // Process only vector types as defined by vectorcall spec:
\r
128 // "A vector type is either a floating-point type, for example,
\r
129 // a float or double, or an SIMD vector type, for example, __m128 or __m256".
\r
130 if (!(ValVT.isFloatingPoint() ||
\r
131 (ValVT.isVector() && ValVT.getSizeInBits() >= 128))) {
\r
132 // If R9 was already assigned it means that we are after the fourth element
\r
133 // and because this is not an HVA / Vector type, we need to allocate
\r
134 // shadow XMM register.
\r
135 if (State.isAllocated(X86::R9)) {
\r
136 // Assign shadow XMM register.
\r
137 (void)State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT));
\r
143 if (!ArgFlags.isHva() || ArgFlags.isHvaStart()) {
\r
144 // Assign shadow GPR register.
\r
145 (void)State.AllocateReg(CC_X86_64_VectorCallGetGPRs());
\r
147 // Assign XMM register - (shadow for HVA and non-shadow for non HVA).
\r
148 if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
\r
149 // In Vectorcall Calling convention, additional shadow stack can be
\r
150 // created on top of the basic 32 bytes of win64.
\r
151 // It can happen if the fifth or sixth argument is vector type or HVA.
\r
152 // At that case for each argument a shadow stack of 8 bytes is allocated.
\r
153 if (Reg == X86::XMM4 || Reg == X86::XMM5)
\r
154 State.AllocateStack(8, 8);
\r
156 if (!ArgFlags.isHva()) {
\r
157 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
\r
158 return true; // Allocated a register - Stop the search.
\r
163 // If this is an HVA - Stop the search,
\r
164 // otherwise continue the search.
\r
165 return ArgFlags.isHva();
\r
168 bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
\r
169 CCValAssign::LocInfo &LocInfo,
\r
170 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
\r
171 // On the second pass, go through the HVAs only.
\r
172 if (ArgFlags.isSecArgPass()) {
\r
173 if (ArgFlags.isHva())
\r
174 return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
\r
179 // Process only vector types as defined by vectorcall spec:
\r
180 // "A vector type is either a floating point type, for example,
\r
181 // a float or double, or an SIMD vector type, for example, __m128 or __m256".
\r
182 if (!(ValVT.isFloatingPoint() ||
\r
183 (ValVT.isVector() && ValVT.getSizeInBits() >= 128))) {
\r
187 if (ArgFlags.isHva())
\r
188 return true; // If this is an HVA - Stop the search.
\r
190 // Assign XMM register.
\r
191 if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
\r
192 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
\r
196 // In case we did not find an available XMM register for a vector -
\r
197 // pass it indirectly.
\r
198 // It is similar to CCPassIndirect, with the addition of inreg.
\r
199 if (!ValVT.isFloatingPoint()) {
\r
201 LocInfo = CCValAssign::Indirect;
\r
202 ArgFlags.setInReg();
\r
205 return false; // No register was assigned - Continue the search.
\r
208 } // End llvm namespace
\r