1 //===- AArch64RegisterBankInfo.cpp -------------------------------*- C++ -*-==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// This file implements the targeting of the RegisterBankInfo class for
/// AArch64.
12 /// \todo This should be generated by TableGen.
13 //===----------------------------------------------------------------------===//
15 #include "AArch64RegisterBankInfo.h"
16 #include "AArch64InstrInfo.h" // For XXXRegClassID.
17 #include "llvm/CodeGen/LowLevelType.h"
18 #include "llvm/CodeGen/MachineRegisterInfo.h"
19 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
20 #include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
21 #include "llvm/Target/TargetRegisterInfo.h"
22 #include "llvm/Target/TargetSubtargetInfo.h"
24 // This file will be TableGen'ed at some point.
25 #include "AArch64GenRegisterBankInfo.def"
29 #ifndef LLVM_BUILD_GLOBAL_ISEL
30 #error "You shouldn't build this"
33 AArch64RegisterBankInfo::AArch64RegisterBankInfo(const TargetRegisterInfo &TRI)
34 : RegisterBankInfo(AArch64::RegBanks, AArch64::NumRegisterBanks) {
35 static bool AlreadyInit = false;
36 // We have only one set of register banks, whatever the subtarget
37 // is. Therefore, the initialization of the RegBanks table should be
38 // done only once. Indeed the table of all register banks
39 // (AArch64::RegBanks) is unique in the compiler. At some point, it
40 // will get tablegen'ed and the whole constructor becomes empty.
44 // Initialize the GPR bank.
45 createRegisterBank(AArch64::GPRRegBankID, "GPR");
46 // The GPR register bank is fully defined by all the registers in
47 // GR64all + its subclasses.
48 addRegBankCoverage(AArch64::GPRRegBankID, AArch64::GPR64allRegClassID, TRI);
49 const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
51 assert(&AArch64::GPRRegBank == &RBGPR &&
52 "The order in RegBanks is messed up");
53 assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
54 "Subclass not added?");
55 assert(RBGPR.getSize() == 64 && "GPRs should hold up to 64-bit");
57 // Initialize the FPR bank.
58 createRegisterBank(AArch64::FPRRegBankID, "FPR");
59 // The FPR register bank is fully defined by all the registers in
60 // GR64all + its subclasses.
61 addRegBankCoverage(AArch64::FPRRegBankID, AArch64::QQQQRegClassID, TRI);
62 const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
64 assert(&AArch64::FPRRegBank == &RBFPR &&
65 "The order in RegBanks is messed up");
66 assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
67 "Subclass not added?");
68 assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
69 "Subclass not added?");
70 assert(RBFPR.getSize() == 512 &&
71 "FPRs should hold up to 512-bit via QQQQ sequence");
73 // Initialize the CCR bank.
74 createRegisterBank(AArch64::CCRRegBankID, "CCR");
75 addRegBankCoverage(AArch64::CCRRegBankID, AArch64::CCRRegClassID, TRI);
76 const RegisterBank &RBCCR = getRegBank(AArch64::CCRRegBankID);
78 assert(&AArch64::CCRRegBank == &RBCCR &&
79 "The order in RegBanks is messed up");
80 assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
82 assert(RBCCR.getSize() == 32 && "CCR should hold up to 32-bit");
84 // Check that the TableGen'ed like file is in sync we our expectations.
86 assert(AArch64::PartialMappingIdx::PMI_GPR32 ==
87 AArch64::PartialMappingIdx::PMI_FirstGPR &&
88 "GPR32 index not first in the GPR list");
89 assert(AArch64::PartialMappingIdx::PMI_GPR64 ==
90 AArch64::PartialMappingIdx::PMI_LastGPR &&
91 "GPR64 index not last in the GPR list");
92 assert(AArch64::PartialMappingIdx::PMI_FirstGPR <=
93 AArch64::PartialMappingIdx::PMI_LastGPR &&
94 "GPR list is backward");
95 assert(AArch64::PartialMappingIdx::PMI_FPR32 ==
96 AArch64::PartialMappingIdx::PMI_FirstFPR &&
97 "FPR32 index not first in the FPR list");
98 assert(AArch64::PartialMappingIdx::PMI_FPR512 ==
99 AArch64::PartialMappingIdx::PMI_LastFPR &&
100 "FPR512 index not last in the FPR list");
101 assert(AArch64::PartialMappingIdx::PMI_FirstFPR <=
102 AArch64::PartialMappingIdx::PMI_LastFPR &&
103 "FPR list is backward");
104 assert(AArch64::PartialMappingIdx::PMI_FPR32 + 1 ==
105 AArch64::PartialMappingIdx::PMI_FPR64 &&
106 AArch64::PartialMappingIdx::PMI_FPR64 + 1 ==
107 AArch64::PartialMappingIdx::PMI_FPR128 &&
108 AArch64::PartialMappingIdx::PMI_FPR128 + 1 ==
109 AArch64::PartialMappingIdx::PMI_FPR256 &&
110 AArch64::PartialMappingIdx::PMI_FPR256 + 1 ==
111 AArch64::PartialMappingIdx::PMI_FPR512 &&
112 "FPR indices not properly ordered");
114 // Check partial mapping.
115 #define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB) \
117 const PartialMapping &Map = \
118 AArch64::PartMappings[AArch64::PartialMappingIdx::Idx - \
119 AArch64::PartialMappingIdx::PMI_Min]; \
121 assert(Map.StartIdx == ValStartIdx && Map.Length == ValLength && \
122 Map.RegBank == &RB && #Idx " is incorrectly initialized"); \
125 CHECK_PARTIALMAP(PMI_GPR32, 0, 32, RBGPR);
126 CHECK_PARTIALMAP(PMI_GPR64, 0, 64, RBGPR);
127 CHECK_PARTIALMAP(PMI_FPR32, 0, 32, RBFPR);
128 CHECK_PARTIALMAP(PMI_FPR64, 0, 64, RBFPR);
129 CHECK_PARTIALMAP(PMI_FPR128, 0, 128, RBFPR);
130 CHECK_PARTIALMAP(PMI_FPR256, 0, 256, RBFPR);
131 CHECK_PARTIALMAP(PMI_FPR512, 0, 512, RBFPR);
133 // Check value mapping.
134 #define CHECK_VALUEMAP_IMPL(RBName, Size, Offset) \
136 unsigned PartialMapBaseIdx = \
137 AArch64::PartialMappingIdx::PMI_##RBName##Size - \
138 AArch64::PartialMappingIdx::PMI_Min; \
139 (void)PartialMapBaseIdx; \
140 const ValueMapping &Map = AArch64::getValueMapping( \
141 AArch64::PartialMappingIdx::PMI_First##RBName, Size)[Offset]; \
143 assert(Map.BreakDown == &AArch64::PartMappings[PartialMapBaseIdx] && \
144 Map.NumBreakDowns == 1 && #RBName #Size \
145 " " #Offset " is incorrectly initialized"); \
148 #define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)
150 CHECK_VALUEMAP(GPR, 32);
151 CHECK_VALUEMAP(GPR, 64);
152 CHECK_VALUEMAP(FPR, 32);
153 CHECK_VALUEMAP(FPR, 64);
154 CHECK_VALUEMAP(FPR, 128);
155 CHECK_VALUEMAP(FPR, 256);
156 CHECK_VALUEMAP(FPR, 512);
158 // Check the value mapping for 3-operands instructions where all the operands
159 // map to the same value mapping.
160 #define CHECK_VALUEMAP_3OPS(RBName, Size) \
162 CHECK_VALUEMAP_IMPL(RBName, Size, 0); \
163 CHECK_VALUEMAP_IMPL(RBName, Size, 1); \
164 CHECK_VALUEMAP_IMPL(RBName, Size, 2); \
167 CHECK_VALUEMAP_3OPS(GPR, 32);
168 CHECK_VALUEMAP_3OPS(GPR, 64);
169 CHECK_VALUEMAP_3OPS(FPR, 32);
170 CHECK_VALUEMAP_3OPS(FPR, 64);
171 CHECK_VALUEMAP_3OPS(FPR, 128);
172 CHECK_VALUEMAP_3OPS(FPR, 256);
173 CHECK_VALUEMAP_3OPS(FPR, 512);
175 #define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size) \
177 unsigned PartialMapDstIdx = \
178 AArch64::PMI_##RBNameDst##Size - AArch64::PMI_Min; \
179 unsigned PartialMapSrcIdx = \
180 AArch64::PMI_##RBNameSrc##Size - AArch64::PMI_Min; \
181 (void) PartialMapDstIdx; \
182 (void) PartialMapSrcIdx; \
183 const ValueMapping *Map = AArch64::getCopyMapping( \
184 AArch64::PMI_First##RBNameDst == AArch64::PMI_FirstGPR, \
185 AArch64::PMI_First##RBNameSrc == AArch64::PMI_FirstGPR, Size); \
187 assert(Map[0].BreakDown == &AArch64::PartMappings[PartialMapDstIdx] && \
188 Map[0].NumBreakDowns == 1 && #RBNameDst #Size \
189 " Dst is incorrectly initialized"); \
190 assert(Map[1].BreakDown == &AArch64::PartMappings[PartialMapSrcIdx] && \
191 Map[1].NumBreakDowns == 1 && #RBNameSrc #Size \
192 " Src is incorrectly initialized"); \
196 CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 32);
197 CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 32);
198 CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 64);
199 CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 64);
200 CHECK_VALUEMAP_CROSSREGCPY(FPR, FPR, 32);
201 CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 32);
202 CHECK_VALUEMAP_CROSSREGCPY(FPR, FPR, 64);
203 CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 64);
205 assert(verify(TRI) && "Invalid register bank information");
208 unsigned AArch64RegisterBankInfo::copyCost(const RegisterBank &A,
209 const RegisterBank &B,
210 unsigned Size) const {
211 // What do we do with different size?
212 // copy are same size.
213 // Will introduce other hooks for different size:
215 // * build_sequence cost.
217 // Copy from (resp. to) GPR to (resp. from) FPR involves FMOV.
218 // FIXME: This should be deduced from the scheduling model.
219 if (&A == &AArch64::GPRRegBank && &B == &AArch64::FPRRegBank)
220 // FMOVXDr or FMOVWSr.
222 if (&A == &AArch64::FPRRegBank && &B == &AArch64::GPRRegBank)
223 // FMOVDXr or FMOVSWr.
226 return RegisterBankInfo::copyCost(A, B, Size);
229 const RegisterBank &AArch64RegisterBankInfo::getRegBankFromRegClass(
230 const TargetRegisterClass &RC) const {
231 switch (RC.getID()) {
232 case AArch64::FPR8RegClassID:
233 case AArch64::FPR16RegClassID:
234 case AArch64::FPR32RegClassID:
235 case AArch64::FPR64RegClassID:
236 case AArch64::FPR128RegClassID:
237 case AArch64::FPR128_loRegClassID:
238 case AArch64::DDRegClassID:
239 case AArch64::DDDRegClassID:
240 case AArch64::DDDDRegClassID:
241 case AArch64::QQRegClassID:
242 case AArch64::QQQRegClassID:
243 case AArch64::QQQQRegClassID:
244 return getRegBank(AArch64::FPRRegBankID);
245 case AArch64::GPR32commonRegClassID:
246 case AArch64::GPR32RegClassID:
247 case AArch64::GPR32spRegClassID:
248 case AArch64::GPR32sponlyRegClassID:
249 case AArch64::GPR32allRegClassID:
250 case AArch64::GPR64commonRegClassID:
251 case AArch64::GPR64RegClassID:
252 case AArch64::GPR64spRegClassID:
253 case AArch64::GPR64sponlyRegClassID:
254 case AArch64::GPR64allRegClassID:
255 case AArch64::tcGPR64RegClassID:
256 case AArch64::WSeqPairsClassRegClassID:
257 case AArch64::XSeqPairsClassRegClassID:
258 return getRegBank(AArch64::GPRRegBankID);
259 case AArch64::CCRRegClassID:
260 return getRegBank(AArch64::CCRRegBankID);
262 llvm_unreachable("Register class not supported");
266 RegisterBankInfo::InstructionMappings
267 AArch64RegisterBankInfo::getInstrAlternativeMappings(
268 const MachineInstr &MI) const {
269 const MachineFunction &MF = *MI.getParent()->getParent();
270 const TargetSubtargetInfo &STI = MF.getSubtarget();
271 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
272 const MachineRegisterInfo &MRI = MF.getRegInfo();
274 switch (MI.getOpcode()) {
275 case TargetOpcode::G_OR: {
276 // 32 and 64-bit or can be mapped on either FPR or
277 // GPR for the same cost.
278 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
279 if (Size != 32 && Size != 64)
282 // If the instruction has any implicit-defs or uses,
283 // do not mess with it.
284 if (MI.getNumOperands() != 3)
286 InstructionMappings AltMappings;
287 InstructionMapping GPRMapping(
288 /*ID*/ 1, /*Cost*/ 1,
289 AArch64::getValueMapping(AArch64::PMI_FirstGPR, Size),
291 InstructionMapping FPRMapping(
292 /*ID*/ 2, /*Cost*/ 1,
293 AArch64::getValueMapping(AArch64::PMI_FirstFPR, Size),
296 AltMappings.emplace_back(std::move(GPRMapping));
297 AltMappings.emplace_back(std::move(FPRMapping));
300 case TargetOpcode::G_BITCAST: {
301 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
302 if (Size != 32 && Size != 64)
305 // If the instruction has any implicit-defs or uses,
306 // do not mess with it.
307 if (MI.getNumOperands() != 2)
310 InstructionMappings AltMappings;
311 InstructionMapping GPRMapping(
312 /*ID*/ 1, /*Cost*/ 1,
313 AArch64::getCopyMapping(/*DstIsGPR*/ true, /*SrcIsGPR*/ true, Size),
315 InstructionMapping FPRMapping(
316 /*ID*/ 2, /*Cost*/ 1,
317 AArch64::getCopyMapping(/*DstIsGPR*/ false, /*SrcIsGPR*/ false, Size),
319 InstructionMapping GPRToFPRMapping(
321 /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
322 AArch64::getCopyMapping(/*DstIsGPR*/ false, /*SrcIsGPR*/ true, Size),
324 InstructionMapping FPRToGPRMapping(
326 /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
327 AArch64::getCopyMapping(/*DstIsGPR*/ true, /*SrcIsGPR*/ false, Size),
330 AltMappings.emplace_back(std::move(GPRMapping));
331 AltMappings.emplace_back(std::move(FPRMapping));
332 AltMappings.emplace_back(std::move(GPRToFPRMapping));
333 AltMappings.emplace_back(std::move(FPRToGPRMapping));
336 case TargetOpcode::G_LOAD: {
337 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
341 // If the instruction has any implicit-defs or uses,
342 // do not mess with it.
343 if (MI.getNumOperands() != 2)
346 InstructionMappings AltMappings;
347 InstructionMapping GPRMapping(
348 /*ID*/ 1, /*Cost*/ 1,
350 {AArch64::getValueMapping(AArch64::PMI_FirstGPR, Size),
351 // Addresses are GPR 64-bit.
352 AArch64::getValueMapping(AArch64::PMI_FirstGPR, 64)}),
354 InstructionMapping FPRMapping(
355 /*ID*/ 2, /*Cost*/ 1,
357 {AArch64::getValueMapping(AArch64::PMI_FirstFPR, Size),
358 // Addresses are GPR 64-bit.
359 AArch64::getValueMapping(AArch64::PMI_FirstGPR, 64)}),
362 AltMappings.emplace_back(std::move(GPRMapping));
363 AltMappings.emplace_back(std::move(FPRMapping));
369 return RegisterBankInfo::getInstrAlternativeMappings(MI);
372 void AArch64RegisterBankInfo::applyMappingImpl(
373 const OperandsMapper &OpdMapper) const {
374 switch (OpdMapper.getMI().getOpcode()) {
375 case TargetOpcode::G_OR:
376 case TargetOpcode::G_BITCAST:
377 case TargetOpcode::G_LOAD: {
378 // Those ID must match getInstrAlternativeMappings.
379 assert((OpdMapper.getInstrMapping().getID() >= 1 &&
380 OpdMapper.getInstrMapping().getID() <= 4) &&
381 "Don't know how to handle that ID");
382 return applyDefaultMapping(OpdMapper);
385 llvm_unreachable("Don't know how to handle that operation");
389 /// Returns whether opcode \p Opc is a pre-isel generic floating-point opcode,
390 /// having only floating-point operands.
391 static bool isPreISelGenericFloatingPointOpcode(unsigned Opc) {
393 case TargetOpcode::G_FADD:
394 case TargetOpcode::G_FSUB:
395 case TargetOpcode::G_FMUL:
396 case TargetOpcode::G_FDIV:
397 case TargetOpcode::G_FCONSTANT:
398 case TargetOpcode::G_FPEXT:
399 case TargetOpcode::G_FPTRUNC:
405 RegisterBankInfo::InstructionMapping
406 AArch64RegisterBankInfo::getSameKindOfOperandsMapping(const MachineInstr &MI) {
407 const unsigned Opc = MI.getOpcode();
408 const MachineFunction &MF = *MI.getParent()->getParent();
409 const MachineRegisterInfo &MRI = MF.getRegInfo();
411 unsigned NumOperands = MI.getNumOperands();
412 assert(NumOperands <= 3 &&
413 "This code is for instructions with 3 or less operands");
415 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
416 unsigned Size = Ty.getSizeInBits();
417 bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
420 // Make sure all the operands are using similar size and type.
421 // Should probably be checked by the machine verifier.
422 // This code won't catch cases where the number of lanes is
423 // different between the operands.
424 // If we want to go to that level of details, it is probably
425 // best to check that the types are the same, period.
426 // Currently, we just check that the register banks are the same
428 for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
429 LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
430 assert(AArch64::getRegBankBaseIdxOffset(OpTy.getSizeInBits()) ==
431 AArch64::getRegBankBaseIdxOffset(Size) &&
432 "Operand has incompatible size");
433 bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
435 assert(IsFPR == OpIsFPR && "Operand has incompatible type");
437 #endif // End NDEBUG.
439 AArch64::PartialMappingIdx RBIdx =
440 IsFPR ? AArch64::PMI_FirstFPR : AArch64::PMI_FirstGPR;
442 return InstructionMapping{DefaultMappingID, 1,
443 AArch64::getValueMapping(RBIdx, Size), NumOperands};
446 RegisterBankInfo::InstructionMapping
447 AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
448 const unsigned Opc = MI.getOpcode();
449 const MachineFunction &MF = *MI.getParent()->getParent();
450 const MachineRegisterInfo &MRI = MF.getRegInfo();
452 // Try the default logic for non-generic instructions that are either copies
453 // or already have some operands assigned to banks.
454 if (!isPreISelGenericOpcode(Opc)) {
455 RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
456 if (Mapping.isValid())
461 // G_{F|S|U}REM are not listed because they are not legal.
463 case TargetOpcode::G_ADD:
464 case TargetOpcode::G_SUB:
465 case TargetOpcode::G_GEP:
466 case TargetOpcode::G_MUL:
467 case TargetOpcode::G_SDIV:
468 case TargetOpcode::G_UDIV:
470 case TargetOpcode::G_AND:
471 case TargetOpcode::G_OR:
472 case TargetOpcode::G_XOR:
474 case TargetOpcode::G_SHL:
475 case TargetOpcode::G_LSHR:
476 case TargetOpcode::G_ASHR:
477 // Floating point ops.
478 case TargetOpcode::G_FADD:
479 case TargetOpcode::G_FSUB:
480 case TargetOpcode::G_FMUL:
481 case TargetOpcode::G_FDIV:
482 return getSameKindOfOperandsMapping(MI);
483 case TargetOpcode::G_BITCAST: {
484 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
485 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
486 unsigned Size = DstTy.getSizeInBits();
487 bool DstIsGPR = !DstTy.isVector();
488 bool SrcIsGPR = !SrcTy.isVector();
489 const RegisterBank &DstRB =
490 DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
491 const RegisterBank &SrcRB =
492 SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
493 return InstructionMapping{DefaultMappingID, copyCost(DstRB, SrcRB, Size),
494 AArch64::getCopyMapping(DstIsGPR, SrcIsGPR, Size),
497 case TargetOpcode::G_SEQUENCE:
498 // FIXME: support this, but the generic code is really not going to do
500 return InstructionMapping();
505 unsigned NumOperands = MI.getNumOperands();
507 // Track the size and bank of each register. We don't do partial mappings.
508 SmallVector<unsigned, 4> OpSize(NumOperands);
509 SmallVector<AArch64::PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
510 for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
511 auto &MO = MI.getOperand(Idx);
515 LLT Ty = MRI.getType(MO.getReg());
516 OpSize[Idx] = Ty.getSizeInBits();
518 // As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
519 // For floating-point instructions, scalars go in FPRs.
520 if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc))
521 OpRegBankIdx[Idx] = AArch64::PMI_FirstFPR;
523 OpRegBankIdx[Idx] = AArch64::PMI_FirstGPR;
527 // Some of the floating-point instructions have mixed GPR and FPR operands:
528 // fine-tune the computed mapping.
530 case TargetOpcode::G_SITOFP:
531 case TargetOpcode::G_UITOFP: {
532 OpRegBankIdx = {AArch64::PMI_FirstFPR, AArch64::PMI_FirstGPR};
535 case TargetOpcode::G_FPTOSI:
536 case TargetOpcode::G_FPTOUI: {
537 OpRegBankIdx = {AArch64::PMI_FirstGPR, AArch64::PMI_FirstFPR};
540 case TargetOpcode::G_FCMP: {
541 OpRegBankIdx = {AArch64::PMI_FirstGPR,
542 /* Predicate */ AArch64::PMI_None, AArch64::PMI_FirstFPR,
543 AArch64::PMI_FirstFPR};
546 case TargetOpcode::G_BITCAST: {
547 // This is going to be a cross register bank copy and this is expensive.
548 if (OpRegBankIdx[0] != OpRegBankIdx[1])
550 copyCost(*AArch64::PartMappings[OpRegBankIdx[0]].RegBank,
551 *AArch64::PartMappings[OpRegBankIdx[1]].RegBank, OpSize[0]);
554 case TargetOpcode::G_LOAD: {
555 // Loading in vector unit is slightly more expensive.
556 // This is actually only true for the LD1R and co instructions,
557 // but anyway for the fast mode this number does not matter and
558 // for the greedy mode the cost of the cross bank copy will
559 // offset this number.
560 // FIXME: Should be derived from the scheduling model.
561 if (OpRegBankIdx[0] >= AArch64::PMI_FirstFPR)
566 // Finally construct the computed mapping.
567 RegisterBankInfo::InstructionMapping Mapping =
568 InstructionMapping{DefaultMappingID, Cost, nullptr, NumOperands};
569 SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
570 for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
571 if (MI.getOperand(Idx).isReg())
573 AArch64::getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
575 Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));