1 //===- AArch64RegisterBankInfo.cpp -------------------------------*- C++ -*-==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// This file implements the targeting of the RegisterBankInfo class for
11 /// AArch64.
12 /// \todo This should be generated by TableGen.
13 //===----------------------------------------------------------------------===//
15 #include "AArch64RegisterBankInfo.h"
16 #include "AArch64InstrInfo.h" // For XXXRegClassID.
17 #include "llvm/CodeGen/LowLevelType.h"
18 #include "llvm/CodeGen/MachineRegisterInfo.h"
19 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
20 #include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
21 #include "llvm/Target/TargetRegisterInfo.h"
22 #include "llvm/Target/TargetSubtargetInfo.h"
24 // This file will be TableGen'ed at some point.
25 #include "AArch64GenRegisterBankInfo.def"
29 #ifndef LLVM_BUILD_GLOBAL_ISEL
30 #error "You shouldn't build this"
33 AArch64RegisterBankInfo::AArch64RegisterBankInfo(const TargetRegisterInfo &TRI)
34 : RegisterBankInfo(AArch64::RegBanks, AArch64::NumRegisterBanks) {
35 static bool AlreadyInit = false;
36 // We have only one set of register banks, whatever the subtarget
37 // is. Therefore, the initialization of the RegBanks table should be
38 // done only once. Indeed the table of all register banks
39 // (AArch64::RegBanks) is unique in the compiler. At some point, it
40 // will get tablegen'ed and the whole constructor becomes empty.
45 const RegisterBank &RBGPR = getRegBank(AArch64::GPRRegBankID);
47 assert(&AArch64::GPRRegBank == &RBGPR &&
48 "The order in RegBanks is messed up");
50 const RegisterBank &RBFPR = getRegBank(AArch64::FPRRegBankID);
52 assert(&AArch64::FPRRegBank == &RBFPR &&
53 "The order in RegBanks is messed up");
55 const RegisterBank &RBCCR = getRegBank(AArch64::CCRRegBankID);
57 assert(&AArch64::CCRRegBank == &RBCCR &&
58 "The order in RegBanks is messed up");
60 // The GPR register bank is fully defined by all the registers in
61 // GR64all + its subclasses.
62 assert(RBGPR.covers(*TRI.getRegClass(AArch64::GPR32RegClassID)) &&
63 "Subclass not added?");
64 assert(RBGPR.getSize() == 64 && "GPRs should hold up to 64-bit");
66 // The FPR register bank is fully defined by all the registers in
67 // GR64all + its subclasses.
68 assert(RBFPR.covers(*TRI.getRegClass(AArch64::QQRegClassID)) &&
69 "Subclass not added?");
70 assert(RBFPR.covers(*TRI.getRegClass(AArch64::FPR64RegClassID)) &&
71 "Subclass not added?");
72 assert(RBFPR.getSize() == 512 &&
73 "FPRs should hold up to 512-bit via QQQQ sequence");
75 assert(RBCCR.covers(*TRI.getRegClass(AArch64::CCRRegClassID)) &&
77 assert(RBCCR.getSize() == 32 && "CCR should hold up to 32-bit");
79 // Check that the TableGen'ed like file is in sync we our expectations.
81 assert(AArch64::PartialMappingIdx::PMI_GPR32 ==
82 AArch64::PartialMappingIdx::PMI_FirstGPR &&
83 "GPR32 index not first in the GPR list");
84 assert(AArch64::PartialMappingIdx::PMI_GPR64 ==
85 AArch64::PartialMappingIdx::PMI_LastGPR &&
86 "GPR64 index not last in the GPR list");
87 assert(AArch64::PartialMappingIdx::PMI_FirstGPR <=
88 AArch64::PartialMappingIdx::PMI_LastGPR &&
89 "GPR list is backward");
90 assert(AArch64::PartialMappingIdx::PMI_FPR32 ==
91 AArch64::PartialMappingIdx::PMI_FirstFPR &&
92 "FPR32 index not first in the FPR list");
93 assert(AArch64::PartialMappingIdx::PMI_FPR512 ==
94 AArch64::PartialMappingIdx::PMI_LastFPR &&
95 "FPR512 index not last in the FPR list");
96 assert(AArch64::PartialMappingIdx::PMI_FirstFPR <=
97 AArch64::PartialMappingIdx::PMI_LastFPR &&
98 "FPR list is backward");
99 assert(AArch64::PartialMappingIdx::PMI_FPR32 + 1 ==
100 AArch64::PartialMappingIdx::PMI_FPR64 &&
101 AArch64::PartialMappingIdx::PMI_FPR64 + 1 ==
102 AArch64::PartialMappingIdx::PMI_FPR128 &&
103 AArch64::PartialMappingIdx::PMI_FPR128 + 1 ==
104 AArch64::PartialMappingIdx::PMI_FPR256 &&
105 AArch64::PartialMappingIdx::PMI_FPR256 + 1 ==
106 AArch64::PartialMappingIdx::PMI_FPR512 &&
107 "FPR indices not properly ordered");
109 // Check partial mapping.
110 #define CHECK_PARTIALMAP(Idx, ValStartIdx, ValLength, RB) \
112 const PartialMapping &Map = \
113 AArch64::PartMappings[AArch64::PartialMappingIdx::Idx - \
114 AArch64::PartialMappingIdx::PMI_Min]; \
116 assert(Map.StartIdx == ValStartIdx && Map.Length == ValLength && \
117 Map.RegBank == &RB && #Idx " is incorrectly initialized"); \
120 CHECK_PARTIALMAP(PMI_GPR32, 0, 32, RBGPR);
121 CHECK_PARTIALMAP(PMI_GPR64, 0, 64, RBGPR);
122 CHECK_PARTIALMAP(PMI_FPR32, 0, 32, RBFPR);
123 CHECK_PARTIALMAP(PMI_FPR64, 0, 64, RBFPR);
124 CHECK_PARTIALMAP(PMI_FPR128, 0, 128, RBFPR);
125 CHECK_PARTIALMAP(PMI_FPR256, 0, 256, RBFPR);
126 CHECK_PARTIALMAP(PMI_FPR512, 0, 512, RBFPR);
128 // Check value mapping.
129 #define CHECK_VALUEMAP_IMPL(RBName, Size, Offset) \
131 unsigned PartialMapBaseIdx = \
132 AArch64::PartialMappingIdx::PMI_##RBName##Size - \
133 AArch64::PartialMappingIdx::PMI_Min; \
134 (void)PartialMapBaseIdx; \
135 const ValueMapping &Map = AArch64::getValueMapping( \
136 AArch64::PartialMappingIdx::PMI_First##RBName, Size)[Offset]; \
138 assert(Map.BreakDown == &AArch64::PartMappings[PartialMapBaseIdx] && \
139 Map.NumBreakDowns == 1 && #RBName #Size \
140 " " #Offset " is incorrectly initialized"); \
143 #define CHECK_VALUEMAP(RBName, Size) CHECK_VALUEMAP_IMPL(RBName, Size, 0)
145 CHECK_VALUEMAP(GPR, 32);
146 CHECK_VALUEMAP(GPR, 64);
147 CHECK_VALUEMAP(FPR, 32);
148 CHECK_VALUEMAP(FPR, 64);
149 CHECK_VALUEMAP(FPR, 128);
150 CHECK_VALUEMAP(FPR, 256);
151 CHECK_VALUEMAP(FPR, 512);
153 // Check the value mapping for 3-operands instructions where all the operands
154 // map to the same value mapping.
155 #define CHECK_VALUEMAP_3OPS(RBName, Size) \
157 CHECK_VALUEMAP_IMPL(RBName, Size, 0); \
158 CHECK_VALUEMAP_IMPL(RBName, Size, 1); \
159 CHECK_VALUEMAP_IMPL(RBName, Size, 2); \
162 CHECK_VALUEMAP_3OPS(GPR, 32);
163 CHECK_VALUEMAP_3OPS(GPR, 64);
164 CHECK_VALUEMAP_3OPS(FPR, 32);
165 CHECK_VALUEMAP_3OPS(FPR, 64);
166 CHECK_VALUEMAP_3OPS(FPR, 128);
167 CHECK_VALUEMAP_3OPS(FPR, 256);
168 CHECK_VALUEMAP_3OPS(FPR, 512);
170 #define CHECK_VALUEMAP_CROSSREGCPY(RBNameDst, RBNameSrc, Size) \
172 unsigned PartialMapDstIdx = \
173 AArch64::PMI_##RBNameDst##Size - AArch64::PMI_Min; \
174 unsigned PartialMapSrcIdx = \
175 AArch64::PMI_##RBNameSrc##Size - AArch64::PMI_Min; \
176 (void) PartialMapDstIdx; \
177 (void) PartialMapSrcIdx; \
178 const ValueMapping *Map = AArch64::getCopyMapping( \
179 AArch64::PMI_First##RBNameDst == AArch64::PMI_FirstGPR, \
180 AArch64::PMI_First##RBNameSrc == AArch64::PMI_FirstGPR, Size); \
182 assert(Map[0].BreakDown == &AArch64::PartMappings[PartialMapDstIdx] && \
183 Map[0].NumBreakDowns == 1 && #RBNameDst #Size \
184 " Dst is incorrectly initialized"); \
185 assert(Map[1].BreakDown == &AArch64::PartMappings[PartialMapSrcIdx] && \
186 Map[1].NumBreakDowns == 1 && #RBNameSrc #Size \
187 " Src is incorrectly initialized"); \
191 CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 32);
192 CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 32);
193 CHECK_VALUEMAP_CROSSREGCPY(GPR, GPR, 64);
194 CHECK_VALUEMAP_CROSSREGCPY(GPR, FPR, 64);
195 CHECK_VALUEMAP_CROSSREGCPY(FPR, FPR, 32);
196 CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 32);
197 CHECK_VALUEMAP_CROSSREGCPY(FPR, FPR, 64);
198 CHECK_VALUEMAP_CROSSREGCPY(FPR, GPR, 64);
200 assert(verify(TRI) && "Invalid register bank information");
203 unsigned AArch64RegisterBankInfo::copyCost(const RegisterBank &A,
204 const RegisterBank &B,
205 unsigned Size) const {
206 // What do we do with different size?
207 // copy are same size.
208 // Will introduce other hooks for different size:
210 // * build_sequence cost.
212 // Copy from (resp. to) GPR to (resp. from) FPR involves FMOV.
213 // FIXME: This should be deduced from the scheduling model.
214 if (&A == &AArch64::GPRRegBank && &B == &AArch64::FPRRegBank)
215 // FMOVXDr or FMOVWSr.
217 if (&A == &AArch64::FPRRegBank && &B == &AArch64::GPRRegBank)
218 // FMOVDXr or FMOVSWr.
221 return RegisterBankInfo::copyCost(A, B, Size);
224 const RegisterBank &AArch64RegisterBankInfo::getRegBankFromRegClass(
225 const TargetRegisterClass &RC) const {
226 switch (RC.getID()) {
227 case AArch64::FPR8RegClassID:
228 case AArch64::FPR16RegClassID:
229 case AArch64::FPR32RegClassID:
230 case AArch64::FPR64RegClassID:
231 case AArch64::FPR128RegClassID:
232 case AArch64::FPR128_loRegClassID:
233 case AArch64::DDRegClassID:
234 case AArch64::DDDRegClassID:
235 case AArch64::DDDDRegClassID:
236 case AArch64::QQRegClassID:
237 case AArch64::QQQRegClassID:
238 case AArch64::QQQQRegClassID:
239 return getRegBank(AArch64::FPRRegBankID);
240 case AArch64::GPR32commonRegClassID:
241 case AArch64::GPR32RegClassID:
242 case AArch64::GPR32spRegClassID:
243 case AArch64::GPR32sponlyRegClassID:
244 case AArch64::GPR32allRegClassID:
245 case AArch64::GPR64commonRegClassID:
246 case AArch64::GPR64RegClassID:
247 case AArch64::GPR64spRegClassID:
248 case AArch64::GPR64sponlyRegClassID:
249 case AArch64::GPR64allRegClassID:
250 case AArch64::tcGPR64RegClassID:
251 case AArch64::WSeqPairsClassRegClassID:
252 case AArch64::XSeqPairsClassRegClassID:
253 return getRegBank(AArch64::GPRRegBankID);
254 case AArch64::CCRRegClassID:
255 return getRegBank(AArch64::CCRRegBankID);
257 llvm_unreachable("Register class not supported");
261 RegisterBankInfo::InstructionMappings
262 AArch64RegisterBankInfo::getInstrAlternativeMappings(
263 const MachineInstr &MI) const {
264 const MachineFunction &MF = *MI.getParent()->getParent();
265 const TargetSubtargetInfo &STI = MF.getSubtarget();
266 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
267 const MachineRegisterInfo &MRI = MF.getRegInfo();
269 switch (MI.getOpcode()) {
270 case TargetOpcode::G_OR: {
271 // 32 and 64-bit or can be mapped on either FPR or
272 // GPR for the same cost.
273 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
274 if (Size != 32 && Size != 64)
277 // If the instruction has any implicit-defs or uses,
278 // do not mess with it.
279 if (MI.getNumOperands() != 3)
281 InstructionMappings AltMappings;
282 InstructionMapping GPRMapping(
283 /*ID*/ 1, /*Cost*/ 1,
284 AArch64::getValueMapping(AArch64::PMI_FirstGPR, Size),
286 InstructionMapping FPRMapping(
287 /*ID*/ 2, /*Cost*/ 1,
288 AArch64::getValueMapping(AArch64::PMI_FirstFPR, Size),
291 AltMappings.emplace_back(std::move(GPRMapping));
292 AltMappings.emplace_back(std::move(FPRMapping));
295 case TargetOpcode::G_BITCAST: {
296 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
297 if (Size != 32 && Size != 64)
300 // If the instruction has any implicit-defs or uses,
301 // do not mess with it.
302 if (MI.getNumOperands() != 2)
305 InstructionMappings AltMappings;
306 InstructionMapping GPRMapping(
307 /*ID*/ 1, /*Cost*/ 1,
308 AArch64::getCopyMapping(/*DstIsGPR*/ true, /*SrcIsGPR*/ true, Size),
310 InstructionMapping FPRMapping(
311 /*ID*/ 2, /*Cost*/ 1,
312 AArch64::getCopyMapping(/*DstIsGPR*/ false, /*SrcIsGPR*/ false, Size),
314 InstructionMapping GPRToFPRMapping(
316 /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
317 AArch64::getCopyMapping(/*DstIsGPR*/ false, /*SrcIsGPR*/ true, Size),
319 InstructionMapping FPRToGPRMapping(
321 /*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
322 AArch64::getCopyMapping(/*DstIsGPR*/ true, /*SrcIsGPR*/ false, Size),
325 AltMappings.emplace_back(std::move(GPRMapping));
326 AltMappings.emplace_back(std::move(FPRMapping));
327 AltMappings.emplace_back(std::move(GPRToFPRMapping));
328 AltMappings.emplace_back(std::move(FPRToGPRMapping));
331 case TargetOpcode::G_LOAD: {
332 unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
336 // If the instruction has any implicit-defs or uses,
337 // do not mess with it.
338 if (MI.getNumOperands() != 2)
341 InstructionMappings AltMappings;
342 InstructionMapping GPRMapping(
343 /*ID*/ 1, /*Cost*/ 1,
345 {AArch64::getValueMapping(AArch64::PMI_FirstGPR, Size),
346 // Addresses are GPR 64-bit.
347 AArch64::getValueMapping(AArch64::PMI_FirstGPR, 64)}),
349 InstructionMapping FPRMapping(
350 /*ID*/ 2, /*Cost*/ 1,
352 {AArch64::getValueMapping(AArch64::PMI_FirstFPR, Size),
353 // Addresses are GPR 64-bit.
354 AArch64::getValueMapping(AArch64::PMI_FirstGPR, 64)}),
357 AltMappings.emplace_back(std::move(GPRMapping));
358 AltMappings.emplace_back(std::move(FPRMapping));
364 return RegisterBankInfo::getInstrAlternativeMappings(MI);
367 void AArch64RegisterBankInfo::applyMappingImpl(
368 const OperandsMapper &OpdMapper) const {
369 switch (OpdMapper.getMI().getOpcode()) {
370 case TargetOpcode::G_OR:
371 case TargetOpcode::G_BITCAST:
372 case TargetOpcode::G_LOAD: {
373 // Those ID must match getInstrAlternativeMappings.
374 assert((OpdMapper.getInstrMapping().getID() >= 1 &&
375 OpdMapper.getInstrMapping().getID() <= 4) &&
376 "Don't know how to handle that ID");
377 return applyDefaultMapping(OpdMapper);
380 llvm_unreachable("Don't know how to handle that operation");
384 /// Returns whether opcode \p Opc is a pre-isel generic floating-point opcode,
385 /// having only floating-point operands.
386 static bool isPreISelGenericFloatingPointOpcode(unsigned Opc) {
388 case TargetOpcode::G_FADD:
389 case TargetOpcode::G_FSUB:
390 case TargetOpcode::G_FMUL:
391 case TargetOpcode::G_FDIV:
392 case TargetOpcode::G_FCONSTANT:
393 case TargetOpcode::G_FPEXT:
394 case TargetOpcode::G_FPTRUNC:
400 RegisterBankInfo::InstructionMapping
401 AArch64RegisterBankInfo::getSameKindOfOperandsMapping(const MachineInstr &MI) {
402 const unsigned Opc = MI.getOpcode();
403 const MachineFunction &MF = *MI.getParent()->getParent();
404 const MachineRegisterInfo &MRI = MF.getRegInfo();
406 unsigned NumOperands = MI.getNumOperands();
407 assert(NumOperands <= 3 &&
408 "This code is for instructions with 3 or less operands");
410 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
411 unsigned Size = Ty.getSizeInBits();
412 bool IsFPR = Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
415 // Make sure all the operands are using similar size and type.
416 // Should probably be checked by the machine verifier.
417 // This code won't catch cases where the number of lanes is
418 // different between the operands.
419 // If we want to go to that level of details, it is probably
420 // best to check that the types are the same, period.
421 // Currently, we just check that the register banks are the same
423 for (unsigned Idx = 1; Idx != NumOperands; ++Idx) {
424 LLT OpTy = MRI.getType(MI.getOperand(Idx).getReg());
425 assert(AArch64::getRegBankBaseIdxOffset(OpTy.getSizeInBits()) ==
426 AArch64::getRegBankBaseIdxOffset(Size) &&
427 "Operand has incompatible size");
428 bool OpIsFPR = OpTy.isVector() || isPreISelGenericFloatingPointOpcode(Opc);
430 assert(IsFPR == OpIsFPR && "Operand has incompatible type");
432 #endif // End NDEBUG.
434 AArch64::PartialMappingIdx RBIdx =
435 IsFPR ? AArch64::PMI_FirstFPR : AArch64::PMI_FirstGPR;
437 return InstructionMapping{DefaultMappingID, 1,
438 AArch64::getValueMapping(RBIdx, Size), NumOperands};
441 RegisterBankInfo::InstructionMapping
442 AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
443 const unsigned Opc = MI.getOpcode();
444 const MachineFunction &MF = *MI.getParent()->getParent();
445 const MachineRegisterInfo &MRI = MF.getRegInfo();
447 // Try the default logic for non-generic instructions that are either copies
448 // or already have some operands assigned to banks.
449 if (!isPreISelGenericOpcode(Opc)) {
450 RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
451 if (Mapping.isValid())
456 // G_{F|S|U}REM are not listed because they are not legal.
458 case TargetOpcode::G_ADD:
459 case TargetOpcode::G_SUB:
460 case TargetOpcode::G_GEP:
461 case TargetOpcode::G_MUL:
462 case TargetOpcode::G_SDIV:
463 case TargetOpcode::G_UDIV:
465 case TargetOpcode::G_AND:
466 case TargetOpcode::G_OR:
467 case TargetOpcode::G_XOR:
469 case TargetOpcode::G_SHL:
470 case TargetOpcode::G_LSHR:
471 case TargetOpcode::G_ASHR:
472 // Floating point ops.
473 case TargetOpcode::G_FADD:
474 case TargetOpcode::G_FSUB:
475 case TargetOpcode::G_FMUL:
476 case TargetOpcode::G_FDIV:
477 return getSameKindOfOperandsMapping(MI);
478 case TargetOpcode::G_BITCAST: {
479 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
480 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
481 unsigned Size = DstTy.getSizeInBits();
482 bool DstIsGPR = !DstTy.isVector();
483 bool SrcIsGPR = !SrcTy.isVector();
484 const RegisterBank &DstRB =
485 DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
486 const RegisterBank &SrcRB =
487 SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
488 return InstructionMapping{DefaultMappingID, copyCost(DstRB, SrcRB, Size),
489 AArch64::getCopyMapping(DstIsGPR, SrcIsGPR, Size),
492 case TargetOpcode::G_SEQUENCE:
493 // FIXME: support this, but the generic code is really not going to do
495 return InstructionMapping();
500 unsigned NumOperands = MI.getNumOperands();
502 // Track the size and bank of each register. We don't do partial mappings.
503 SmallVector<unsigned, 4> OpSize(NumOperands);
504 SmallVector<AArch64::PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
505 for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
506 auto &MO = MI.getOperand(Idx);
510 LLT Ty = MRI.getType(MO.getReg());
511 OpSize[Idx] = Ty.getSizeInBits();
513 // As a top-level guess, vectors go in FPRs, scalars and pointers in GPRs.
514 // For floating-point instructions, scalars go in FPRs.
515 if (Ty.isVector() || isPreISelGenericFloatingPointOpcode(Opc))
516 OpRegBankIdx[Idx] = AArch64::PMI_FirstFPR;
518 OpRegBankIdx[Idx] = AArch64::PMI_FirstGPR;
522 // Some of the floating-point instructions have mixed GPR and FPR operands:
523 // fine-tune the computed mapping.
525 case TargetOpcode::G_SITOFP:
526 case TargetOpcode::G_UITOFP: {
527 OpRegBankIdx = {AArch64::PMI_FirstFPR, AArch64::PMI_FirstGPR};
530 case TargetOpcode::G_FPTOSI:
531 case TargetOpcode::G_FPTOUI: {
532 OpRegBankIdx = {AArch64::PMI_FirstGPR, AArch64::PMI_FirstFPR};
535 case TargetOpcode::G_FCMP: {
536 OpRegBankIdx = {AArch64::PMI_FirstGPR,
537 /* Predicate */ AArch64::PMI_None, AArch64::PMI_FirstFPR,
538 AArch64::PMI_FirstFPR};
541 case TargetOpcode::G_BITCAST: {
542 // This is going to be a cross register bank copy and this is expensive.
543 if (OpRegBankIdx[0] != OpRegBankIdx[1])
545 copyCost(*AArch64::PartMappings[OpRegBankIdx[0]].RegBank,
546 *AArch64::PartMappings[OpRegBankIdx[1]].RegBank, OpSize[0]);
549 case TargetOpcode::G_LOAD: {
550 // Loading in vector unit is slightly more expensive.
551 // This is actually only true for the LD1R and co instructions,
552 // but anyway for the fast mode this number does not matter and
553 // for the greedy mode the cost of the cross bank copy will
554 // offset this number.
555 // FIXME: Should be derived from the scheduling model.
556 if (OpRegBankIdx[0] >= AArch64::PMI_FirstFPR)
561 // Finally construct the computed mapping.
562 RegisterBankInfo::InstructionMapping Mapping =
563 InstructionMapping{DefaultMappingID, Cost, nullptr, NumOperands};
564 SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
565 for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
566 if (MI.getOperand(Idx).isReg())
568 AArch64::getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
570 Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));