1 //===-- AVRISelLowering.cpp - AVR DAG Lowering Implementation -------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file defines the interfaces that AVR uses to lower LLVM code into a
10 // selection DAG.
12 //===----------------------------------------------------------------------===//
14 #include "AVRISelLowering.h"
16 #include "llvm/ADT/StringSwitch.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/CodeGen/CallingConvLower.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/Support/ErrorHandling.h"
28 #include "AVRMachineFunctionInfo.h"
29 #include "AVRSubtarget.h"
30 #include "AVRTargetMachine.h"
31 #include "MCTargetDesc/AVRMCTargetDesc.h"
35 AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
36 const AVRSubtarget &STI)
37 : TargetLowering(TM), Subtarget(STI) {
38 // Set up the register classes.
39 addRegisterClass(MVT::i8, &AVR::GPR8RegClass);
40 addRegisterClass(MVT::i16, &AVR::DREGSRegClass);
42 // Compute derived properties from the register classes.
43 computeRegisterProperties(Subtarget.getRegisterInfo());
45 setBooleanContents(ZeroOrOneBooleanContent);
46 setBooleanVectorContents(ZeroOrOneBooleanContent);
47 setSchedulingPreference(Sched::RegPressure);
48 setStackPointerRegisterToSaveRestore(AVR::SP);
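// AVR memory accesses have no alignment restrictions, so unaligned atomic
// loads and stores need no special handling here.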
49 setSupportsUnalignedAtomics(true);
51 setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
52 setOperationAction(ISD::BlockAddress, MVT::i16, Custom);
54 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
55 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
56 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand);
57 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand);
59 for (MVT VT : MVT::integer_valuetypes()) {
60 for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
61 setLoadExtAction(N, VT, MVT::i1, Promote);
62 setLoadExtAction(N, VT, MVT::i8, Expand);
63 }
64 }
66 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
68 for (MVT VT : MVT::integer_valuetypes()) {
69 setOperationAction(ISD::ADDC, VT, Legal);
70 setOperationAction(ISD::SUBC, VT, Legal);
71 setOperationAction(ISD::ADDE, VT, Legal);
72 setOperationAction(ISD::SUBE, VT, Legal);
73 }
75 // sub (x, imm) gets canonicalized to add (x, -imm), so for illegal types we
76 // revert it back into a sub, since we don't have an add-with-immediate instruction.
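// For example, an i32 (sub x, 16) reaches us as (add x, -16); ReplaceNodeResults
// below turns it back into (sub x, 16).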
77 setOperationAction(ISD::ADD, MVT::i32, Custom);
78 setOperationAction(ISD::ADD, MVT::i64, Custom);
80 // our shift instructions are only able to shift 1 bit at a time, so handle
81 // this in a custom way.
82 setOperationAction(ISD::SRA, MVT::i8, Custom);
83 setOperationAction(ISD::SHL, MVT::i8, Custom);
84 setOperationAction(ISD::SRL, MVT::i8, Custom);
85 setOperationAction(ISD::SRA, MVT::i16, Custom);
86 setOperationAction(ISD::SHL, MVT::i16, Custom);
87 setOperationAction(ISD::SRL, MVT::i16, Custom);
88 setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand);
89 setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand);
90 setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand);
92 setOperationAction(ISD::ROTL, MVT::i8, Custom);
93 setOperationAction(ISD::ROTL, MVT::i16, Expand);
94 setOperationAction(ISD::ROTR, MVT::i8, Custom);
95 setOperationAction(ISD::ROTR, MVT::i16, Expand);
97 setOperationAction(ISD::BR_CC, MVT::i8, Custom);
98 setOperationAction(ISD::BR_CC, MVT::i16, Custom);
99 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
100 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
101 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
103 setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
104 setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
105 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
106 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
107 setOperationAction(ISD::SETCC, MVT::i8, Custom);
108 setOperationAction(ISD::SETCC, MVT::i16, Custom);
109 setOperationAction(ISD::SETCC, MVT::i32, Custom);
110 setOperationAction(ISD::SETCC, MVT::i64, Custom);
111 setOperationAction(ISD::SELECT, MVT::i8, Expand);
112 setOperationAction(ISD::SELECT, MVT::i16, Expand);
114 setOperationAction(ISD::BSWAP, MVT::i16, Expand);
116 // Add support for postincrement and predecrement load/stores.
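// These map onto the pointer post-increment (X+/Y+/Z+) and pre-decrement
// (-X/-Y/-Z) forms of the LD/ST instructions.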
117 setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
118 setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
119 setIndexedLoadAction(ISD::PRE_DEC, MVT::i8, Legal);
120 setIndexedLoadAction(ISD::PRE_DEC, MVT::i16, Legal);
121 setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
122 setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
123 setIndexedStoreAction(ISD::PRE_DEC, MVT::i8, Legal);
124 setIndexedStoreAction(ISD::PRE_DEC, MVT::i16, Legal);
126 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
128 setOperationAction(ISD::VASTART, MVT::Other, Custom);
129 setOperationAction(ISD::VAEND, MVT::Other, Expand);
130 setOperationAction(ISD::VAARG, MVT::Other, Expand);
131 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
133 // Atomic operations which must be lowered to rtlib calls
134 for (MVT VT : MVT::integer_valuetypes()) {
135 setOperationAction(ISD::ATOMIC_SWAP, VT, Expand);
136 setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand);
137 setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand);
138 setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand);
139 setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand);
140 setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand);
141 setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand);
142 }
144 // Division/remainder
145 setOperationAction(ISD::UDIV, MVT::i8, Expand);
146 setOperationAction(ISD::UDIV, MVT::i16, Expand);
147 setOperationAction(ISD::UREM, MVT::i8, Expand);
148 setOperationAction(ISD::UREM, MVT::i16, Expand);
149 setOperationAction(ISD::SDIV, MVT::i8, Expand);
150 setOperationAction(ISD::SDIV, MVT::i16, Expand);
151 setOperationAction(ISD::SREM, MVT::i8, Expand);
152 setOperationAction(ISD::SREM, MVT::i16, Expand);
154 // Make division and modulus custom
155 setOperationAction(ISD::UDIVREM, MVT::i8, Custom);
156 setOperationAction(ISD::UDIVREM, MVT::i16, Custom);
157 setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
158 setOperationAction(ISD::SDIVREM, MVT::i8, Custom);
159 setOperationAction(ISD::SDIVREM, MVT::i16, Custom);
160 setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
162 // Do not use MUL. The AVR instructions are closer to SMUL_LOHI &co.
163 setOperationAction(ISD::MUL, MVT::i8, Expand);
164 setOperationAction(ISD::MUL, MVT::i16, Expand);
166 // Expand 16 bit multiplications.
167 setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
168 setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
170 // Expand multiplications to libcalls when there is
171 // no hardware MUL.
172 if (!Subtarget.supportsMultiplication()) {
173 setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
174 setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
175 }
177 for (MVT VT : MVT::integer_valuetypes()) {
178 setOperationAction(ISD::MULHS, VT, Expand);
179 setOperationAction(ISD::MULHU, VT, Expand);
180 }
182 for (MVT VT : MVT::integer_valuetypes()) {
183 setOperationAction(ISD::CTPOP, VT, Expand);
184 setOperationAction(ISD::CTLZ, VT, Expand);
185 setOperationAction(ISD::CTTZ, VT, Expand);
186 }
188 for (MVT VT : MVT::integer_valuetypes()) {
189 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
190 // TODO: The generated code is pretty poor. Investigate using the
191 // same "shift and subtract with carry" trick that we do for
192 // extending 8-bit to 16-bit. This may require infrastructure
193 // improvements in how we treat 16-bit "registers" to be feasible.
194 }
196 // Division rtlib functions (not supported), use divmod functions instead
197 setLibcallName(RTLIB::SDIV_I8, nullptr);
198 setLibcallName(RTLIB::SDIV_I16, nullptr);
199 setLibcallName(RTLIB::SDIV_I32, nullptr);
200 setLibcallName(RTLIB::UDIV_I8, nullptr);
201 setLibcallName(RTLIB::UDIV_I16, nullptr);
202 setLibcallName(RTLIB::UDIV_I32, nullptr);
204 // Modulus rtlib functions (not supported), use divmod functions instead
205 setLibcallName(RTLIB::SREM_I8, nullptr);
206 setLibcallName(RTLIB::SREM_I16, nullptr);
207 setLibcallName(RTLIB::SREM_I32, nullptr);
208 setLibcallName(RTLIB::UREM_I8, nullptr);
209 setLibcallName(RTLIB::UREM_I16, nullptr);
210 setLibcallName(RTLIB::UREM_I32, nullptr);
212 // Division and modulus rtlib functions
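// (avr-libc uses GCC machine-mode suffixes: qi = 8-bit, hi = 16-bit, si = 32-bit.)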
213 setLibcallName(RTLIB::SDIVREM_I8, "__divmodqi4");
214 setLibcallName(RTLIB::SDIVREM_I16, "__divmodhi4");
215 setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
216 setLibcallName(RTLIB::UDIVREM_I8, "__udivmodqi4");
217 setLibcallName(RTLIB::UDIVREM_I16, "__udivmodhi4");
218 setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
220 // Several of the runtime library functions use a special calling conv
221 setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::AVR_BUILTIN);
222 setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::AVR_BUILTIN);
223 setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::AVR_BUILTIN);
224 setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::AVR_BUILTIN);
226 // Trigonometric rtlib functions
227 setLibcallName(RTLIB::SIN_F32, "sin");
228 setLibcallName(RTLIB::COS_F32, "cos");
230 setMinFunctionAlignment(Align(2));
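// Effectively disable jump tables by requiring more entries than any switch
// statement will ever have.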
231 setMinimumJumpTableEntries(UINT_MAX);
232 }
234 const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const {
265 EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
266                                           EVT VT) const {
267 assert(!VT.isVector() && "No AVR SetCC type for vectors!");
268 return MVT::i8;
269 }
271 SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
272 // TODO: This function has to be completely rewritten to produce optimal
273 // code; for now it produces very long but correct code.
275 const SDNode *N = Op.getNode();
276 EVT VT = Op.getValueType();
277 SDLoc dl(N);
278 assert(isPowerOf2_32(VT.getSizeInBits()) &&
279 "Expected power-of-2 shift amount");
281 // Expand non-constant shifts to loops.
282 if (!isa<ConstantSDNode>(N->getOperand(1))) {
283 switch (Op.getOpcode()) {
285 llvm_unreachable("Invalid shift opcode!");
287 return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0),
290 return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0),
293 SDValue Amt = N->getOperand(1);
294 EVT AmtVT = Amt.getValueType();
295 Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
296 DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
297 return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0), Amt);
300 SDValue Amt = N->getOperand(1);
301 EVT AmtVT = Amt.getValueType();
302 Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
303 DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
304 return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0), Amt);
307 return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0),
312 uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
313 SDValue Victim = N->getOperand(0);
315 switch (Op.getOpcode()) {
321 ShiftAmount = ShiftAmount % VT.getSizeInBits();
325 ShiftAmount = ShiftAmount % VT.getSizeInBits();
334 llvm_unreachable("Invalid shift opcode");
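// For a constant shift amount, emit one single-bit shift node per step; AVR
// shift instructions only move one bit at a time.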
337 while (ShiftAmount--) {
338 Victim = DAG.getNode(Opc8, dl, VT, Victim);
344 SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
345 unsigned Opcode = Op->getOpcode();
346 assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
347 "Invalid opcode for Div/Rem lowering");
348 bool IsSigned = (Opcode == ISD::SDIVREM);
349 EVT VT = Op->getValueType(0);
350 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
352 RTLIB::Libcall LC;
353 switch (VT.getSimpleVT().SimpleTy) {
354 default:
355 llvm_unreachable("Unexpected request for libcall!");
356 case MVT::i8:
357 LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
358 break;
359 case MVT::i16:
360 LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
361 break;
362 case MVT::i32:
363 LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
364 break;
365 }
367 SDValue InChain = DAG.getEntryNode();
369 TargetLowering::ArgListTy Args;
370 TargetLowering::ArgListEntry Entry;
371 for (SDValue const &Value : Op->op_values()) {
372 Entry.Node = Value;
373 Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
374 Entry.IsSExt = IsSigned;
375 Entry.IsZExt = !IsSigned;
376 Args.push_back(Entry);
379 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
380 getPointerTy(DAG.getDataLayout()));
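// The divmod libcall returns the quotient and the remainder together, so model
// its return type as a two-field struct.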
382 Type *RetTy = (Type *)StructType::get(Ty, Ty);
385 TargetLowering::CallLoweringInfo CLI(DAG);
388 .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
390 .setSExtResult(IsSigned)
391 .setZExtResult(!IsSigned);
393 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
394 return CallInfo.first;
397 SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
398 SelectionDAG &DAG) const {
399 auto DL = DAG.getDataLayout();
401 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
402 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
404 // Create the TargetGlobalAddress node, folding in the constant offset.
405 SDValue Result =
406     DAG.getTargetGlobalAddress(GV, SDLoc(Op), getPointerTy(DL), Offset);
407 return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
410 SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
411 SelectionDAG &DAG) const {
412 auto DL = DAG.getDataLayout();
413 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
415 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(DL));
417 return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
420 /// intCCToAVRCC - Convert a DAG integer condition code to an AVR CC.
421 static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC) {
422 switch (CC) {
423 default:
424 llvm_unreachable("Unknown condition code!");
425 case ISD::SETEQ:
426 return AVRCC::COND_EQ;
427 case ISD::SETNE:
428 return AVRCC::COND_NE;
429 case ISD::SETGE:
430 return AVRCC::COND_GE;
431 case ISD::SETLT:
432 return AVRCC::COND_LT;
433 case ISD::SETUGE:
434 return AVRCC::COND_SH;
435 case ISD::SETULT:
436 return AVRCC::COND_LO;
437 }
438 }
440 /// Returns appropriate AVR CMP/CMPC nodes and corresponding condition code for
441 /// the given operands.
442 SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
443 SDValue &AVRcc, SelectionDAG &DAG,
446 EVT VT = LHS.getValueType();
447 bool UseTest = false;
453 // Swap operands and reverse the branching condition.
459 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
460 switch (C->getSExtValue()) {
462 // When doing lhs > -1 use a tst instruction on the top part of lhs
463 // and use brpl instead of using a chain of cp/cpc.
465 AVRcc = DAG.getConstant(AVRCC::COND_PL, DL, MVT::i8);
469 // Turn lhs > 0 into 0 < lhs since 0 can be materialized with
470 // __zero_reg__ in lhs.
472 LHS = DAG.getConstant(0, DL, VT);
477 // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows
478 // us to fold the constant into the cmp instruction.
479 RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
486 // Swap operands and reverse the branching condition.
492 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
493 switch (C->getSExtValue()) {
495 // Turn lhs < 1 into 0 >= lhs since 0 can be materialized with
496 // __zero_reg__ in lhs.
498 LHS = DAG.getConstant(0, DL, VT);
503 // When doing lhs < 0 use a tst instruction on the top part of lhs
504 // and use brmi instead of using a chain of cp/cpc.
506 AVRcc = DAG.getConstant(AVRCC::COND_MI, DL, MVT::i8);
514 // Swap operands and reverse the branching condition.
520 // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to
521 // fold the constant into the cmp instruction.
522 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
523 RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
527 // Swap operands and reverse the branching condition.
534 // Expand 32 and 64 bit comparisons with custom CMP and CMPC nodes instead of
535 // using the default and/or/xor expansion code which is much longer.
536 if (VT == MVT::i32) {
537 SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
538 DAG.getIntPtrConstant(0, DL));
539 SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
540 DAG.getIntPtrConstant(1, DL));
541 SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
542 DAG.getIntPtrConstant(0, DL));
543 SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
544 DAG.getIntPtrConstant(1, DL));
547 // When using tst we only care about the highest part.
548 SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHShi,
549 DAG.getIntPtrConstant(1, DL));
550 Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
552 Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
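// Chain the high-word compare through CMPC so it consumes the borrow produced
// by the low-word CMP.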
553 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
555 } else if (VT == MVT::i64) {
556 SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
557 DAG.getIntPtrConstant(0, DL));
558 SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
559 DAG.getIntPtrConstant(1, DL));
561 SDValue LHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
562 DAG.getIntPtrConstant(0, DL));
563 SDValue LHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
564 DAG.getIntPtrConstant(1, DL));
565 SDValue LHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
566 DAG.getIntPtrConstant(0, DL));
567 SDValue LHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
568 DAG.getIntPtrConstant(1, DL));
570 SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
571 DAG.getIntPtrConstant(0, DL));
572 SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
573 DAG.getIntPtrConstant(1, DL));
575 SDValue RHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
576 DAG.getIntPtrConstant(0, DL));
577 SDValue RHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
578 DAG.getIntPtrConstant(1, DL));
579 SDValue RHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
580 DAG.getIntPtrConstant(0, DL));
581 SDValue RHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
582 DAG.getIntPtrConstant(1, DL));
585 // When using tst we only care about the highest part.
586 SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS3,
587 DAG.getIntPtrConstant(1, DL));
588 Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
590 Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS0, RHS0);
591 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp);
592 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp);
593 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp);
595 } else if (VT == MVT::i8 || VT == MVT::i16) {
597 // When using tst we only care about the highest part.
598 Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue,
601 : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8,
602 LHS, DAG.getIntPtrConstant(1, DL)));
604 Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS);
607 llvm_unreachable("Invalid comparison size");
610 // When using a test instruction AVRcc is already set.
612 AVRcc = DAG.getConstant(intCCToAVRCC(CC), DL, MVT::i8);
618 SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
619 SDValue Chain = Op.getOperand(0);
620 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
621 SDValue LHS = Op.getOperand(2);
622 SDValue RHS = Op.getOperand(3);
623 SDValue Dest = Op.getOperand(4);
627 SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
629 return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
633 SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
634 SDValue LHS = Op.getOperand(0);
635 SDValue RHS = Op.getOperand(1);
636 SDValue TrueV = Op.getOperand(2);
637 SDValue FalseV = Op.getOperand(3);
638 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
642 SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);
644 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
645 SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
647 return DAG.getNode(AVRISD::SELECT_CC, dl, VTs, Ops);
650 SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
651 SDValue LHS = Op.getOperand(0);
652 SDValue RHS = Op.getOperand(1);
653 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
657 SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, DL);
659 SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
660 SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
661 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
662 SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};
664 return DAG.getNode(AVRISD::SELECT_CC, DL, VTs, Ops);
667 SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
668 const MachineFunction &MF = DAG.getMachineFunction();
669 const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
670 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
671 auto DL = DAG.getDataLayout();
672 SDLoc dl(Op);
674 // Vastart just stores the address of the VarArgsFrameIndex slot into the
675 // memory location argument.
676 SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));
678 return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
679 MachinePointerInfo(SV), 0);
682 SDValue AVRTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
683 switch (Op.getOpcode()) {
685 llvm_unreachable("Don't know how to custom lower this!");
691 return LowerShifts(Op, DAG);
692 case ISD::GlobalAddress:
693 return LowerGlobalAddress(Op, DAG);
694 case ISD::BlockAddress:
695 return LowerBlockAddress(Op, DAG);
697 return LowerBR_CC(Op, DAG);
699 return LowerSELECT_CC(Op, DAG);
701 return LowerSETCC(Op, DAG);
703 return LowerVASTART(Op, DAG);
706 return LowerDivRem(Op, DAG);
712 /// Replace a node with an illegal result type
713 /// with a new node built out of custom code.
714 void AVRTargetLowering::ReplaceNodeResults(SDNode *N,
715 SmallVectorImpl<SDValue> &Results,
716 SelectionDAG &DAG) const {
719 switch (N->getOpcode()) {
721 // Convert add (x, imm) into sub (x, -imm).
722 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
723 SDValue Sub = DAG.getNode(
724 ISD::SUB, DL, N->getValueType(0), N->getOperand(0),
725 DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0)));
726 Results.push_back(Sub);
731 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
733 for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
734 Results.push_back(Res.getValue(I));
741 /// Return true if the addressing mode represented
742 /// by AM is legal for this target, for a load/store of the specified type.
743 bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
744 const AddrMode &AM, Type *Ty,
745 unsigned AS, Instruction *I) const {
746 int64_t Offs = AM.BaseOffs;
748 // Allow absolute addresses.
749 if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && Offs == 0) {
750 return true;
751 }
753 // Flash memory instructions only allow zero offsets.
754 if (isa<PointerType>(Ty) && AS == AVR::ProgramMemory) {
755 return Offs == 0;
756 }
758 // Allow reg+<6bit> offset.
759 if (Offs < 0)
760 Offs = -Offs;
761 if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 0 && isUInt<6>(Offs)) {
762 return true;
763 }
764
765 return false;
768 /// Returns true by value, and returns the base pointer, offset pointer, and
769 /// addressing mode by reference, if the node's address can be legally
770 /// represented as a pre-indexed load / store address.
771 bool AVRTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
773 ISD::MemIndexedMode &AM,
774 SelectionDAG &DAG) const {
779 if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
780 VT = LD->getMemoryVT();
781 Op = LD->getBasePtr().getNode();
782 if (LD->getExtensionType() != ISD::NON_EXTLOAD)
784 if (AVR::isProgramMemoryAccess(LD)) {
787 } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
788 VT = ST->getMemoryVT();
789 Op = ST->getBasePtr().getNode();
790 if (AVR::isProgramMemoryAccess(ST)) {
797 if (VT != MVT::i8 && VT != MVT::i16) {
801 if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
805 if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
806 int RHSC = RHS->getSExtValue();
807 if (Op->getOpcode() == ISD::SUB)
810 if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
814 Base = Op->getOperand(0);
815 Offset = DAG.getConstant(RHSC, DL, MVT::i8);
824 /// Returns true by value, and returns the base pointer, offset pointer, and
825 /// addressing mode by reference, if this node can be combined with a
826 /// load / store to form a post-indexed load / store.
827 bool AVRTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
830 ISD::MemIndexedMode &AM,
831 SelectionDAG &DAG) const {
835 if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
836 VT = LD->getMemoryVT();
837 if (LD->getExtensionType() != ISD::NON_EXTLOAD)
839 } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
840 VT = ST->getMemoryVT();
841 if (AVR::isProgramMemoryAccess(ST)) {
848 if (VT != MVT::i8 && VT != MVT::i16) {
852 if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
856 if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
857 int RHSC = RHS->getSExtValue();
858 if (Op->getOpcode() == ISD::SUB)
860 if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
864 Base = Op->getOperand(0);
865 Offset = DAG.getConstant(RHSC, DL, MVT::i8);
874 bool AVRTargetLowering::isOffsetFoldingLegal(
875 const GlobalAddressSDNode *GA) const {
879 //===----------------------------------------------------------------------===//
880 // Formal Arguments Calling Convention Implementation
881 //===----------------------------------------------------------------------===//
883 #include "AVRGenCallingConv.inc"
885 /// Registers for calling conventions, ordered in reverse as required by ABI.
886 /// Both arrays must be of the same length.
887 static const MCPhysReg RegList8[] = {
888 AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
889 AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
890 AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9, AVR::R8};
891 static const MCPhysReg RegList16[] = {
892 AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22,
893 AVR::R22R21, AVR::R21R20, AVR::R20R19, AVR::R19R18,
894 AVR::R18R17, AVR::R17R16, AVR::R16R15, AVR::R15R14,
895 AVR::R14R13, AVR::R13R12, AVR::R12R11, AVR::R11R10,
896 AVR::R10R9, AVR::R9R8};
898 static_assert(array_lengthof(RegList8) == array_lengthof(RegList16),
899 "8-bit and 16-bit register arrays must be of equal length");
901 /// Analyze incoming and outgoing function arguments. We need custom C++ code
902 /// to handle special constraints in the ABI.
903 /// In addition, all pieces of a certain argument have to be passed either
904 /// using registers or the stack, but never a mix of both.
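/// For example, under the avr-gcc ABI an i16 first argument is passed in
/// R25:R24 and a following i8 argument in R22 (each argument's size is rounded
/// up to an even number of bytes).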
905 template <typename ArgT>
907 analyzeArguments(TargetLowering::CallLoweringInfo *CLI, const Function *F,
908 const DataLayout *TD, const SmallVectorImpl<ArgT> &Args,
909 SmallVectorImpl<CCValAssign> &ArgLocs, CCState &CCInfo) {
910 unsigned NumArgs = Args.size();
911 // This is the index of the last used register, in RegList*.
912 // -1 means R26 (R26 is never actually used in CC).
913 int RegLastIdx = -1;
914 // Once a value is passed on the stack, all remaining values also go on the stack.
915 bool UseStack = false;
916 for (unsigned i = 0; i != NumArgs;) {
917 MVT VT = Args[i].VT;
918 // We have to count the number of bytes for each function argument, that is
919 // those Args with the same OrigArgIndex. This is important in case the
920 // function takes an aggregate type.
921 // Current argument will be between [i..j).
922 unsigned ArgIndex = Args[i].OrigArgIndex;
923 unsigned TotalBytes = VT.getStoreSize();
924 unsigned j = i + 1;
925 for (; j != NumArgs; ++j) {
926 if (Args[j].OrigArgIndex != ArgIndex)
928 TotalBytes += Args[j].VT.getStoreSize();
930 // Round up to even number of bytes.
931 TotalBytes = alignTo(TotalBytes, 2);
932 // Skip zero-sized arguments.
933 if (TotalBytes == 0)
934 continue;
935 // The index of the first register to be used
936 unsigned RegIdx = RegLastIdx + TotalBytes;
938 // If there are not enough registers, use the stack
939 if (RegIdx >= array_lengthof(RegList8)) {
940 UseStack = true;
941 }
942 for (; i != j; ++i) {
946 auto evt = EVT(VT).getTypeForEVT(CCInfo.getContext());
947 unsigned Offset = CCInfo.AllocateStack(TD->getTypeAllocSize(evt),
948 TD->getABITypeAlign(evt));
950 CCValAssign::getMem(i, VT, Offset, VT, CCValAssign::Full));
954 Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
955 } else if (VT == MVT::i16) {
956 Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
959 "calling convention can only manage i8 and i16 types");
961 assert(Reg && "register not available in calling convention");
962 CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
963 // Registers inside a particular argument are sorted in increasing order
964 // (remember the array is reversed).
965 RegIdx -= VT.getStoreSize();
971 /// Count the total number of bytes needed to pass or return these arguments.
972 template <typename ArgT>
973 static unsigned getTotalArgumentsSizeInBytes(const SmallVectorImpl<ArgT> &Args) {
974 unsigned TotalBytes = 0;
976 for (const ArgT& Arg : Args) {
977 TotalBytes += Arg.VT.getStoreSize();
982 /// Analyze the incoming and outgoing values of a function return.
983 /// The algorithm is similar to analyzeArguments, but there can only be
984 /// one value, possibly an aggregate, and it is limited to 8 bytes.
985 template <typename ArgT>
986 static void analyzeReturnValues(const SmallVectorImpl<ArgT> &Args,
988 unsigned NumArgs = Args.size();
989 unsigned TotalBytes = getTotalArgumentsSizeInBytes(Args);
990 // CanLowerReturn() guarantees this assertion.
991 assert(TotalBytes <= 8 && "return values greater than 8 bytes cannot be lowered");
993 // GCC-ABI says that the size is rounded up to the next even number,
994 // but actually once it is more than 4 it will always round up to 8.
995 if (TotalBytes > 4) {
996 TotalBytes = 8;
997 } else {
998 TotalBytes = alignTo(TotalBytes, 2);
999 }
1001 // The index of the first register to use.
1002 int RegIdx = TotalBytes - 1;
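// e.g. an i32 return value (TotalBytes = 4) starts at RegList16[3] = R23:R22
// for its low half and ends up occupying R25..R22, matching the avr-gcc ABI.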
1003 for (unsigned i = 0; i != NumArgs; ++i) {
1004 MVT VT = Args[i].VT;
1006 if (VT == MVT::i8) {
1007 Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
1008 } else if (VT == MVT::i16) {
1009 Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
1011 llvm_unreachable("calling convention can only manage i8 and i16 types");
1013 assert(Reg && "register not available in calling convention");
1014 CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
1015 // Registers are assigned in increasing order (remember the array is reversed).
1016 RegIdx -= VT.getStoreSize();
1020 SDValue AVRTargetLowering::LowerFormalArguments(
1021 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1022 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1023 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1024 MachineFunction &MF = DAG.getMachineFunction();
1025 MachineFrameInfo &MFI = MF.getFrameInfo();
1026 auto DL = DAG.getDataLayout();
1028 // Assign locations to all of the incoming arguments.
1029 SmallVector<CCValAssign, 16> ArgLocs;
1030 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1033 // Variadic functions do not need all the analysis below.
1034 if (isVarArg) {
1035 CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
1036 } else {
1037 analyzeArguments(nullptr, &MF.getFunction(), &DL, Ins, ArgLocs, CCInfo);
1038 }
1041 for (CCValAssign &VA : ArgLocs) {
1043 // Arguments stored on registers.
1044 if (VA.isRegLoc()) {
1045 EVT RegVT = VA.getLocVT();
1046 const TargetRegisterClass *RC;
1047 if (RegVT == MVT::i8) {
1048 RC = &AVR::GPR8RegClass;
1049 } else if (RegVT == MVT::i16) {
1050 RC = &AVR::DREGSRegClass;
1052 llvm_unreachable("Unknown argument type!");
1055 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
1056 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
1058 // :NOTE: Clang should not promote any i8 into i16, but for safety the
1059 // following code will handle zexts or sexts generated by other
1060 // front ends. In that case, if this is an 8-bit value, it is really
1061 // passed promoted to 16 bits, so insert an assert[sz]ext to capture
1062 // this, then truncate to the right size.
1064 switch (VA.getLocInfo()) {
1066 llvm_unreachable("Unknown loc info!");
1067 case CCValAssign::Full:
1069 case CCValAssign::BCvt:
1070 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
1072 case CCValAssign::SExt:
1073 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
1074 DAG.getValueType(VA.getValVT()));
1075 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1077 case CCValAssign::ZExt:
1078 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
1079 DAG.getValueType(VA.getValVT()));
1080 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1084 InVals.push_back(ArgValue);
1087 assert(VA.isMemLoc());
1089 EVT LocVT = VA.getLocVT();
1091 // Create the frame index object for this incoming parameter.
1092 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
1093 VA.getLocMemOffset(), true);
1095 // Create the SelectionDAG nodes corresponding to a load
1096 // from this parameter.
1097 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DL));
1098 InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
1099 MachinePointerInfo::getFixedStack(MF, FI),
1104 // If the function takes a variable number of arguments, make a frame index
1105 // for the start of the first vararg value, for expansion of llvm.va_start.
1107 unsigned StackSize = CCInfo.getNextStackOffset();
1108 AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1110 AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
1116 //===----------------------------------------------------------------------===//
1117 // Call Calling Convention Implementation
1118 //===----------------------------------------------------------------------===//
1120 SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1121 SmallVectorImpl<SDValue> &InVals) const {
1122 SelectionDAG &DAG = CLI.DAG;
1124 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1125 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1126 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1127 SDValue Chain = CLI.Chain;
1128 SDValue Callee = CLI.Callee;
1129 bool &isTailCall = CLI.IsTailCall;
1130 CallingConv::ID CallConv = CLI.CallConv;
1131 bool isVarArg = CLI.IsVarArg;
1133 MachineFunction &MF = DAG.getMachineFunction();
1135 // AVR does not yet support tail call optimization.
1138 // Analyze operands of the call, assigning locations to each operand.
1139 SmallVector<CCValAssign, 16> ArgLocs;
1140 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1143 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1144 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
1145 // node so that legalize doesn't hack it.
1146 const Function *F = nullptr;
1147 if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1148 const GlobalValue *GV = G->getGlobal();
1150 F = cast<Function>(GV);
1152 DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
1153 } else if (const ExternalSymbolSDNode *ES =
1154 dyn_cast<ExternalSymbolSDNode>(Callee)) {
1155 Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
1156 getPointerTy(DAG.getDataLayout()));
1159 // Variadic functions do not need all the analysis below.
1160 if (isVarArg) {
1161 CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
1162 } else {
1163 analyzeArguments(&CLI, F, &DAG.getDataLayout(), Outs, ArgLocs, CCInfo);
1164 }
1166 // Get a count of how many bytes are to be pushed on the stack.
1167 unsigned NumBytes = CCInfo.getNextStackOffset();
1169 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
1171 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1173 // First, walk the register assignments, inserting copies.
1174 unsigned AI, AE;
1175 bool HasStackArgs = false;
1176 for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
1177 CCValAssign &VA = ArgLocs[AI];
1178 EVT RegVT = VA.getLocVT();
1179 SDValue Arg = OutVals[AI];
1181 // Promote the value if needed. With Clang this should not happen.
1182 switch (VA.getLocInfo()) {
1184 llvm_unreachable("Unknown loc info!");
1185 case CCValAssign::Full:
1187 case CCValAssign::SExt:
1188 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
1190 case CCValAssign::ZExt:
1191 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
1193 case CCValAssign::AExt:
1194 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
1196 case CCValAssign::BCvt:
1197 Arg = DAG.getNode(ISD::BITCAST, DL, RegVT, Arg);
1201 // Stop when we encounter a stack argument; we need to process them
1202 // in reverse order in the loop below.
1203 if (VA.isMemLoc()) {
1204 HasStackArgs = true;
1208 // Arguments that can be passed in registers must be kept in the RegsToPass
1209 // vector.
1210 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1213 // Second, stack arguments have to be walked in reverse order by inserting
1214 // chained stores; this ensures their order is not changed by the scheduler
1215 // and that the generated push instruction sequence is correct. Otherwise
1216 // they could be freely intermixed.
1218 for (AE = AI, AI = ArgLocs.size(); AI != AE; --AI) {
1219 unsigned Loc = AI - 1;
1220 CCValAssign &VA = ArgLocs[Loc];
1221 SDValue Arg = OutVals[Loc];
1223 assert(VA.isMemLoc());
1225 // SP points to one stack slot further so add one to adjust it.
1226 SDValue PtrOff = DAG.getNode(
1227 ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
1228 DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
1229 DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));
1232 DAG.getStore(Chain, DL, Arg, PtrOff,
1233 MachinePointerInfo::getStack(MF, VA.getLocMemOffset()),
1238 // Build a sequence of copy-to-reg nodes chained together with token chain and
1239 // flag operands which copy the outgoing args into registers. The InFlag is
1240 // necessary since all emitted instructions must be stuck together.
1241 SDValue InFlag;
1242 for (auto Reg : RegsToPass) {
1243 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InFlag);
1244 InFlag = Chain.getValue(1);
1247 // Returns a chain & a flag for retval copy to use.
1248 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1249 SmallVector<SDValue, 8> Ops;
1250 Ops.push_back(Chain);
1251 Ops.push_back(Callee);
1253 // Add argument registers to the end of the list so that they are known live
1254 // into the call.
1255 for (auto Reg : RegsToPass) {
1256 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
1259 // Add a register mask operand representing the call-preserved registers.
1260 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1261 const uint32_t *Mask =
1262 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
1263 assert(Mask && "Missing call preserved mask for calling convention");
1264 Ops.push_back(DAG.getRegisterMask(Mask));
1266 if (InFlag.getNode()) {
1267 Ops.push_back(InFlag);
1270 Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops);
1271 InFlag = Chain.getValue(1);
1273 // Create the CALLSEQ_END node.
1274 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
1275 DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
1278 InFlag = Chain.getValue(1);
1281 // Handle result values, copying them out of physregs into vregs that we
1282 // return.
1283 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, DL, DAG,
1287 /// Lower the result values of a call into the
1288 /// appropriate copies out of appropriate physical registers.
1290 SDValue AVRTargetLowering::LowerCallResult(
1291 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
1292 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, SelectionDAG &DAG,
1293 SmallVectorImpl<SDValue> &InVals) const {
1295 // Assign locations to each value returned by this call.
1296 SmallVector<CCValAssign, 16> RVLocs;
1297 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1300 // Handle runtime calling convs.
1301 if (CallConv == CallingConv::AVR_BUILTIN) {
1302 CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
1303 } else {
1304 analyzeReturnValues(Ins, CCInfo);
1305 }
1307 // Copy all of the result registers out of their specified physreg.
1308 for (CCValAssign const &RVLoc : RVLocs) {
1309 Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
1310                            InFlag)
1311             .getValue(1);
1312 InFlag = Chain.getValue(2);
1313 InVals.push_back(Chain.getValue(0));
1319 //===----------------------------------------------------------------------===//
1320 // Return Value Calling Convention Implementation
1321 //===----------------------------------------------------------------------===//
1323 bool AVRTargetLowering::CanLowerReturn(
1324 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
1325 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
1326 if (CallConv == CallingConv::AVR_BUILTIN) {
1327 SmallVector<CCValAssign, 16> RVLocs;
1328 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1329 return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
1332 unsigned TotalBytes = getTotalArgumentsSizeInBytes(Outs);
1333 return TotalBytes <= 8;
1337 AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1339 const SmallVectorImpl<ISD::OutputArg> &Outs,
1340 const SmallVectorImpl<SDValue> &OutVals,
1341 const SDLoc &dl, SelectionDAG &DAG) const {
1342 // CCValAssign - represent the assignment of the return value to locations.
1343 SmallVector<CCValAssign, 16> RVLocs;
1345 // CCState - Info about the registers and stack slot.
1346 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1349 MachineFunction &MF = DAG.getMachineFunction();
1351 // Analyze return values.
1352 if (CallConv == CallingConv::AVR_BUILTIN) {
1353 CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
1354 } else {
1355 analyzeReturnValues(Outs, CCInfo);
1356 }
1358 SDValue Flag;
1359 SmallVector<SDValue, 4> RetOps(1, Chain);
1360 // Copy the result values into the output registers.
1361 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1362 CCValAssign &VA = RVLocs[i];
1363 assert(VA.isRegLoc() && "Can only return in registers!");
1365 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
1367 // Guarantee that all emitted copies are stuck together with flags.
1368 Flag = Chain.getValue(1);
1369 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1372 // Don't emit the ret/reti instruction when the naked attribute is present in
1373 // the function being compiled.
1374 if (MF.getFunction().getAttributes().hasAttribute(
1375 AttributeList::FunctionIndex, Attribute::Naked)) {
1376 return Chain;
1377 }
1379 const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1381 unsigned RetOpc =
1382     AFI->isInterruptOrSignalHandler() ? AVRISD::RETI_FLAG : AVRISD::RET_FLAG;
1386 RetOps[0] = Chain; // Update chain.
1388 if (Flag.getNode()) {
1389 RetOps.push_back(Flag);
1392 return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
1395 //===----------------------------------------------------------------------===//
1396 //  Custom Inserters
1397 //===----------------------------------------------------------------------===//
1399 MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
1400 MachineBasicBlock *BB) const {
1402 const TargetRegisterClass *RC;
1403 bool HasRepeatedOperand = false;
1404 MachineFunction *F = BB->getParent();
1405 MachineRegisterInfo &RI = F->getRegInfo();
1406 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1407 DebugLoc dl = MI.getDebugLoc();
1409 switch (MI.getOpcode()) {
1411 llvm_unreachable("Invalid shift opcode!");
1413 Opc = AVR::ADDRdRr; // LSL is an alias of ADD Rd, Rd
1414 RC = &AVR::GPR8RegClass;
1415 HasRepeatedOperand = true;
1419 RC = &AVR::DREGSRegClass;
1423 RC = &AVR::GPR8RegClass;
1427 RC = &AVR::DREGSRegClass;
1431 RC = &AVR::GPR8RegClass;
1435 RC = &AVR::DREGSRegClass;
1439 RC = &AVR::GPR8RegClass;
1443 RC = &AVR::DREGSRegClass;
1447 RC = &AVR::GPR8RegClass;
1451 RC = &AVR::DREGSRegClass;
1455 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1457 MachineFunction::iterator I;
1458 for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I);
1459 if (I != F->end()) ++I;
1461 // Create loop block.
1462 MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
1463 MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);
1465 F->insert(I, LoopBB);
1466 F->insert(I, RemBB);
1468 // Update machine-CFG edges by transferring all successors of the current
1469 // block to the block containing instructions after shift.
1470 RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
1472 RemBB->transferSuccessorsAndUpdatePHIs(BB);
1474 // Add edges BB => LoopBB => RemBB, BB => RemBB, LoopBB => LoopBB.
1475 BB->addSuccessor(LoopBB);
1476 BB->addSuccessor(RemBB);
1477 LoopBB->addSuccessor(RemBB);
1478 LoopBB->addSuccessor(LoopBB);
1480 Register ShiftAmtReg = RI.createVirtualRegister(&AVR::LD8RegClass);
1481 Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::LD8RegClass);
1482 Register ShiftReg = RI.createVirtualRegister(RC);
1483 Register ShiftReg2 = RI.createVirtualRegister(RC);
1484 Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
1485 Register SrcReg = MI.getOperand(1).getReg();
1486 Register DstReg = MI.getOperand(0).getReg();
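// BB: skip the shift loop entirely when the shift amount is zero
// (cpi ShiftAmtSrcReg, 0; breq RemBB).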
1491 BuildMI(BB, dl, TII.get(AVR::CPIRdK)).addReg(ShiftAmtSrcReg).addImm(0);
1492 BuildMI(BB, dl, TII.get(AVR::BREQk)).addMBB(RemBB);
1495 // ShiftReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
1496 // ShiftAmt = phi [%N, BB], [%ShiftAmt2, LoopBB]
1497 // ShiftReg2 = shift ShiftReg
1498 // ShiftAmt2 = ShiftAmt - 1;
1499 BuildMI(LoopBB, dl, TII.get(AVR::PHI), ShiftReg)
1504 BuildMI(LoopBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
1505 .addReg(ShiftAmtSrcReg)
1507 .addReg(ShiftAmtReg2)
1510 auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
1511 if (HasRepeatedOperand)
1512 ShiftMI.addReg(ShiftReg);
1514 BuildMI(LoopBB, dl, TII.get(AVR::SUBIRdK), ShiftAmtReg2)
1515 .addReg(ShiftAmtReg)
1517 BuildMI(LoopBB, dl, TII.get(AVR::BRNEk)).addMBB(LoopBB);
1520 // DestReg = phi [%SrcReg, BB], [%ShiftReg, LoopBB]
1521 BuildMI(*RemBB, RemBB->begin(), dl, TII.get(AVR::PHI), DstReg)
1527 MI.eraseFromParent(); // The pseudo instruction is gone now.
1531 static bool isCopyMulResult(MachineBasicBlock::iterator const &I) {
1532 if (I->getOpcode() == AVR::COPY) {
1533 Register SrcReg = I->getOperand(1).getReg();
1534 return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
1540 // The mul instructions wreak havoc on our zero_reg R1. We need to clear it
1541 // after the result has been evacuated. This is probably not the best way to do
1542 // it, but it works for now.
1543 MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
1544 MachineBasicBlock *BB) const {
1545 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1546 MachineBasicBlock::iterator I(MI);
1547 ++I; // in any case insert *after* the mul instruction
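// Skip over any COPY instructions that move the multiplication result out of
// R0/R1 before R1 is cleared below.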
1548 if (isCopyMulResult(I))
1550 if (isCopyMulResult(I))
1552 BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)
1559 AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1560 MachineBasicBlock *MBB) const {
1561 int Opc = MI.getOpcode();
1563 // Pseudo shift instructions with a non-constant shift amount are expanded
1564 // into a loop.
1576 return insertShift(MI, MBB);
1579 return insertMul(MI, MBB);
1582 assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
1583 "Unexpected instr type to insert");
1585 const AVRInstrInfo &TII = (const AVRInstrInfo &)*MI.getParent()
1586     ->getParent()
1587     ->getSubtarget()
1588     .getInstrInfo();
1589 DebugLoc dl = MI.getDebugLoc();
1591 // To "insert" a SELECT instruction, we insert the diamond
1592 // control-flow pattern. The incoming instruction knows the
1593 // destination vreg to set, the condition code register to branch
1594 // on, the true/false values to select between, and a branch opcode
1595 // to use.
1597 MachineFunction *MF = MBB->getParent();
1598 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
1599 MachineBasicBlock *FallThrough = MBB->getFallThrough();
1601 // If the current basic block falls through to another basic block,
1602 // we must insert an unconditional branch to the fallthrough destination
1603 // if we are to insert basic blocks at the prior fallthrough point.
1604 if (FallThrough != nullptr) {
1605 BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(FallThrough);
1606 }
1608 MachineBasicBlock *trueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1609 MachineBasicBlock *falseMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1611 MachineFunction::iterator I;
1612 for (I = MF->begin(); I != MF->end() && &(*I) != MBB; ++I);
1613 if (I != MF->end()) ++I;
1614 MF->insert(I, trueMBB);
1615 MF->insert(I, falseMBB);
1617 // Transfer remaining instructions and all successors of the current
1618 // block to the block which will contain the Phi node for the
1619 // select.
1620 trueMBB->splice(trueMBB->begin(), MBB,
1621 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
1622 trueMBB->transferSuccessorsAndUpdatePHIs(MBB);
1624 AVRCC::CondCodes CC = (AVRCC::CondCodes)MI.getOperand(3).getImm();
1625 BuildMI(MBB, dl, TII.getBrCond(CC)).addMBB(trueMBB);
1626 BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(falseMBB);
1627 MBB->addSuccessor(falseMBB);
1628 MBB->addSuccessor(trueMBB);
1630 // Unconditionally flow back to the true block
1631 BuildMI(falseMBB, dl, TII.get(AVR::RJMPk)).addMBB(trueMBB);
1632 falseMBB->addSuccessor(trueMBB);
1634 // Set up the Phi node to determine where we came from
1635 BuildMI(*trueMBB, trueMBB->begin(), dl, TII.get(AVR::PHI), MI.getOperand(0).getReg())
1636 .addReg(MI.getOperand(1).getReg())
1638 .addReg(MI.getOperand(2).getReg())
1641 MI.eraseFromParent(); // The pseudo instruction is gone now.
1645 //===----------------------------------------------------------------------===//
1646 // Inline Asm Support
1647 //===----------------------------------------------------------------------===//
1649 AVRTargetLowering::ConstraintType
1650 AVRTargetLowering::getConstraintType(StringRef Constraint) const {
1651 if (Constraint.size() == 1) {
1652 // See http://www.nongnu.org/avr-libc/user-manual/inline_asm.html
1653 switch (Constraint[0]) {
1656 case 'a': // Simple upper registers
1657 case 'b': // Base pointer registers pairs
1658 case 'd': // Upper register
1659 case 'l': // Lower registers
1660 case 'e': // Pointer register pairs
1661 case 'q': // Stack pointer register
1662 case 'r': // Any register
1663 case 'w': // Special upper register pairs
1664 return C_RegisterClass;
1665 case 't': // Temporary register
1666 case 'x': case 'X': // Pointer register pair X
1667 case 'y': case 'Y': // Pointer register pair Y
1668 case 'z': case 'Z': // Pointer register pair Z
1670 case 'Q': // A memory address based on Y or Z pointer with displacement.
1671 return C_Memory;
1672 case 'G': // Floating point constant
1673 case 'I': // 6-bit positive integer constant
1674 case 'J': // 6-bit negative integer constant
1675 case 'K': // Integer constant (Range: 2)
1676 case 'L': // Integer constant (Range: 0)
1677 case 'M': // 8-bit integer constant
1678 case 'N': // Integer constant (Range: -1)
1679 case 'O': // Integer constant (Range: 8, 16, 24)
1680 case 'P': // Integer constant (Range: 1)
1681 case 'R': // Integer constant (Range: -6 to 5)
1686 return TargetLowering::getConstraintType(Constraint);
1690 AVRTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
1691 // Not sure if this is actually the right thing to do, but we got to do
1692 // *something* [agnat]
1693 switch (ConstraintCode[0]) {
1694 case 'Q':
1695 return InlineAsm::Constraint_Q;
1696 }
1697 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
1700 AVRTargetLowering::ConstraintWeight
1701 AVRTargetLowering::getSingleConstraintMatchWeight(
1702 AsmOperandInfo &info, const char *constraint) const {
1703 ConstraintWeight weight = CW_Invalid;
1704 Value *CallOperandVal = info.CallOperandVal;
1706 // If we don't have a value, we can't do a match,
1707 // but allow it at the lowest weight.
1708 // (this behaviour has been copied from the ARM backend)
1709 if (!CallOperandVal) {
1713 // Look at the constraint type.
1714 switch (*constraint) {
1716 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
1721 weight = CW_Register;
1732 weight = CW_SpecificReg;
1735 if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
1737 weight = CW_Constant;
1742 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
1743 if (isUInt<6>(C->getZExtValue())) {
1744 weight = CW_Constant;
1749 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
1750 if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
1751 weight = CW_Constant;
1756 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
1757 if (C->getZExtValue() == 2) {
1758 weight = CW_Constant;
1763 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
1764 if (C->getZExtValue() == 0) {
1765 weight = CW_Constant;
1770 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
1771 if (isUInt<8>(C->getZExtValue())) {
1772 weight = CW_Constant;
1777 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
1778 if (C->getSExtValue() == -1) {
1779 weight = CW_Constant;
1784 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
1785 if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
1786 (C->getZExtValue() == 24)) {
1787 weight = CW_Constant;
1792 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
1793 if (C->getZExtValue() == 1) {
1794 weight = CW_Constant;
1799 if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
1800 if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
1801 weight = CW_Constant;
1813 std::pair<unsigned, const TargetRegisterClass *>
1814 AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
1815 StringRef Constraint,
1817 // We only support i8 and i16.
1819 //:FIXME: the assert below is commented out for now since it sometimes fires
1820 // assert((VT == MVT::i16 || VT == MVT::i8) && "Wrong operand type.");
1822 if (Constraint.size() == 1) {
1823 switch (Constraint[0]) {
1824 case 'a': // Simple upper registers r16..r23.
1825 return std::make_pair(0U, &AVR::LD8loRegClass);
1826 case 'b': // Base pointer registers: y, z.
1827 return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
1828 case 'd': // Upper registers r16..r31.
1829 return std::make_pair(0U, &AVR::LD8RegClass);
1830 case 'l': // Lower registers r0..r15.
1831 return std::make_pair(0U, &AVR::GPR8loRegClass);
1832 case 'e': // Pointer register pairs: x, y, z.
1833 return std::make_pair(0U, &AVR::PTRREGSRegClass);
1834 case 'q': // Stack pointer register: SPH:SPL.
1835 return std::make_pair(0U, &AVR::GPRSPRegClass);
1836 case 'r': // Any register: r0..r31.
1837 if (VT == MVT::i8)
1838 return std::make_pair(0U, &AVR::GPR8RegClass);
1840 assert(VT == MVT::i16 && "inline asm constraint too large");
1841 return std::make_pair(0U, &AVR::DREGSRegClass);
1842 case 't': // Temporary register: r0.
1843 return std::make_pair(unsigned(AVR::R0), &AVR::GPR8RegClass);
1844 case 'w': // Special upper register pairs: r24, r26, r28, r30.
1845 return std::make_pair(0U, &AVR::IWREGSRegClass);
1846 case 'x': // Pointer register pair X: r27:r26.
1848 return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
1849 case 'y': // Pointer register pair Y: r29:r28.
1851 return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
1852 case 'z': // Pointer register pair Z: r31:r30.
1854 return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
1860 return TargetLowering::getRegForInlineAsmConstraint(
1861 Subtarget.getRegisterInfo(), Constraint, VT);
1864 void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
1865 std::string &Constraint,
1866 std::vector<SDValue> &Ops,
1867 SelectionDAG &DAG) const {
1868 SDValue Result(0, 0);
1870 EVT Ty = Op.getValueType();
1872 // Currently only support length 1 constraints.
1873 if (Constraint.length() != 1) {
1877 char ConstraintLetter = Constraint[0];
1878 switch (ConstraintLetter) {
1881 // Deal with integers first:
1891 const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1896 int64_t CVal64 = C->getSExtValue();
1897 uint64_t CUVal64 = C->getZExtValue();
1898 switch (ConstraintLetter) {
1900 if (!isUInt<6>(CUVal64))
1902 Result = DAG.getTargetConstant(CUVal64, DL, Ty);
1905 if (CVal64 < -63 || CVal64 > 0)
1907 Result = DAG.getTargetConstant(CVal64, DL, Ty);
1912 Result = DAG.getTargetConstant(CUVal64, DL, Ty);
1917 Result = DAG.getTargetConstant(CUVal64, DL, Ty);
1920 if (!isUInt<8>(CUVal64))
1922 // i8 type may be printed as a negative number,
1923 // e.g. 254 would be printed as -2,
1924 // so we force it to i16 at least.
1925 if (Ty.getSimpleVT() == MVT::i8) {
1926 Ty = MVT::i16;
1927 }
1928 Result = DAG.getTargetConstant(CUVal64, DL, Ty);
1933 Result = DAG.getTargetConstant(CVal64, DL, Ty);
1935 case 'O': // 8, 16, 24
1936 if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
1938 Result = DAG.getTargetConstant(CUVal64, DL, Ty);
1943 Result = DAG.getTargetConstant(CUVal64, DL, Ty);
1946 if (CVal64 < -6 || CVal64 > 5)
1948 Result = DAG.getTargetConstant(CVal64, DL, Ty);
1955 const ConstantFPSDNode *FC = dyn_cast<ConstantFPSDNode>(Op);
1956 if (!FC || !FC->isZero())
1958 // Soften float to i8 0
1959 Result = DAG.getTargetConstant(0, DL, MVT::i8);
1963 if (Result.getNode()) {
1964 Ops.push_back(Result);
1968 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
1971 Register AVRTargetLowering::getRegisterByName(const char *RegName, LLT VT,
1972 const MachineFunction &MF) const {
1975 if (VT == LLT::scalar(8)) {
1976 Reg = StringSwitch<unsigned>(RegName)
1977 .Case("r0", AVR::R0).Case("r1", AVR::R1).Case("r2", AVR::R2)
1978 .Case("r3", AVR::R3).Case("r4", AVR::R4).Case("r5", AVR::R5)
1979 .Case("r6", AVR::R6).Case("r7", AVR::R7).Case("r8", AVR::R8)
1980 .Case("r9", AVR::R9).Case("r10", AVR::R10).Case("r11", AVR::R11)
1981 .Case("r12", AVR::R12).Case("r13", AVR::R13).Case("r14", AVR::R14)
1982 .Case("r15", AVR::R15).Case("r16", AVR::R16).Case("r17", AVR::R17)
1983 .Case("r18", AVR::R18).Case("r19", AVR::R19).Case("r20", AVR::R20)
1984 .Case("r21", AVR::R21).Case("r22", AVR::R22).Case("r23", AVR::R23)
1985 .Case("r24", AVR::R24).Case("r25", AVR::R25).Case("r26", AVR::R26)
1986 .Case("r27", AVR::R27).Case("r28", AVR::R28).Case("r29", AVR::R29)
1987 .Case("r30", AVR::R30).Case("r31", AVR::R31)
1988 .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30)
1991 Reg = StringSwitch<unsigned>(RegName)
1992 .Case("r0", AVR::R1R0).Case("r2", AVR::R3R2)
1993 .Case("r4", AVR::R5R4).Case("r6", AVR::R7R6)
1994 .Case("r8", AVR::R9R8).Case("r10", AVR::R11R10)
1995 .Case("r12", AVR::R13R12).Case("r14", AVR::R15R14)
1996 .Case("r16", AVR::R17R16).Case("r18", AVR::R19R18)
1997 .Case("r20", AVR::R21R20).Case("r22", AVR::R23R22)
1998 .Case("r24", AVR::R25R24).Case("r26", AVR::R27R26)
1999 .Case("r28", AVR::R29R28).Case("r30", AVR::R31R30)
2000 .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30)
2007 report_fatal_error("Invalid register name global variable");
2010 } // end of namespace llvm