//===-- AVRISelLowering.cpp - AVR DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AVR uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "AVRISelLowering.h"

#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"

#include "AVRMachineFunctionInfo.h"
#include "AVRTargetMachine.h"
#include "MCTargetDesc/AVRMCTargetDesc.h"

namespace llvm {

AVRTargetLowering::AVRTargetLowering(AVRTargetMachine &tm)
    : TargetLowering(tm) {
  // Set up the register classes.
  addRegisterClass(MVT::i8, &AVR::GPR8RegClass);
  addRegisterClass(MVT::i16, &AVR::DREGSRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(tm.getSubtargetImpl()->getRegisterInfo());

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);
  setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(AVR::SP);

  setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i16, Custom);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand);

  for (MVT VT : MVT::integer_valuetypes()) {
    for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(N, VT, MVT::i1, Promote);
      setLoadExtAction(N, VT, MVT::i8, Expand);
    }
  }

  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  // sub (x, imm) gets canonicalized to add (x, -imm), so for illegal types
  // revert into a sub since we don't have an add with immediate instruction.
  setOperationAction(ISD::ADD, MVT::i32, Custom);
  setOperationAction(ISD::ADD, MVT::i64, Custom);

  // Our shift instructions are only able to shift 1 bit at a time, so handle
  // this in a custom way.
  setOperationAction(ISD::SRA, MVT::i8, Custom);
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i16, Custom);
  setOperationAction(ISD::SHL, MVT::i16, Custom);
  setOperationAction(ISD::SRL, MVT::i16, Custom);
  setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand);

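  // (AVR has no barrel shifter: the hardware shift and rotate instructions
  // move a single bit per operation, so constant shifts are emitted as chains
  // of single-bit shifts and variable shifts as loops; see LowerShifts and
  // insertShift below.)
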
  setOperationAction(ISD::ROTL, MVT::i8, Custom);
  setOperationAction(ISD::ROTL, MVT::i16, Custom);
  setOperationAction(ISD::ROTR, MVT::i8, Custom);
  setOperationAction(ISD::ROTR, MVT::i16, Custom);

  setOperationAction(ISD::BR_CC, MVT::i8, Custom);
  setOperationAction(ISD::BR_CC, MVT::i16, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::i8, Expand);
  setOperationAction(ISD::SELECT, MVT::i16, Expand);

  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // Add support for postincrement and predecrement load/stores.
  setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i16, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Atomic operations which must be lowered to rtlib calls
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::ATOMIC_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand);
  }

  // Division/remainder
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);

  // Make division and modulus custom
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::UDIVREM, VT, Custom);
    setOperationAction(ISD::SDIVREM, VT, Custom);
  }

  // Do not use MUL. The AVR instructions are closer to SMUL_LOHI &co.
  setOperationAction(ISD::MUL, MVT::i8, Expand);
  setOperationAction(ISD::MUL, MVT::i16, Expand);

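  // (The hardware MUL/MULS instructions leave a full 16-bit product in
  // R1:R0, which is exactly the LOHI node form.)
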
  // Expand 16 bit multiplications.
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    // TODO: The generated code is pretty poor. Investigate using the
    // same "shift and subtract with carry" trick that we do for
    // extending 8-bit to 16-bit. This may require infrastructure
    // improvements in how we treat 16-bit "registers" to be feasible.
  }

  // Division rtlib functions (not supported)
  setLibcallName(RTLIB::SDIV_I8, nullptr);
  setLibcallName(RTLIB::SDIV_I16, nullptr);
  setLibcallName(RTLIB::SDIV_I32, nullptr);
  setLibcallName(RTLIB::SDIV_I64, nullptr);
  setLibcallName(RTLIB::SDIV_I128, nullptr);
  setLibcallName(RTLIB::UDIV_I8, nullptr);
  setLibcallName(RTLIB::UDIV_I16, nullptr);
  setLibcallName(RTLIB::UDIV_I32, nullptr);
  setLibcallName(RTLIB::UDIV_I64, nullptr);
  setLibcallName(RTLIB::UDIV_I128, nullptr);

  // Modulus rtlib functions (not supported)
  setLibcallName(RTLIB::SREM_I8, nullptr);
  setLibcallName(RTLIB::SREM_I16, nullptr);
  setLibcallName(RTLIB::SREM_I32, nullptr);
  setLibcallName(RTLIB::SREM_I64, nullptr);
  setLibcallName(RTLIB::SREM_I128, nullptr);
  setLibcallName(RTLIB::UREM_I8, nullptr);
  setLibcallName(RTLIB::UREM_I16, nullptr);
  setLibcallName(RTLIB::UREM_I32, nullptr);
  setLibcallName(RTLIB::UREM_I64, nullptr);
  setLibcallName(RTLIB::UREM_I128, nullptr);

  // Division and modulus rtlib functions
  setLibcallName(RTLIB::SDIVREM_I8, "__divmodqi4");
  setLibcallName(RTLIB::SDIVREM_I16, "__divmodhi4");
  setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
  setLibcallName(RTLIB::SDIVREM_I64, "__divmoddi4");
  setLibcallName(RTLIB::SDIVREM_I128, "__divmodti4");
  setLibcallName(RTLIB::UDIVREM_I8, "__udivmodqi4");
  setLibcallName(RTLIB::UDIVREM_I16, "__udivmodhi4");
  setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  setLibcallName(RTLIB::UDIVREM_I64, "__udivmoddi4");
  setLibcallName(RTLIB::UDIVREM_I128, "__udivmodti4");

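  // (For example, an unsigned 16-bit division or remainder thus becomes a
  // single __udivmodhi4 call that produces both the quotient and the
  // remainder.)
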
  // Several of the runtime library functions use a special calling conv
  setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::AVR_BUILTIN);

  // Trigonometric rtlib functions
  setLibcallName(RTLIB::SIN_F32, "sin");
  setLibcallName(RTLIB::COS_F32, "cos");

  setMinFunctionAlignment(1);

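  // Effectively disable jump tables by requiring an impossibly large minimum
  // number of entries (BR_JT is expanded above as well).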
  setMinimumJumpTableEntries(INT_MAX);
}

const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define NODE(name)                                                            \
  case AVRISD::name:                                                          \
    return #name

  switch (Opcode) {
  default:
    return nullptr;
    NODE(RET_FLAG);
    NODE(RETI_FLAG);
    NODE(CALL);
    NODE(WRAPPER);
    NODE(LSL);
    NODE(LSR);
    NODE(ROL);
    NODE(ROR);
    NODE(ASR);
    NODE(LSLLOOP);
    NODE(LSRLOOP);
    NODE(ROLLOOP);
    NODE(RORLOOP);
    NODE(ASRLOOP);
    NODE(BRCOND);
    NODE(CMP);
    NODE(CMPC);
    NODE(TST);
    NODE(SELECT_CC);
#undef NODE
  }
}

EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {
  assert(!VT.isVector() && "No AVR SetCC type for vectors!");
  return MVT::i8;
}

SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
  //:TODO: this function has to be completely rewritten to produce optimal
  // code, for now it's producing very long but correct code.
  unsigned Opc8;
  const SDNode *N = Op.getNode();
  EVT VT = Op.getValueType();
  SDLoc dl(N);

  // Expand non-constant shifts to loops.
  if (!isa<ConstantSDNode>(N->getOperand(1))) {
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Invalid shift opcode!");
    case ISD::SHL:
      return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::SRL:
      return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::ROTL:
      return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::ROTR:
      return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::SRA:
      return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    }
  }

  uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Victim = N->getOperand(0);

  switch (Op.getOpcode()) {
  case ISD::SRA:
    Opc8 = AVRISD::ASR;
    break;
  case ISD::ROTL:
    Opc8 = AVRISD::ROL;
    break;
  case ISD::ROTR:
    Opc8 = AVRISD::ROR;
    break;
  case ISD::SRL:
    Opc8 = AVRISD::LSR;
    break;
  case ISD::SHL:
    Opc8 = AVRISD::LSL;
    break;
  default:
    llvm_unreachable("Invalid shift opcode");
  }

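  // A constant shift now becomes a chain of ShiftAmount single-bit shift
  // nodes; e.g. an i8 "shl x, 3" ultimately selects to three lsl instructions.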
  while (ShiftAmount--) {
    Victim = DAG.getNode(Opc8, dl, VT, Victim);
  }

  return Victim;
}

SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool IsSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());

  RTLIB::Libcall LC;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:
    LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
    break;
  case MVT::i16:
    LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
    break;
  case MVT::i32:
    LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
    break;
  case MVT::i64:
    LC = IsSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64;
    break;
  }

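  // One libcall computes both results: DAGCombiner forms ISD::SDIVREM or
  // ISD::UDIVREM whenever both the quotient and the remainder of the same
  // operands are needed.
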
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (SDValue const &Value : Op->op_values()) {
    Entry.Node = Value;
    Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = IsSigned;
    Entry.IsZExt = !IsSigned;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = (Type *)StructType::get(Ty, Ty);

  SDLoc dl(Op);
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setInRegister()
      .setSExtResult(IsSigned)
      .setZExtResult(!IsSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
}

SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();

  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();

  // Create the TargetGlobalAddress node, folding in the constant offset.
  SDValue Result =
      DAG.getTargetGlobalAddress(GV, SDLoc(Op), getPointerTy(DL), Offset);
  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}

SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(DL));

  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}

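// AVR conditional branches test SREG flags directly; the unsigned conditions
// below correspond to the brsh (same or higher) and brlo (lower)
// instructions. Conditions missing from this list are first canonicalized
// onto these six in getAVRCmp.
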
/// IntCCToAVRCC - Convert a DAG integer condition code to an AVR CC.
static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown condition code!");
  case ISD::SETEQ:
    return AVRCC::COND_EQ;
  case ISD::SETNE:
    return AVRCC::COND_NE;
  case ISD::SETGE:
    return AVRCC::COND_GE;
  case ISD::SETLT:
    return AVRCC::COND_LT;
  case ISD::SETUGE:
    return AVRCC::COND_SH;
  case ISD::SETULT:
    return AVRCC::COND_LO;
  }
}

/// Returns appropriate AVR CMP/CMPC nodes and corresponding condition code for
/// the given operands.
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &AVRcc, SelectionDAG &DAG,
                                     SDLoc DL) const {
  SDValue Cmp;
  EVT VT = LHS.getValueType();
  bool UseTest = false;

  switch (CC) {
  default:
    break;
  case ISD::SETLE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETGE;
    break;
  }
  case ISD::SETGT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case -1: {
        // When doing lhs > -1 use a tst instruction on the top part of lhs
        // and use brpl instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_PL, DL, MVT::i8);
        break;
      }
      case 0: {
        // Turn lhs > 0 into 0 < lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETLT;
        break;
      }
      default: {
        // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows
        // us to fold the constant into the cmp instruction.
        RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      }
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETLT;
    break;
  }
  case ISD::SETLT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case 1: {
        // Turn lhs < 1 into 0 >= lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      case 0: {
        // When doing lhs < 0 use a tst instruction on the top part of lhs
        // and use brmi instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_MI, DL, MVT::i8);
        break;
      }
      }
    }
    break;
  }
  case ISD::SETULE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETUGE;
    break;
  }
  case ISD::SETUGT: {
    // Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to
    // fold the constant into the cmp instruction.
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
      CC = ISD::SETUGE;
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETULT;
    break;
  }
  }

  // Expand 32 and 64 bit comparisons with custom CMP and CMPC nodes instead of
  // using the default and/or/xor expansion code which is much longer.
  if (VT == MVT::i32) {
    SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHShi,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
    }
  } else if (VT == MVT::i64) {
    SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue LHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue LHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(1, DL));

    SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue RHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue RHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS3,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS0, RHS0);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp);
    }
  } else if (VT == MVT::i8 || VT == MVT::i16) {
    if (UseTest) {
      // When using tst we only care about the highest part.
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue,
                        (VT == MVT::i8)
                            ? LHS
                            : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8,
                                          LHS, DAG.getIntPtrConstant(1, DL)));
    } else {
      Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS);
    }
  } else {
    llvm_unreachable("Invalid comparison size");
  }

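  // For example, an i16 compare selects to a cp/cpc pair (registers
  // illustrative):
  //   cp  r24, r22
  //   cpc r25, r23
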
  // When using a test instruction AVRcc is already set.
  if (!UseTest) {
    AVRcc = DAG.getConstant(intCCToAVRCC(CC), DL, MVT::i8);
  }

  return Cmp;
}

SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
                     Cmp);
}

SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, dl, VTs, Ops);
}

SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, DL);

  SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
  SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, DL, VTs, Ops);
}

SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  auto DL = DAG.getDataLayout();
  SDLoc dl(Op);

  // Vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));

  return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
                      MachinePointerInfo(SV), 0);
}

SDValue AVRTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    return LowerShifts(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return LowerDivRem(Op, DAG);
  }

  return SDValue();
}

/// Replace a node with an illegal result type
/// with a new node built out of custom code.
void AVRTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc DL(N);

  switch (N->getOpcode()) {
  case ISD::ADD: {
    // Convert add (x, imm) into sub (x, -imm).
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      SDValue Sub = DAG.getNode(
          ISD::SUB, DL, N->getValueType(0), N->getOperand(0),
          DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0)));
      Results.push_back(Sub);
    }
    break;
  }
  default: {
    SDValue Res = LowerOperation(SDValue(N, 0), DAG);

    for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
      Results.push_back(Res.getValue(I));

    break;
  }
  }
}

/// Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS) const {
  int64_t Offs = AM.BaseOffs;

  // Allow absolute addresses.
  if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && Offs == 0) {
    return true;
  }

  // Flash memory instructions only allow zero offsets.
  if (isa<PointerType>(Ty) && AS == AVR::ProgramMemory) {
    return false;
  }

  // Allow reg+<6bit> offset.
  if (Offs < 0)
    Offs = -Offs;
  if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 0 && isUInt<6>(Offs)) {
    return true;
  }

  return false;
}

/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool AVRTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  EVT VT;
  const SDNode *Op;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Op = LD->getBasePtr().getNode();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
    if (AVR::isProgramMemoryAccess(LD)) {
      return false;
    }
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Op = ST->getBasePtr().getNode();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::PRE_DEC;

    return true;
  }

  return false;
}

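// Pre-decrement addressing corresponds directly to the "ld Rd, -X" and
// "st -X, Rr" instruction forms, hence only offsets of -1 (i8) and -2 (i16)
// are accepted above.
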
/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool AVRTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  EVT VT;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;
    if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::POST_INC;

    return true;
  }

  return false;
}

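// Post-increment addressing corresponds to the "ld Rd, X+" and "st X+, Rr"
// forms, hence only offsets of +1 (i8) and +2 (i16) are accepted above.
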
bool AVRTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  return true;
}

//===----------------------------------------------------------------------===//
//  Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "AVRGenCallingConv.inc"

/// For each argument in a function store the number of pieces it is composed
/// of.
static void parseFunctionArgs(const Function *F, const DataLayout *TD,
                              SmallVectorImpl<unsigned> &Out) {
  for (Argument const &Arg : F->args()) {
    unsigned Bytes = (TD->getTypeSizeInBits(Arg.getType()) + 7) / 8;
    Out.push_back((Bytes + 1) / 2);
  }
}

/// For external symbols there is no function prototype information so we
/// have to rely directly on argument sizes.
static void parseExternFuncCallArgs(const SmallVectorImpl<ISD::OutputArg> &In,
                                    SmallVectorImpl<unsigned> &Out) {
  for (unsigned i = 0, e = In.size(); i != e;) {
    unsigned Size = 0;
    unsigned Offset = 0;
    while ((i != e) && (In[i].PartOffset == Offset)) {
      Offset += In[i].VT.getStoreSize();
      ++i;
      ++Size;
    }
    Out.push_back(Size);
  }
}

static StringRef getFunctionName(TargetLowering::CallLoweringInfo &CLI) {
  SDValue Callee = CLI.Callee;

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    return G->getSymbol();
  }

  if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    return G->getGlobal()->getName();
  }

  llvm_unreachable("don't know how to get the name for this callee");
}

/// Analyze incoming and outgoing function arguments. We need custom C++ code
/// to handle special constraints in the ABI like reversing the order of the
/// pieces of split arguments. In addition, all pieces of a certain argument
/// have to be passed either using registers or the stack but never mixing both.
static void analyzeStandardArguments(TargetLowering::CallLoweringInfo *CLI,
                                     const Function *F, const DataLayout *TD,
                                     const SmallVectorImpl<ISD::OutputArg> *Outs,
                                     const SmallVectorImpl<ISD::InputArg> *Ins,
                                     CallingConv::ID CallConv,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     CCState &CCInfo, bool IsCall, bool IsVarArg) {
  static const MCPhysReg RegList8[] = {AVR::R24, AVR::R22, AVR::R20,
                                       AVR::R18, AVR::R16, AVR::R14,
                                       AVR::R12, AVR::R10, AVR::R8};
  static const MCPhysReg RegList16[] = {AVR::R25R24, AVR::R23R22, AVR::R21R20,
                                        AVR::R19R18, AVR::R17R16, AVR::R15R14,
                                        AVR::R13R12, AVR::R11R10, AVR::R9R8};

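  // The lists are in allocation order: the first argument piece is assigned
  // R24 (or the R25:R24 pair) and later pieces move down towards R8, matching
  // the avr-gcc calling convention.
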
  // Variadic functions do not need all the analysis below.
  if (IsVarArg) {
    if (IsCall) {
      CCInfo.AnalyzeCallOperands(*Outs, ArgCC_AVR_Vararg);
    } else {
      CCInfo.AnalyzeFormalArguments(*Ins, ArgCC_AVR_Vararg);
    }
    return;
  }

  // Fill in the Args array which will contain original argument sizes.
  SmallVector<unsigned, 8> Args;
  if (IsCall) {
    parseExternFuncCallArgs(*Outs, Args);
  } else {
    assert(F != nullptr && "function should not be null");
    parseFunctionArgs(F, TD, Args);
  }

  unsigned RegsLeft = array_lengthof(RegList8), ValNo = 0;
  // Variadic functions always use the stack.
  bool UsesStack = false;
  for (unsigned i = 0, pos = 0, e = Args.size(); i != e; ++i) {
    unsigned Size = Args[i];

    // If we have a zero-sized argument, don't attempt to lower it.
    // AVR-GCC does not support zero-sized arguments and so we need not
    // worry about ABI compatibility.
    if (Size == 0) continue;

    MVT LocVT = (IsCall) ? (*Outs)[pos].VT : (*Ins)[pos].VT;

    // If we have plenty of regs to pass the whole argument do it.
    if (!UsesStack && (Size <= RegsLeft)) {
      const MCPhysReg *RegList = (LocVT == MVT::i16) ? RegList16 : RegList8;

      for (unsigned j = 0; j != Size; ++j) {
        unsigned Reg = CCInfo.AllocateReg(
            ArrayRef<MCPhysReg>(RegList, array_lengthof(RegList8)));
        CCInfo.addLoc(
            CCValAssign::getReg(ValNo++, LocVT, Reg, LocVT, CCValAssign::Full));
        --RegsLeft;
      }

      // Reverse the order of the pieces to agree with the "big endian" format
      // required in the calling convention ABI.
      std::reverse(ArgLocs.begin() + pos, ArgLocs.begin() + pos + Size);
    } else {
      // Pass the rest of arguments using the stack.
      UsesStack = true;
      for (unsigned j = 0; j != Size; ++j) {
        unsigned Offset = CCInfo.AllocateStack(
            TD->getTypeAllocSize(EVT(LocVT).getTypeForEVT(CCInfo.getContext())),
            TD->getABITypeAlignment(
                EVT(LocVT).getTypeForEVT(CCInfo.getContext())));
        CCInfo.addLoc(CCValAssign::getMem(ValNo++, LocVT, Offset, LocVT,
                                          CCValAssign::Full));
      }
    }
    pos += Size;
  }
}

static void analyzeBuiltinArguments(TargetLowering::CallLoweringInfo &CLI,
                                    const Function *F, const DataLayout *TD,
                                    const SmallVectorImpl<ISD::OutputArg> *Outs,
                                    const SmallVectorImpl<ISD::InputArg> *Ins,
                                    CallingConv::ID CallConv,
                                    SmallVectorImpl<CCValAssign> &ArgLocs,
                                    CCState &CCInfo, bool IsCall, bool IsVarArg) {
  StringRef FuncName = getFunctionName(CLI);

  if (FuncName.startswith("__udivmod") || FuncName.startswith("__divmod")) {
    CCInfo.AnalyzeCallOperands(*Outs, ArgCC_AVR_BUILTIN_DIV);
  } else {
    analyzeStandardArguments(&CLI, F, TD, Outs, Ins,
                             CallConv, ArgLocs, CCInfo,
                             IsCall, IsVarArg);
  }
}

static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI,
                             const Function *F, const DataLayout *TD,
                             const SmallVectorImpl<ISD::OutputArg> *Outs,
                             const SmallVectorImpl<ISD::InputArg> *Ins,
                             CallingConv::ID CallConv,
                             SmallVectorImpl<CCValAssign> &ArgLocs,
                             CCState &CCInfo, bool IsCall, bool IsVarArg) {
  switch (CallConv) {
  case CallingConv::AVR_BUILTIN: {
    analyzeBuiltinArguments(*CLI, F, TD, Outs, Ins,
                            CallConv, ArgLocs, CCInfo,
                            IsCall, IsVarArg);
    return;
  }
  default: {
    analyzeStandardArguments(CLI, F, TD, Outs, Ins,
                             CallConv, ArgLocs, CCInfo,
                             IsCall, IsVarArg);
    return;
  }
  }
}

SDValue AVRTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto DL = DAG.getDataLayout();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  analyzeArguments(nullptr, MF.getFunction(), &DL, 0, &Ins, CallConv, ArgLocs, CCInfo,
                   false, isVarArg);

  SDValue ArgValue;
  for (CCValAssign &VA : ArgLocs) {

    // Arguments stored on registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i8) {
        RC = &AVR::GPR8RegClass;
      } else if (RegVT == MVT::i16) {
        RC = &AVR::DREGSRegClass;
      } else {
        llvm_unreachable("Unknown argument type!");
      }

      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

      // :NOTE: Clang should not promote any i8 into i16 but for safety the
      // following code will handle zexts or sexts generated by other
      // front ends. Otherwise:
      // If this is an 8 bit value, it is really passed promoted
      // to 16 bits. Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default:
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);
    } else {
      assert(VA.isMemLoc());

      EVT LocVT = VA.getLocVT();

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DL));
      InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(MF, FI),
                                   0));
    }
  }

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    unsigned StackSize = CCInfo.getNextStackOffset();
    AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();

    AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();

  // AVR does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  const Function *F = nullptr;
  if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();

    F = cast<Function>(GV);
    Callee =
        DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
  } else if (const ExternalSymbolSDNode *ES =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
                                         getPointerTy(DAG.getDataLayout()));
  }

  analyzeArguments(&CLI, F, &DAG.getDataLayout(), &Outs, 0, CallConv, ArgLocs, CCInfo,
                   true, isVarArg);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  // First, walk the register assignments, inserting copies.
  unsigned AI, AE;
  bool HasStackArgs = false;
  for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
    CCValAssign &VA = ArgLocs[AI];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[AI];

    // Promote the value if needed. With Clang this should not happen.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, RegVT, Arg);
      break;
    }

    // Stop when we encounter a stack argument, we need to process them
    // in reverse order in the loop below.
    if (VA.isMemLoc()) {
      HasStackArgs = true;
      break;
    }

    // Arguments that can be passed on registers must be kept in the RegsToPass
    // vector.
    RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }

  // Second, stack arguments have to be walked in reverse order by inserting
  // chained stores, this ensures their order is not changed by the scheduler
  // and that the push instruction sequence generated is correct, otherwise they
  // can be freely intermixed.
  if (HasStackArgs) {
    for (AE = AI, AI = ArgLocs.size(); AI != AE; --AI) {
      unsigned Loc = AI - 1;
      CCValAssign &VA = ArgLocs[Loc];
      SDValue Arg = OutVals[Loc];

      assert(VA.isMemLoc());

      // SP points to one stack slot further so add one to adjust it.
      SDValue PtrOff = DAG.getNode(
          ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
          DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
          DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));

      Chain =
          DAG.getStore(Chain, DL, Arg, PtrOff,
                       MachinePointerInfo::getStack(MF, VA.getLocMemOffset()),
                       0);
    }
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain and
  // flag operands which copy the outgoing args into registers. The InFlag is
  // necessary since all emitted instructions must be stuck together.
  SDValue InFlag;
  for (auto Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (auto Reg : RegsToPass) {
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const AVRTargetMachine &TM = (const AVRTargetMachine &)getTargetMachine();
  const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode()) {
    Ops.push_back(InFlag);
  }

  Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InFlag, DL);

  if (!Ins.empty()) {
    InFlag = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, DL, DAG,
                         InVals);
}

/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
SDValue AVRTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Handle runtime calling convs.
  auto CCFunction = CCAssignFnForReturn(CallConv);
  CCInfo.AnalyzeCallResult(Ins, CCFunction);

  if (CallConv != CallingConv::AVR_BUILTIN && RVLocs.size() > 1) {
    // Reverse split return values to get the "big endian" format required
    // to agree with the calling convention ABI.
    std::reverse(RVLocs.begin(), RVLocs.end());
  }

  // Copy all of the result registers out of their specified physreg.
  for (CCValAssign const &RVLoc : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
                               InFlag)
                .getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//  Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

CCAssignFn *AVRTargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {
  switch (CC) {
  case CallingConv::AVR_BUILTIN:
    return RetCC_AVR_BUILTIN;
  default:
    return RetCC_AVR;
  }
}

bool
AVRTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const
{
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);

  auto CCFunction = CCAssignFnForReturn(CallConv);
  return CCInfo.CheckReturn(Outs, CCFunction);
}

SDValue
AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  auto CCFunction = CCAssignFnForReturn(CallConv);
  CCInfo.AnalyzeReturn(Outs, CCFunction);

  // If this is the first return lowered for this function, add the regs to
  // the liveout set for the function.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned e = RVLocs.size();

  // Reverse split return values to get the "big endian" format required
  // to agree with the calling convention ABI.
  if (e > 1) {
    std::reverse(RVLocs.begin(), RVLocs.end());
  }

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Copy the result values into the output registers.
  for (unsigned i = 0; i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // Don't emit the ret/reti instruction when the naked attribute is present in
  // the function being compiled.
  if (MF.getFunction()->getAttributes().hasAttribute(
          AttributeList::FunctionIndex, Attribute::Naked)) {
    return Chain;
  }

  unsigned RetOpc =
      (CallConv == CallingConv::AVR_INTR || CallConv == CallingConv::AVR_SIGNAL)
          ? AVRISD::RETI_FLAG
          : AVRISD::RET_FLAG;

  RetOps[0] = Chain; // Update chain.

  if (Flag.getNode()) {
    RetOps.push_back(Flag);
  }

  return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
//  Custom Inserters
//===----------------------------------------------------------------------===//

MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
                                                  MachineBasicBlock *BB) const {
  unsigned Opc;
  const TargetRegisterClass *RC;
  MachineFunction *F = BB->getParent();
  MachineRegisterInfo &RI = F->getRegInfo();
  const AVRTargetMachine &TM = (const AVRTargetMachine &)getTargetMachine();
  const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Invalid shift opcode!");
  case AVR::Lsl8:
    Opc = AVR::LSLRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Lsl16:
    Opc = AVR::LSLWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Asr8:
    Opc = AVR::ASRRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Asr16:
    Opc = AVR::ASRWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Lsr8:
    Opc = AVR::LSRRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Lsr16:
    Opc = AVR::LSRWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Rol8:
    Opc = AVR::ROLRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Rol16:
    Opc = AVR::ROLWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Ror8:
    Opc = AVR::RORRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Ror16:
    Opc = AVR::RORWRd;
    RC = &AVR::DREGSRegClass;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = BB->getParent()->begin();
  ++I;

  // Create loop block.
  MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, LoopBB);
  F->insert(I, RemBB);

  // Update machine-CFG edges by transferring all successors of the current
  // block to the block containing instructions after shift.
  RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
                BB->end());
  RemBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add edges BB => LoopBB => RemBB, BB => RemBB, LoopBB => LoopBB.
  BB->addSuccessor(LoopBB);
  BB->addSuccessor(RemBB);
  LoopBB->addSuccessor(RemBB);
  LoopBB->addSuccessor(LoopBB);

  unsigned ShiftAmtReg = RI.createVirtualRegister(&AVR::LD8RegClass);
  unsigned ShiftAmtReg2 = RI.createVirtualRegister(&AVR::LD8RegClass);
  unsigned ShiftReg = RI.createVirtualRegister(RC);
  unsigned ShiftReg2 = RI.createVirtualRegister(RC);
  unsigned ShiftAmtSrcReg = MI.getOperand(2).getReg();
  unsigned SrcReg = MI.getOperand(1).getReg();
  unsigned DstReg = MI.getOperand(0).getReg();

  // BB:
  // cpi N, 0
  // breq RemBB
  BuildMI(BB, dl, TII.get(AVR::CPIRdK)).addReg(ShiftAmtSrcReg).addImm(0);
  BuildMI(BB, dl, TII.get(AVR::BREQk)).addMBB(RemBB);

  // LoopBB:
  // ShiftReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
  // ShiftAmt = phi [%N, BB], [%ShiftAmt2, LoopBB]
  // ShiftReg2 = shift ShiftReg
  // ShiftAmt2 = ShiftAmt - 1;
  BuildMI(LoopBB, dl, TII.get(AVR::PHI), ShiftReg)
      .addReg(SrcReg)
      .addMBB(BB)
      .addReg(ShiftReg2)
      .addMBB(LoopBB);
  BuildMI(LoopBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
      .addReg(ShiftAmtSrcReg)
      .addMBB(BB)
      .addReg(ShiftAmtReg2)
      .addMBB(LoopBB);
  BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
  BuildMI(LoopBB, dl, TII.get(AVR::SUBIRdK), ShiftAmtReg2)
      .addReg(ShiftAmtReg)
      .addImm(1);
  BuildMI(LoopBB, dl, TII.get(AVR::BRNEk)).addMBB(LoopBB);

  // RemBB:
  // DestReg = phi [%SrcReg, BB], [%ShiftReg, LoopBB]
  BuildMI(*RemBB, RemBB->begin(), dl, TII.get(AVR::PHI), DstReg)
      .addReg(SrcReg)
      .addMBB(BB)
      .addReg(ShiftReg2)
      .addMBB(LoopBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return RemBB;
}

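// The expansion above yields, roughly (illustrative registers):
//   BB:     cpi  rN, 0       ; skip the loop entirely for a zero count
//           breq RemBB
//   LoopBB: lsl  rD          ; one single-bit shift per iteration
//           subi rN, 1
//           brne LoopBB
//   RemBB:
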
static bool isCopyMulResult(MachineBasicBlock::iterator const &I) {
  if (I->getOpcode() == AVR::COPY) {
    unsigned SrcReg = I->getOperand(1).getReg();
    return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
  }

  return false;
}

// The mul instructions wreak havoc on our zero_reg R1. We need to clear it
// after the result has been evacuated. This is probably not the best way to do
// it, but it works for now.
MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  const AVRTargetMachine &TM = (const AVRTargetMachine &)getTargetMachine();
  const TargetInstrInfo &TII = *TM.getSubtargetImpl()->getInstrInfo();
  MachineBasicBlock::iterator I(MI);
  ++I; // in any case insert *after* the mul instruction
  if (isCopyMulResult(I))
    ++I;
  if (isCopyMulResult(I))
    ++I;
  BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)
      .addReg(AVR::R1)
      .addReg(AVR::R1);
  return BB;
}

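// (mul leaves its 16-bit product in R1:R0; once both result bytes have been
// copied out, the eor above re-zeroes R1, which the ABI expects to hold 0.)
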
MachineBasicBlock *
AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *MBB) const {
  int Opc = MI.getOpcode();

  // Pseudo shift instructions with a non constant shift amount are expanded
  // into a loop.
  switch (Opc) {
  case AVR::Lsl8:
  case AVR::Lsl16:
  case AVR::Lsr8:
  case AVR::Lsr16:
  case AVR::Rol8:
  case AVR::Rol16:
  case AVR::Ror8:
  case AVR::Ror16:
  case AVR::Asr8:
  case AVR::Asr16:
    return insertShift(MI, MBB);
  case AVR::MULRdRr:
  case AVR::MULSRdRr:
    return insertMul(MI, MBB);
  }

  assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
         "Unexpected instr type to insert");

  const AVRInstrInfo &TII = (const AVRInstrInfo &)*MI.getParent()
                                ->getParent()
                                ->getSubtarget()
                                .getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  // To "insert" a SELECT instruction, we insert the diamond
  // control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch
  // on, the true/false values to select between, and a branch opcode
  // to use.

  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineBasicBlock *trueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *falseMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator I;
  for (I = MF->begin(); I != MF->end() && &(*I) != MBB; ++I);
  if (I != MF->end()) ++I;
  MF->insert(I, trueMBB);
  MF->insert(I, falseMBB);

  // Transfer remaining instructions and all successors of the current
  // block to the block which will contain the Phi node for the
  // select.
  trueMBB->splice(trueMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  trueMBB->transferSuccessorsAndUpdatePHIs(MBB);

  AVRCC::CondCodes CC = (AVRCC::CondCodes)MI.getOperand(3).getImm();
  BuildMI(MBB, dl, TII.getBrCond(CC)).addMBB(trueMBB);
  BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(falseMBB);
  MBB->addSuccessor(falseMBB);
  MBB->addSuccessor(trueMBB);

  // Unconditionally flow back to the true block
  BuildMI(falseMBB, dl, TII.get(AVR::RJMPk)).addMBB(trueMBB);
  falseMBB->addSuccessor(trueMBB);

  // Set up the Phi node to determine where we came from
  BuildMI(*trueMBB, trueMBB->begin(), dl, TII.get(AVR::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(1).getReg())
      .addMBB(MBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(falseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return trueMBB;
}

//===----------------------------------------------------------------------===//
//  Inline Asm Support
//===----------------------------------------------------------------------===//

AVRTargetLowering::ConstraintType
AVRTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    // See http://www.nongnu.org/avr-libc/user-manual/inline_asm.html
    switch (Constraint[0]) {
    case 'a': // Simple upper registers
    case 'b': // Base pointer registers pairs
    case 'd': // Upper register
    case 'l': // Lower registers
    case 'e': // Pointer register pairs
    case 'q': // Stack pointer register
    case 'r': // Any register
    case 'w': // Special upper register pairs
      return C_RegisterClass;
    case 't': // Temporary register
    case 'x': case 'X': // Pointer register pair X
    case 'y': case 'Y': // Pointer register pair Y
    case 'z': case 'Z': // Pointer register pair Z
      return C_Register;
    case 'Q': // A memory address based on Y or Z pointer with displacement.
      return C_Memory;
    case 'G': // Floating point constant
    case 'I': // 6-bit positive integer constant
    case 'J': // 6-bit negative integer constant
    case 'K': // Integer constant (Range: 2)
    case 'L': // Integer constant (Range: 0)
    case 'M': // 8-bit integer constant
    case 'N': // Integer constant (Range: -1)
    case 'O': // Integer constant (Range: 8, 16, 24)
    case 'P': // Integer constant (Range: 1)
    case 'R': // Integer constant (Range: -6 to 5)
      return C_Other;
    default:
      break;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

unsigned
AVRTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Not sure if this is actually the right thing to do, but we got to do
  // *something* [agnat]
  switch (ConstraintCode[0]) {
  case 'Q':
    return InlineAsm::Constraint_Q;
  }

  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

AVRTargetLowering::ConstraintWeight
AVRTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;

  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  // (this behaviour has been copied from the ARM backend)
  if (!CallOperandVal) {
    return CW_Default;
  }

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'd':
  case 'r':
  case 'l':
    weight = CW_Register;
    break;
  case 'a':
  case 'b':
  case 'e':
  case 'q':
  case 't':
  case 'w':
  case 'x': case 'X':
  case 'y': case 'Y':
  case 'z': case 'Z':
    weight = CW_SpecificReg;
    break;
  case 'G':
    if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
      if (C->isZero()) {
        weight = CW_Constant;
      }
    }
    break;
  case 'I':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<6>(C->getZExtValue())) {
        weight = CW_Constant;
      }
    }
    break;
  case 'J':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
        weight = CW_Constant;
      }
    }
    break;
  case 'K':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 2) {
        weight = CW_Constant;
      }
    }
    break;
  case 'L':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 0) {
        weight = CW_Constant;
      }
    }
    break;
  case 'M':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<8>(C->getZExtValue())) {
        weight = CW_Constant;
      }
    }
    break;
  case 'N':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getSExtValue() == -1) {
        weight = CW_Constant;
      }
    }
    break;
  case 'O':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
          (C->getZExtValue() == 24)) {
        weight = CW_Constant;
      }
    }
    break;
  case 'P':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 1) {
        weight = CW_Constant;
      }
    }
    break;
  case 'R':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
        weight = CW_Constant;
      }
    }
    break;
  }

  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  auto STI = static_cast<const AVRTargetMachine &>(this->getTargetMachine())
                 .getSubtargetImpl();

  // We only support i8 and i16.
  //
  //:FIXME: remove this assert for now since it gets sometimes executed
  // assert((VT == MVT::i16 || VT == MVT::i8) && "Wrong operand type.");

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Simple upper registers r16..r23.
      return std::make_pair(0U, &AVR::LD8loRegClass);
    case 'b': // Base pointer registers: y, z.
      return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
    case 'd': // Upper registers r16..r31.
      return std::make_pair(0U, &AVR::LD8RegClass);
    case 'l': // Lower registers r0..r15.
      return std::make_pair(0U, &AVR::GPR8loRegClass);
    case 'e': // Pointer register pairs: x, y, z.
      return std::make_pair(0U, &AVR::PTRREGSRegClass);
    case 'q': // Stack pointer register: SPH:SPL.
      return std::make_pair(0U, &AVR::GPRSPRegClass);
    case 'r': // Any register: r0..r31.
      if (VT == MVT::i8)
        return std::make_pair(0U, &AVR::GPR8RegClass);

      assert(VT == MVT::i16 && "inline asm constraint too large");
      return std::make_pair(0U, &AVR::DREGSRegClass);
    case 't': // Temporary register: r0.
      return std::make_pair(unsigned(AVR::R0), &AVR::GPR8RegClass);
    case 'w': // Special upper register pairs: r24, r26, r28, r30.
      return std::make_pair(0U, &AVR::IWREGSRegClass);
    case 'x': // Pointer register pair X: r27:r26.
    case 'X':
      return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
    case 'y': // Pointer register pair Y: r29:r28.
    case 'Y':
      return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
    case 'z': // Pointer register pair Z: r31:r30.
    case 'Z':
      return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(STI->getRegisterInfo(),
                                                      Constraint, VT);
}

void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();

  // Currently only support length 1 constraints.
  if (Constraint.length() != 1) {
    return;
  }

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    break;
  // Deal with integers first:
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P':
  case 'R': {
    const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C) {
      return;
    }

    int64_t CVal64 = C->getSExtValue();
    uint64_t CUVal64 = C->getZExtValue();
    switch (ConstraintLetter) {
    case 'I': // 0..63
      if (!isUInt<6>(CUVal64))
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'J': // -63..0
      if (CVal64 < -63 || CVal64 > 0)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    case 'K': // 2
      if (CUVal64 != 2)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'L': // 0
      if (CUVal64 != 0)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'M': // 0..255
      if (!isUInt<8>(CUVal64))
        return;
      // i8 type may be printed as a negative number,
      // e.g. 254 would be printed as -2,
      // so we force it to i16 at least.
      if (Ty.getSimpleVT() == MVT::i8) {
        Ty = MVT::i16;
      }
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'N': // -1
      if (CVal64 != -1)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    case 'O': // 8, 16, 24
      if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'P': // 1
      if (CUVal64 != 1)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'R': // -6..5
      if (CVal64 < -6 || CVal64 > 5)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    }

    break;
  }
  case 'G':
    const ConstantFPSDNode *FC = dyn_cast<ConstantFPSDNode>(Op);
    if (!FC || !FC->isZero())
      return;
    // Soften float to i8 0
    Result = DAG.getTargetConstant(0, DL, MVT::i8);
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

unsigned AVRTargetLowering::getRegisterByName(const char *RegName,
                                              EVT VT,
                                              SelectionDAG &DAG) const {
  unsigned Reg;

  if (VT == MVT::i8) {
    Reg = StringSwitch<unsigned>(RegName)
      .Case("r0", AVR::R0).Case("r1", AVR::R1).Case("r2", AVR::R2)
      .Case("r3", AVR::R3).Case("r4", AVR::R4).Case("r5", AVR::R5)
      .Case("r6", AVR::R6).Case("r7", AVR::R7).Case("r8", AVR::R8)
      .Case("r9", AVR::R9).Case("r10", AVR::R10).Case("r11", AVR::R11)
      .Case("r12", AVR::R12).Case("r13", AVR::R13).Case("r14", AVR::R14)
      .Case("r15", AVR::R15).Case("r16", AVR::R16).Case("r17", AVR::R17)
      .Case("r18", AVR::R18).Case("r19", AVR::R19).Case("r20", AVR::R20)
      .Case("r21", AVR::R21).Case("r22", AVR::R22).Case("r23", AVR::R23)
      .Case("r24", AVR::R24).Case("r25", AVR::R25).Case("r26", AVR::R26)
      .Case("r27", AVR::R27).Case("r28", AVR::R28).Case("r29", AVR::R29)
      .Case("r30", AVR::R30).Case("r31", AVR::R31)
      .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30)
      .Default(0);
  } else {
    Reg = StringSwitch<unsigned>(RegName)
      .Case("r0", AVR::R1R0).Case("r2", AVR::R3R2)
      .Case("r4", AVR::R5R4).Case("r6", AVR::R7R6)
      .Case("r8", AVR::R9R8).Case("r10", AVR::R11R10)
      .Case("r12", AVR::R13R12).Case("r14", AVR::R15R14)
      .Case("r16", AVR::R17R16).Case("r18", AVR::R19R18)
      .Case("r20", AVR::R21R20).Case("r22", AVR::R23R22)
      .Case("r24", AVR::R25R24).Case("r26", AVR::R27R26)
      .Case("r28", AVR::R29R28).Case("r30", AVR::R31R30)
      .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30)
      .Default(0);
  }

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

} // end of namespace llvm