//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Hexagon uses to lower LLVM code
// into a selection DAG.
//
//===----------------------------------------------------------------------===//
#include "HexagonISelLowering.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "HexagonTargetObjectFile.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "hexagon-lowering"
static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control jump table emission on Hexagon target"));

static cl::opt<bool> EnableHexSDNodeSched("enable-hexagon-sdnode-sched",
  cl::Hidden, cl::ZeroOrMore, cl::init(false),
  cl::desc("Enable Hexagon SDNode scheduling"));

static cl::opt<bool> EnableFastMath("ffast-math",
  cl::Hidden, cl::ZeroOrMore, cl::init(false),
  cl::desc("Enable Fast Math processing"));

static cl::opt<int> MinimumJumpTables("minimum-jump-tables",
  cl::Hidden, cl::ZeroOrMore, cl::init(5),
  cl::desc("Set minimum jump tables"));

static cl::opt<int> MaxStoresPerMemcpyCL("max-store-memcpy",
  cl::Hidden, cl::ZeroOrMore, cl::init(6),
  cl::desc("Max #stores to inline memcpy"));

static cl::opt<int> MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os",
  cl::Hidden, cl::ZeroOrMore, cl::init(4),
  cl::desc("Max #stores to inline memcpy"));

static cl::opt<int> MaxStoresPerMemmoveCL("max-store-memmove",
  cl::Hidden, cl::ZeroOrMore, cl::init(6),
  cl::desc("Max #stores to inline memmove"));

static cl::opt<int> MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os",
  cl::Hidden, cl::ZeroOrMore, cl::init(4),
  cl::desc("Max #stores to inline memmove"));

static cl::opt<int> MaxStoresPerMemsetCL("max-store-memset",
  cl::Hidden, cl::ZeroOrMore, cl::init(8),
  cl::desc("Max #stores to inline memset"));

static cl::opt<int> MaxStoresPerMemsetOptSizeCL("max-store-memset-Os",
  cl::Hidden, cl::ZeroOrMore, cl::init(4),
  cl::desc("Max #stores to inline memset"));
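// Note: the -max-store-* values above are copied into the corresponding
// TargetLoweringBase members (MaxStoresPerMemcpy and friends) in the
// HexagonTargetLowering constructor below, which is where SelectionDAG
// reads them when deciding whether to expand memcpy/memmove/memset inline.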
namespace {

  class HexagonCCState : public CCState {
    unsigned NumNamedVarArgParams;

  public:
    HexagonCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
                   SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
                   int NumNamedVarArgParams)
        : CCState(CC, isVarArg, MF, locs, C),
          NumNamedVarArgParams(NumNamedVarArgParams) {}

    unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
  };

  enum StridedLoadKind {
    Even = 0,
    Odd,
    NoPattern
  };

} // end anonymous namespace
// Implement calling convention for Hexagon.

static bool isHvxVectorType(MVT ty);

static bool
CC_Hexagon(unsigned ValNo, MVT ValVT,
           MVT LocVT, CCValAssign::LocInfo LocInfo,
           ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
CC_Hexagon32(unsigned ValNo, MVT ValVT,
             MVT LocVT, CCValAssign::LocInfo LocInfo,
             ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
CC_Hexagon64(unsigned ValNo, MVT ValVT,
             MVT LocVT, CCValAssign::LocInfo LocInfo,
             ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
CC_HexagonVector(unsigned ValNo, MVT ValVT,
                 MVT LocVT, CCValAssign::LocInfo LocInfo,
                 ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
RetCC_Hexagon(unsigned ValNo, MVT ValVT,
              MVT LocVT, CCValAssign::LocInfo LocInfo,
              ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
                MVT LocVT, CCValAssign::LocInfo LocInfo,
                ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
                MVT LocVT, CCValAssign::LocInfo LocInfo,
                ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
RetCC_HexagonVector(unsigned ValNo, MVT ValVT,
                    MVT LocVT, CCValAssign::LocInfo LocInfo,
                    ISD::ArgFlagsTy ArgFlags, CCState &State);
static bool
CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
                   MVT LocVT, CCValAssign::LocInfo LocInfo,
                   ISD::ArgFlagsTy ArgFlags, CCState &State) {
  HexagonCCState &HState = static_cast<HexagonCCState &>(State);

  if (ValNo < HState.getNumNamedVarArgParams()) {
    // Deal with named arguments.
    return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
  }

  // Deal with un-named arguments.
  unsigned Offset;
  if (ArgFlags.isByVal()) {
    // If pass-by-value, the size allocated on stack is decided
    // by ArgFlags.getByValSize(), not by the size of LocVT.
    Offset = State.AllocateStack(ArgFlags.getByValSize(),
                                 ArgFlags.getByValAlign());
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    ValVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }
  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    Offset = State.AllocateStack(4, 4);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    Offset = State.AllocateStack(8, 8);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  if (LocVT == MVT::v2i64 || LocVT == MVT::v4i32 || LocVT == MVT::v8i16 ||
      LocVT == MVT::v16i8) {
    Offset = State.AllocateStack(16, 16);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  if (LocVT == MVT::v4i64 || LocVT == MVT::v8i32 || LocVT == MVT::v16i16 ||
      LocVT == MVT::v32i8) {
    Offset = State.AllocateStack(32, 32);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  if (LocVT == MVT::v8i64 || LocVT == MVT::v16i32 || LocVT == MVT::v32i16 ||
      LocVT == MVT::v64i8 || LocVT == MVT::v512i1) {
    Offset = State.AllocateStack(64, 64);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  if (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 || LocVT == MVT::v64i16 ||
      LocVT == MVT::v128i8 || LocVT == MVT::v1024i1) {
    Offset = State.AllocateStack(128, 128);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  if (LocVT == MVT::v32i64 || LocVT == MVT::v64i32 || LocVT == MVT::v128i16 ||
      LocVT == MVT::v256i8) {
    Offset = State.AllocateStack(256, 256);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  llvm_unreachable(nullptr);
}
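// Illustrative example (not from the original source): for a call such as
// printf("%f", x), the format pointer is a named argument and is assigned
// by CC_Hexagon above (typically to a register), while the unnamed f64
// value x falls through to the stack cases and receives an 8-byte,
// 8-aligned slot. In this convention, unnamed variadic arguments are
// always passed on the stack.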
static bool CC_Hexagon (unsigned ValNo, MVT ValVT, MVT LocVT,
      CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State) {
  if (ArgFlags.isByVal()) {
    // Passed on stack.
    unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(),
                                          ArgFlags.getByValAlign());
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  if (LocVT == MVT::i1) {
    LocVT = MVT::i32;
  } else if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    ValVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  } else if (LocVT == MVT::v4i8 || LocVT == MVT::v2i16) {
    LocVT = MVT::i32;
    LocInfo = CCValAssign::BCvt;
  } else if (LocVT == MVT::v8i8 || LocVT == MVT::v4i16 || LocVT == MVT::v2i32) {
    LocVT = MVT::i64;
    LocInfo = CCValAssign::BCvt;
  }

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  if (LocVT == MVT::v8i32 || LocVT == MVT::v16i16 || LocVT == MVT::v32i8) {
    unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(), 32);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  if (isHvxVectorType(LocVT)) {
    if (!CC_HexagonVector(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  return true; // CC didn't match.
}
static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
                         MVT LocVT, CCValAssign::LocInfo LocInfo,
                         ISD::ArgFlagsTy ArgFlags, CCState &State) {
  static const MCPhysReg RegList[] = {
    Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
    Hexagon::R5
  };
  if (unsigned Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  unsigned Offset = State.AllocateStack(4, 4);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}
static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
                         MVT LocVT, CCValAssign::LocInfo LocInfo,
                         ISD::ArgFlagsTy ArgFlags, CCState &State) {
  if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  static const MCPhysReg RegList1[] = {
    Hexagon::D1, Hexagon::D2
  };
  static const MCPhysReg RegList2[] = {
    Hexagon::R1, Hexagon::R3
  };
  if (unsigned Reg = State.AllocateReg(RegList1, RegList2)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  unsigned Offset = State.AllocateStack(8, 8, Hexagon::D2);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}
static bool CC_HexagonVector(unsigned ValNo, MVT ValVT,
                             MVT LocVT, CCValAssign::LocInfo LocInfo,
                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
  static const MCPhysReg VecLstS[] = {
      Hexagon::V0, Hexagon::V1, Hexagon::V2, Hexagon::V3, Hexagon::V4,
      Hexagon::V5, Hexagon::V6, Hexagon::V7, Hexagon::V8, Hexagon::V9,
      Hexagon::V10, Hexagon::V11, Hexagon::V12, Hexagon::V13, Hexagon::V14,
      Hexagon::V15
  };
  static const MCPhysReg VecLstD[] = {
      Hexagon::W0, Hexagon::W1, Hexagon::W2, Hexagon::W3, Hexagon::W4,
      Hexagon::W5, Hexagon::W6, Hexagon::W7
  };
  auto &MF = State.getMachineFunction();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  bool UseHVX = HST.useHVXOps();
  bool UseHVXDbl = HST.useHVXDblOps();

  if ((UseHVX && !UseHVXDbl) &&
      (LocVT == MVT::v8i64 || LocVT == MVT::v16i32 || LocVT == MVT::v32i16 ||
       LocVT == MVT::v64i8 || LocVT == MVT::v512i1)) {
    if (unsigned Reg = State.AllocateReg(VecLstS)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
    unsigned Offset = State.AllocateStack(64, 64);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  if ((UseHVX && !UseHVXDbl) &&
      (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 || LocVT == MVT::v64i16 ||
       LocVT == MVT::v128i8)) {
    if (unsigned Reg = State.AllocateReg(VecLstD)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
    unsigned Offset = State.AllocateStack(128, 128);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  if ((UseHVX && UseHVXDbl) &&
      (LocVT == MVT::v32i64 || LocVT == MVT::v64i32 || LocVT == MVT::v128i16 ||
       LocVT == MVT::v256i8)) {
    if (unsigned Reg = State.AllocateReg(VecLstD)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
    unsigned Offset = State.AllocateStack(256, 256);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  if ((UseHVX && UseHVXDbl) &&
      (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 || LocVT == MVT::v64i16 ||
       LocVT == MVT::v128i8 || LocVT == MVT::v1024i1)) {
    if (unsigned Reg = State.AllocateReg(VecLstS)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
    unsigned Offset = State.AllocateStack(128, 128);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  return true;
}
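// A note on the register lists above: in 64-byte HVX mode a vector argument
// register (V0..V15) holds 512 bits and a register pair (W0..W7) holds 1024
// bits; in 128-byte (double) mode both widths double. That is why the same
// value type can be taken either in a single register or in a pair,
// depending on UseHVXDbl.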
static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
                          MVT LocVT, CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  auto &MF = State.getMachineFunction();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  bool UseHVX = HST.useHVXOps();
  bool UseHVXDbl = HST.useHVXDblOps();

  if (LocVT == MVT::i1) {
    // Return values of type MVT::i1 still need to be assigned to R0, but
    // the value type needs to remain i1. LowerCallResult will deal with it,
    // but it needs to recognize i1 as the value type.
    LocVT = MVT::i32;
  } else if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    ValVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  } else if (LocVT == MVT::v4i8 || LocVT == MVT::v2i16) {
    LocVT = MVT::i32;
    LocInfo = CCValAssign::BCvt;
  } else if (LocVT == MVT::v8i8 || LocVT == MVT::v4i16 || LocVT == MVT::v2i32) {
    LocVT = MVT::i64;
    LocInfo = CCValAssign::BCvt;
  } else if (LocVT == MVT::v64i8 || LocVT == MVT::v32i16 ||
             LocVT == MVT::v16i32 || LocVT == MVT::v8i64 ||
             LocVT == MVT::v512i1) {
    LocVT = MVT::v16i32;
    ValVT = MVT::v16i32;
    LocInfo = CCValAssign::Full;
  } else if (LocVT == MVT::v128i8 || LocVT == MVT::v64i16 ||
             LocVT == MVT::v32i32 || LocVT == MVT::v16i64 ||
             (LocVT == MVT::v1024i1 && UseHVX && UseHVXDbl)) {
    LocVT = MVT::v32i32;
    ValVT = MVT::v32i32;
    LocInfo = CCValAssign::Full;
  } else if (LocVT == MVT::v256i8 || LocVT == MVT::v128i16 ||
             LocVT == MVT::v64i32 || LocVT == MVT::v32i64) {
    LocVT = MVT::v64i32;
    ValVT = MVT::v64i32;
    LocInfo = CCValAssign::Full;
  }
  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }
  if (LocVT == MVT::v16i32 || LocVT == MVT::v32i32 || LocVT == MVT::v64i32) {
    if (!RetCC_HexagonVector(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }
  return true; // CC didn't match.
}
static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
                            MVT LocVT, CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    // Note that use of registers beyond R1 is not ABI compliant. However there
    // are (experimental) IR passes which generate internal functions that
    // return structs using these additional registers.
    static const uint16_t RegList[] = { Hexagon::R0, Hexagon::R1,
                                        Hexagon::R2, Hexagon::R3,
                                        Hexagon::R4, Hexagon::R5 };
    if (unsigned Reg = State.AllocateReg(RegList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  return true;
}
static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
                            MVT LocVT, CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  return true;
}
static bool RetCC_HexagonVector(unsigned ValNo, MVT ValVT,
                                MVT LocVT, CCValAssign::LocInfo LocInfo,
                                ISD::ArgFlagsTy ArgFlags, CCState &State) {
  auto &MF = State.getMachineFunction();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  bool UseHVX = HST.useHVXOps();
  bool UseHVXDbl = HST.useHVXDblOps();

  if (LocVT == MVT::v16i32) {
    if (unsigned Reg = State.AllocateReg(Hexagon::V0)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  } else if (LocVT == MVT::v32i32) {
    unsigned Req = (UseHVX && UseHVXDbl) ? Hexagon::V0 : Hexagon::W0;
    if (unsigned Reg = State.AllocateReg(Req)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  } else if (LocVT == MVT::v64i32) {
    if (unsigned Reg = State.AllocateReg(Hexagon::W0)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }
  return true;
}
// Specifies that for loads and stores VT can be promoted to PromotedLdStVT.
void HexagonTargetLowering::promoteLdStType(MVT VT, MVT PromotedLdStVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType(ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType(ISD::STORE, VT, PromotedLdStVT);
  }
}

SDValue
HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
      const {
  return SDValue();
}
/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.  Sometimes what we are copying is the end of a
/// larger object, the part that does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile=*/false, /*AlwaysInline=*/false,
                       /*isTailCall=*/false,
                       MachinePointerInfo(), MachinePointerInfo());
}
static bool isHvxVectorType(MVT Ty) {
  switch (Ty.SimpleTy) {
  case MVT::v8i64:
  case MVT::v16i32:
  case MVT::v32i16:
  case MVT::v64i8:
  case MVT::v16i64:
  case MVT::v32i32:
  case MVT::v64i16:
  case MVT::v128i8:
  case MVT::v32i64:
  case MVT::v64i32:
  case MVT::v128i16:
  case MVT::v256i8:
  case MVT::v512i1:
  case MVT::v1024i1:
    return true;
  default:
    return false;
  }
}
bool
HexagonTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_Hexagon);
}
// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed by a pointer passed by caller.
SDValue
HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values of ISD::RET
  CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
}
bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // If either no tail call or told not to tail call at all, don't.
  auto Attr =
      CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
  if (!CI->isTailCall() || Attr.getValueAsString() == "true")
    return false;

  return true;
}
/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers.  This assumes that
/// Chain/Glue are the input chain/glue to use, and that TheCall is the call
/// being lowered.  Returns an SDNode with the same number of values as the
/// ISD::CALL.
SDValue HexagonTargetLowering::LowerCallResult(
    SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;

  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    SDValue RetVal;
    if (RVLocs[i].getValVT() == MVT::i1) {
      // Return values of type MVT::i1 require special handling. The reason
      // is that MVT::i1 is associated with the PredRegs register class, but
      // values of that type are still returned in R0. Generate an explicit
      // copy into a predicate register from R0, and treat the value of the
      // predicate register as the call result.
      auto &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                       MVT::i32, Glue);
      // FR0 = (Value, Chain, Glue)
      unsigned PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
      SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR,
                                     FR0.getValue(0), FR0.getValue(2));
      // TPR = (Chain, Glue)
      // Don't glue this CopyFromReg, because it copies from a virtual
      // register. If it is glued to the call, InstrEmitter will add it
      // as an implicit def to the call (EmitMachineNode).
      RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1);
      Glue = TPR.getValue(1);
    } else {
      RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                  RVLocs[i].getValVT(), Glue);
      Glue = RetVal.getValue(2);
    }
    InVals.push_back(RetVal.getValue(0));
    Chain = RetVal.getValue(1);
  }

  return Chain;
}
/// LowerCall - Functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
SDValue
HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  bool DoesNotReturn = CLI.DoesNotReturn;

  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  MachineFunction &MF = DAG.getMachineFunction();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  // Check for varargs.
  unsigned NumNamedVarArgParams = -1U;
  if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = GAN->getGlobal();
    Callee = DAG.getTargetGlobalAddress(GV, dl, MVT::i32);
    if (const Function* F = dyn_cast<Function>(GV)) {
      // If a function has zero args and is a vararg function, that's
      // disallowed so it must be an undeclared function.  Do not assume
      // varargs if the callee is undefined.
      if (F->isVarArg() && F->getFunctionType()->getNumParams() != 0)
        NumNamedVarArgParams = F->getFunctionType()->getNumParams();
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                        *DAG.getContext(), NumNamedVarArgParams);

  if (IsVarArg)
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
  else
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);

  auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
  if (Attr.getValueAsString() == "true")
    IsTailCall = false;

  if (IsTailCall) {
    bool StructAttrFlag = MF.getFunction()->hasStructRetAttr();
    IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                                                   IsVarArg, IsStructRet,
                                                   StructAttrFlag,
                                                   Outs, OutVals, Ins, DAG);
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      CCValAssign &VA = ArgLocs[i];
      if (VA.isMemLoc()) {
        IsTailCall = false;
        break;
      }
    }
    DEBUG(dbgs() << (IsTailCall ? "Eligible for Tail Call\n"
                                : "Argument must be passed on stack. "
                                  "Not eligible for Tail Call\n"));
  }

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  auto &HRI = *Subtarget.getRegisterInfo();
  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);

  bool NeedsArgAlign = false;
  unsigned LargestAlignSeen = 0;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    // Record if we need > 8 byte alignment on an argument.
    bool ArgAlign = isHvxVectorType(VA.getValVT());
    NeedsArgAlign |= ArgAlign;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default:
        // Loc info must be one of Full, SExt, ZExt, or AExt.
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::BCvt:
      case CCValAssign::Full:
        break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    if (VA.isMemLoc()) {
      unsigned LocMemOffset = VA.getLocMemOffset();
      SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
                                        StackPtr.getValueType());
      MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
      if (ArgAlign)
        LargestAlignSeen = std::max(LargestAlignSeen,
                                    VA.getLocVT().getStoreSizeInBits() >> 3);
      if (Flags.isByVal()) {
        // The argument is a struct passed by value. According to LLVM, "Arg"
        // is a pointer.
        MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
                                                        Flags, DAG, dl));
      } else {
        MachinePointerInfo LocPI = MachinePointerInfo::getStack(
            DAG.getMachineFunction(), LocMemOffset);
        SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
        MemOpChains.push_back(S);
      }
      continue;
    }
    // Arguments that can be passed on register must be kept at RegsToPass
    // vector.
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }

  if (NeedsArgAlign && Subtarget.hasV60TOps()) {
    DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    // V6 vectors passed by value have 64 or 128 byte alignment depending
    // on whether we are 64 byte vector mode or 128 byte.
    bool UseHVXDbl = Subtarget.useHVXDblOps();
    assert(Subtarget.useHVXOps());
    const unsigned ObjAlign = UseHVXDbl ? 128 : 64;
    LargestAlignSeen = std::max(LargestAlignSeen, ObjAlign);
    MFI.ensureMaxAlignment(LargestAlignSeen);
  }

  // Transform all store nodes into one single node because all store
  // nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
  SDValue Glue;
  if (!IsTailCall) {
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
    Glue = Chain.getValue(1);
  }

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The Glue is necessary since all emitted instructions must be
  // stuck together.
  if (!IsTailCall) {
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, Glue);
      Glue = Chain.getValue(1);
    }
  } else {
    // For tail calls lower the arguments to the 'real' stack slot.
    //
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    //
    // Do not flag preceding copytoreg stuff together with the following stuff.
    Glue = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, Glue);
      Glue = Chain.getValue(1);
    }
    Glue = SDValue();
  }

  bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls();
  unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags);
  } else if (ExternalSymbolSDNode *S =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));
  }

  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (Glue.getNode())
    Ops.push_back(Glue);

  if (IsTailCall) {
    MF.getFrameInfo().setHasTailCall();
    return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
  }

  unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
  Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), Glue, dl);
  Glue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
                         InVals, OutVals, Callee);
}
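// Shape of the DAG produced above for a non-tail call, roughly:
//   CALLSEQ_START -> stores/memcpys for stack args -> CopyToReg for each
//   register arg -> CALL (or CALLnr) -> CALLSEQ_END -> CopyFromReg results,
// with glue chaining the register copies to the call so the scheduler
// cannot separate them.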
static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
                                   SDValue &Base, SDValue &Offset,
                                   bool &IsInc, SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD)
    return false;

  auto &HST = static_cast<const HexagonSubtarget&>(DAG.getSubtarget());
  bool UseHVX = HST.useHVXOps();
  bool UseHVXDbl = HST.useHVXDblOps();

  bool ValidHVXDblType =
      (UseHVX && UseHVXDbl) && (VT == MVT::v32i32 || VT == MVT::v16i64 ||
                                VT == MVT::v64i16 || VT == MVT::v128i8);
  bool ValidHVXType =
      UseHVX && !UseHVXDbl && (VT == MVT::v16i32 || VT == MVT::v8i64 ||
                               VT == MVT::v32i16 || VT == MVT::v64i8);

  if (ValidHVXDblType || ValidHVXType ||
      VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
    IsInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    // Ensure that Offset is a constant.
    return isa<ConstantSDNode>(Offset);
  }

  return false;
}
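// Illustrative example of the post-incremented access this enables once the
// offset is validated by isValidAutoIncImm below:
//   r1 = memw(r0++#4)   // load from [r0], then r0 += 4 in one instruction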
/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                       SDValue &Base,
                                                       SDValue &Offset,
                                                       ISD::MemIndexedMode &AM,
                                                       SelectionDAG &DAG) const
{
  EVT VT;

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    if (ST->getValue().getValueType() == MVT::i64 && ST->isTruncatingStore())
      return false;
  } else {
    return false;
  }

  bool IsInc = false;
  bool isLegal = getIndexedAddressParts(Op, VT, Base, Offset, IsInc, DAG);
  if (isLegal) {
    auto &HII = *Subtarget.getInstrInfo();
    int32_t OffsetVal = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
    if (HII.isValidAutoIncImm(VT, OffsetVal)) {
      AM = IsInc ? ISD::POST_INC : ISD::POST_DEC;
      return true;
    }
  }

  return false;
}
SDValue
HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  MachineFunction &MF = DAG.getMachineFunction();
  auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
  switch (Node->getOpcode()) {
    case ISD::INLINEASM: {
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the flag operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        if (FuncInfo.hasClobberLR())
          break;
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
        ++i;  // Skip the ID value.

        switch (InlineAsm::getKind(Flags)) {
          default: llvm_unreachable("Bad flags!");
          case InlineAsm::Kind_RegDef:
          case InlineAsm::Kind_RegUse:
          case InlineAsm::Kind_Imm:
          case InlineAsm::Kind_Clobber:
          case InlineAsm::Kind_Mem: {
            for (; NumVals; --NumVals, ++i) {}
            break;
          }
          case InlineAsm::Kind_RegDefEarlyClobber: {
            for (; NumVals; --NumVals, ++i) {
              unsigned Reg =
                cast<RegisterSDNode>(Node->getOperand(i))->getReg();

              // Check it to be lr
              const HexagonRegisterInfo *QRI = Subtarget.getRegisterInfo();
              if (Reg == QRI->getRARegister()) {
                FuncInfo.setHasClobberLR(true);
                break;
              }
            }
            break;
          }
        }
      }
    }
  } // Node->getOpcode
  return Op;
}
// Need to transform ISD::PREFETCH into something that doesn't inherit
// all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
// SDNPMayStore.
SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  // Lower it to DCFETCH($reg, #0).  A "pat" will try to merge the offset in,
  // if the "reg" is fed by an "add".
  SDLoc DL(Op);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
}
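// For example, dcfetch(r0+#0) is emitted here; if r0 came from an "add",
// a selection pattern may fold the addend, e.g. producing dcfetch(r1+#8).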
// Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
// is marked as having side-effects, while the register read on Hexagon does
// not have any. TableGen refuses to accept the direct pattern from that node
// to the A4_tfrcpp.
SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDLoc dl(Op);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
}
SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  // Lower the hexagon_prefetch builtin to DCFETCH, as above.
  if (IntNo == Intrinsic::hexagon_prefetch) {
    SDValue Addr = Op.getOperand(2);
    SDLoc DL(Op);
    SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
    return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
  }
  return SDValue();
}
SDValue
HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc dl(Op);

  ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Align);
  assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");

  unsigned A = AlignConst->getSExtValue();
  auto &HFI = *Subtarget.getFrameLowering();
  // "Zero" means natural stack alignment.
  if (A == 0)
    A = HFI.getStackAlignment();

  DEBUG({
    dbgs () << __func__ << " Align: " << A << " Size: ";
    Size.getNode()->dump(&DAG);
    dbgs() << "\n";
  });

  SDValue AC = DAG.getConstant(A, dl, MVT::i32);
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);

  DAG.ReplaceAllUsesOfValueWith(Op, AA);
  return AA;
}
SDValue HexagonTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);

  // For LLVM, in the case when returning a struct by value (>8byte),
  // the first argument is a pointer that points to the location on caller's
  // stack where the return value will be stored. For Hexagon, the location on
  // caller's stack is passed only when the struct size is smaller than (and
  // equal to) 8 bytes. If not, no address will be passed into the callee and
  // the callee returns the result directly through R0/R1.

  SmallVector<SDValue, 8> MemOps;
  bool UseHVX = Subtarget.useHVXOps(), UseHVXDbl = Subtarget.useHVXDblOps();
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    unsigned ObjSize;
    unsigned StackLocation;
    int FI;

    if (   (VA.isRegLoc() && !Flags.isByVal())
        || (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() > 8)) {
      // Arguments passed in registers
      // 1. int, long long, ptr args that get allocated in register.
      // 2. Large struct that gets an register to put its address in.
      EVT RegVT = VA.getLocVT();
      if (RegVT == MVT::i8 || RegVT == MVT::i16 ||
          RegVT == MVT::i32 || RegVT == MVT::f32) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        // Treat values of type MVT::i1 specially: they are passed in
        // registers of type i32, but they need to remain as values of
        // type i1 for consistency of the argument lowering.
        if (VA.getValVT() == MVT::i1) {
          // Generate a copy into a predicate register and use the value
          // of the register as the "InVal".
          unsigned PReg =
            RegInfo.createVirtualRegister(&Hexagon::PredRegsRegClass);
          SDNode *T = DAG.getMachineNode(Hexagon::C2_tfrrp, dl, MVT::i1,
                                         Copy.getValue(0));
          Copy = DAG.getCopyToReg(Copy.getValue(1), dl, PReg, SDValue(T, 0));
          Copy = DAG.getCopyFromReg(Copy, dl, PReg, MVT::i1);
        }
        InVals.push_back(Copy);
        Chain = Copy.getValue(1);
      } else if (RegVT == MVT::i64 || RegVT == MVT::f64) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));

      // Single HVX vector.
      } else if ((RegVT == MVT::v8i64 || RegVT == MVT::v16i32 ||
                  RegVT == MVT::v32i16 || RegVT == MVT::v64i8)) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::VectorRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else if (UseHVX && UseHVXDbl &&
                 ((RegVT == MVT::v16i64 || RegVT == MVT::v32i32 ||
                   RegVT == MVT::v64i16 || RegVT == MVT::v128i8))) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::VectorRegs128BRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));

      // Double HVX vector.
      } else if ((RegVT == MVT::v16i64 || RegVT == MVT::v32i32 ||
                  RegVT == MVT::v64i16 || RegVT == MVT::v128i8)) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::VecDblRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else if (UseHVX && UseHVXDbl &&
                 ((RegVT == MVT::v32i64 || RegVT == MVT::v64i32 ||
                   RegVT == MVT::v128i16 || RegVT == MVT::v256i8))) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::VecDblRegs128BRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else if (RegVT == MVT::v512i1 || RegVT == MVT::v1024i1) {
        assert(0 && "need to support VecPred regs");
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::VecPredRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else {
        assert (0);
      }
    } else if (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() <= 8) {
      assert (0 && "ByValSize must be bigger than 8 bytes");
    } else {
      // Sanity check.
      assert(VA.isMemLoc());

      if (Flags.isByVal()) {
        // If it's a byval parameter, then we need to compute the
        // "real" size, not the size of the pointer.
        ObjSize = Flags.getByValSize();
      } else {
        ObjSize = VA.getLocVT().getStoreSizeInBits() >> 3;
      }

      StackLocation = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
      // Create the frame index object for this incoming parameter...
      FI = MFI.CreateFixedObject(ObjSize, StackLocation, true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);

      if (Flags.isByVal()) {
        // If it's a pass-by-value aggregate, then do not dereference the stack
        // location. Instead, we should generate a reference to the stack
        // location.
        InVals.push_back(FIN);
      } else {
        InVals.push_back(
            DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
      }
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  if (isVarArg) {
    // This will point to the next argument passed via stack.
    int FrameIndex = MFI.CreateFixedObject(Hexagon_PointerSize,
                                           HEXAGON_LRFP_SIZE +
                                           CCInfo.getNextStackOffset(),
                                           true);
    FuncInfo.setVarArgsFrameIndex(FrameIndex);
  }

  return Chain;
}
SDValue
HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  // VASTART stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
// Creates a SPLAT instruction for a constant value VAL.
static SDValue createSplat(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
                           SDValue Val) {
  if (VT.getSimpleVT() == MVT::v4i8)
    return DAG.getNode(HexagonISD::VSPLATB, dl, VT, Val);

  if (VT.getSimpleVT() == MVT::v4i16)
    return DAG.getNode(HexagonISD::VSPLATH, dl, VT, Val);

  return SDValue();
}
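// Illustrative semantics: vsplatb replicates the low byte into all four
// byte lanes of a 32-bit vector (0x000000AB -> 0xABABABAB), and vsplath
// replicates the low halfword into both halfword lanes.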
static bool isSExtFree(SDValue N) {
  // A sign-extend of a truncate of a sign-extend is free.
  if (N.getOpcode() == ISD::TRUNCATE &&
      N.getOperand(0).getOpcode() == ISD::AssertSext)
    return true;
  // We have sign-extended loads.
  if (N.getOpcode() == ISD::LOAD)
    return true;
  return false;
}
SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Cmp = Op.getOperand(2);
  ISD::CondCode CC = cast<CondCodeSDNode>(Cmp)->get();

  EVT VT = Op.getValueType();
  EVT LHSVT = LHS.getValueType();
  EVT RHSVT = RHS.getValueType();

  if (LHSVT == MVT::v2i16) {
    assert(ISD::isSignedIntSetCC(CC) || ISD::isUnsignedIntSetCC(CC));
    unsigned ExtOpc = ISD::isSignedIntSetCC(CC) ? ISD::SIGN_EXTEND
                                                : ISD::ZERO_EXTEND;
    SDValue LX = DAG.getNode(ExtOpc, dl, MVT::v2i32, LHS);
    SDValue RX = DAG.getNode(ExtOpc, dl, MVT::v2i32, RHS);
    SDValue SC = DAG.getNode(ISD::SETCC, dl, MVT::v2i1, LX, RX, Cmp);
    return SC;
  }

  // Treat all other vector types as legal.
  if (VT.isVector())
    return Op;

  // Equals and not equals should use sign-extend, not zero-extend, since
  // we can represent small negative values in the compare instructions.
  // The LLVM default is to use zero-extend arbitrarily in these cases.
  if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
      (RHSVT == MVT::i8 || RHSVT == MVT::i16) &&
      (LHSVT == MVT::i8 || LHSVT == MVT::i16)) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS);
    if (C && C->getAPIntValue().isNegative()) {
      LHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, LHS);
      RHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, RHS);
      return DAG.getNode(ISD::SETCC, dl, Op.getValueType(),
                         LHS, RHS, Op.getOperand(2));
    }
    if (isSExtFree(LHS) || isSExtFree(RHS)) {
      LHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, LHS);
      RHS = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, RHS);
      return DAG.getNode(ISD::SETCC, dl, Op.getValueType(),
                         LHS, RHS, Op.getOperand(2));
    }
  }
  return SDValue();
}
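// Example of why sign-extension is preferred here: for (i8)x == -1, the
// sign-extended compare can use the immediate -1 directly, whereas the
// zero-extended form would have to materialize 0xFF; compare immediates
// are signed, so small negative values come for free.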
SDValue
HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue PredOp = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
  EVT OpVT = Op1.getValueType();
  SDLoc DL(Op);

  if (OpVT == MVT::v2i16) {
    SDValue X1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i32, Op1);
    SDValue X2 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i32, Op2);
    SDValue SL = DAG.getNode(ISD::VSELECT, DL, MVT::v2i32, PredOp, X1, X2);
    SDValue TR = DAG.getNode(ISD::TRUNCATE, DL, MVT::v2i16, SL);
    return TR;
  }

  return SDValue();
}
// Handle only specific vector loads.
SDValue HexagonTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
  SDValue Chain = LoadNode->getChain();
  SDValue Ptr = Op.getOperand(1);
  SDValue LoweredLoad;
  SDValue Result;
  SDValue Base = LoadNode->getBasePtr();
  ISD::LoadExtType Ext = LoadNode->getExtensionType();
  unsigned Alignment = LoadNode->getAlignment();
  SDValue LoadChain;

  if(Ext == ISD::NON_EXTLOAD)
    Ext = ISD::ZEXTLOAD;

  if (VT == MVT::v4i16) {
    if (Alignment == 2) {
      SDValue Loads[4];
      // Base load.
      Loads[0] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Base,
                                LoadNode->getPointerInfo(), MVT::i16, Alignment,
                                LoadNode->getMemOperand()->getFlags());
      // Base+2 load.
      SDValue Increment = DAG.getConstant(2, DL, MVT::i32);
      Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
      Loads[1] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
                                LoadNode->getPointerInfo(), MVT::i16, Alignment,
                                LoadNode->getMemOperand()->getFlags());
      // SHL 16, then OR base and base+2.
      SDValue ShiftAmount = DAG.getConstant(16, DL, MVT::i32);
      SDValue Tmp1 = DAG.getNode(ISD::SHL, DL, MVT::i32, Loads[1], ShiftAmount);
      SDValue Tmp2 = DAG.getNode(ISD::OR, DL, MVT::i32, Tmp1, Loads[0]);
      // Base+4 load.
      Increment = DAG.getConstant(4, DL, MVT::i32);
      Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
      Loads[2] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
                                LoadNode->getPointerInfo(), MVT::i16, Alignment,
                                LoadNode->getMemOperand()->getFlags());
      // Base+6 load.
      Increment = DAG.getConstant(6, DL, MVT::i32);
      Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
      Loads[3] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
                                LoadNode->getPointerInfo(), MVT::i16, Alignment,
                                LoadNode->getMemOperand()->getFlags());
      // SHL 16, then OR base+4 and base+6.
      Tmp1 = DAG.getNode(ISD::SHL, DL, MVT::i32, Loads[3], ShiftAmount);
      SDValue Tmp4 = DAG.getNode(ISD::OR, DL, MVT::i32, Tmp1, Loads[2]);
      // Combine to i64. This could be optimised out later if we can
      // affect reg allocation of this code.
      Result = DAG.getNode(HexagonISD::COMBINE, DL, MVT::i64, Tmp4, Tmp2);
      LoadChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                              Loads[0].getValue(1), Loads[1].getValue(1),
                              Loads[2].getValue(1), Loads[3].getValue(1));
    } else {
      // Perform default type expansion.
      Result = DAG.getLoad(MVT::i64, DL, Chain, Ptr, LoadNode->getPointerInfo(),
                           LoadNode->getAlignment(),
                           LoadNode->getMemOperand()->getFlags());
      LoadChain = Result.getValue(1);
    }
  } else {
    llvm_unreachable("Custom lowering unsupported load");
  }

  Result = DAG.getNode(ISD::BITCAST, DL, VT, Result);
  // Since we pretend to lower a load, we need the original chain
  // info attached to the result.
  SDValue Ops[] = { Result, LoadChain };

  return DAG.getMergeValues(Ops, DL);
}
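// For the align-2 v4i16 path above, the result is assembled as:
//   lo32 = (load16[base+2] << 16) | load16[base]
//   hi32 = (load16[base+6] << 16) | load16[base+4]
//   result = combine(hi32, lo32), an i64 that is bitcast back to v4i16.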
SDValue
HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  EVT ValTy = Op.getValueType();
  ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Op);
  unsigned Align = CPN->getAlignment();
  bool IsPositionIndependent = isPositionIndependent();
  unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0;

  unsigned Offset = 0;
  SDValue T;
  if (CPN->isMachineConstantPoolEntry())
    T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Align, Offset,
                                  TF);
  else
    T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Align, Offset,
                                  TF);

  assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF &&
         "Inconsistent target flag encountered");

  if (IsPositionIndependent)
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), ValTy, T);
  return DAG.getNode(HexagonISD::CP, SDLoc(Op), ValTy, T);
}
SDValue
HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  int Idx = cast<JumpTableSDNode>(Op)->getIndex();
  if (isPositionIndependent()) {
    SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), VT, T);
  }

  SDValue T = DAG.getTargetJumpTable(Idx, VT);
  return DAG.getNode(HexagonISD::JT, SDLoc(Op), VT, T);
}
SDValue
HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}
SDValue
HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                         HRI.getFrameRegister(), VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

SDValue
HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
}
SDValue
HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  auto *GAN = cast<GlobalAddressSDNode>(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  auto *GV = GAN->getGlobal();
  int64_t Offset = GAN->getOffset();

  auto &HLOF = *HTM.getObjFileLowering();
  Reloc::Model RM = HTM.getRelocationModel();

  if (RM == Reloc::Static) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
    const GlobalObject *GO = GV->getBaseObject();
    if (GO && HLOF.isGlobalInSmallSection(GO, HTM))
      return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA);
    return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA);
  }

  bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  if (UsePCRel) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset,
                                            HexagonII::MO_PCREL);
    return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, GA);
  }

  // Use GOT index.
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, HexagonII::MO_GOT);
  SDValue Off = DAG.getConstant(Offset, dl, MVT::i32);
  return DAG.getNode(HexagonISD::AT_GOT, dl, PtrVT, GOT, GA, Off);
}
SDValue
HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  Reloc::Model RM = HTM.getRelocationModel();
  if (RM == Reloc::Static) {
    SDValue A = DAG.getTargetBlockAddress(BA, PtrVT);
    return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A);
  }

  SDValue A = DAG.getTargetBlockAddress(BA, PtrVT, 0, HexagonII::MO_PCREL);
  return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, A);
}
SDValue
HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG)
      const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue GOTSym = DAG.getTargetExternalSymbol(HEXAGON_GOT_SYM_NAME, PtrVT,
                                               HexagonII::MO_PCREL);
  return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), PtrVT, GOTSym);
}
SDValue
HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
      GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg,
      unsigned char OperandFlags) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDLoc dl(GA);
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);

  // Create Operands for the call. The Operands should have the following:
  // 1. Chain.
  // 2. Callee which in this case is the Global address value.
  // 3. Registers live into the call. In this case its R0, as we
  //    have just one argument to be passed.
  // 4. Glue.
  // Note: The order is important.

  const auto &HRI = *Subtarget.getRegisterInfo();
  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C);
  assert(Mask && "Missing call preserved mask for calling convention");
  SDValue Ops[] = { Chain, TGA, DAG.getRegister(Hexagon::R0, PtrVT),
                    DAG.getRegisterMask(Mask), Glue };
  Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);

  // Inform MFI that function has calls.
  MFI.setAdjustsStack(true);

  Glue = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
}
//
// Lower using the initial-exec model for TLS addresses.
//
SDValue
HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Get the thread pointer.
  SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);

  bool IsPositionIndependent = isPositionIndependent();
  unsigned char TF =
      IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE;

  // First generate the TLS symbol address.
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT,
                                           Offset, TF);

  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);

  if (IsPositionIndependent) {
    // Generate the GOT pointer in case of position independent code.
    SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Sym, DAG);

    // Add the TLS symbol address to the GOT pointer. This gives
    // GOT-relative relocation for the symbol.
    Sym = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
  }

  // Load the offset value for the TLS symbol. This offset is relative to
  // the thread pointer.
  SDValue LoadOffset =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Sym, MachinePointerInfo());

  // Address of the thread local variable is the add of thread
  // pointer and the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset);
}
1675 // Lower using the local executable model for TLS addresses
1678 HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
1679 SelectionDAG &DAG) const {
1681 int64_t Offset = GA->getOffset();
1682 auto PtrVT = getPointerTy(DAG.getDataLayout());
1684 // Get the thread pointer.
1685 SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
1686 // Generate the TLS symbol address
1687 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
1688 HexagonII::MO_TPREL);
1689 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
1691 // Address of the thread local variable is the add of thread
1692 // pointer and the offset of the variable.
1693 return DAG.getNode(ISD::ADD, dl, PtrVT, TP, Sym);
1694 }
1697 // Lower TLS addresses using the general-dynamic model.
1699 SDValue
1700 HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
1701 SelectionDAG &DAG) const {
1702 SDLoc dl(GA);
1703 int64_t Offset = GA->getOffset();
1704 auto PtrVT = getPointerTy(DAG.getDataLayout());
1706 // First generate the TLS symbol address
1707 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
1708 HexagonII::MO_GDGOT);
1710 // Then, generate the GOT pointer
1711 SDValue GOT = LowerGLOBAL_OFFSET_TABLE(TGA, DAG);
1713 // Add the TLS symbol and the GOT pointer
1714 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
1715 SDValue Chain = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
1717 // Copy over the argument to R0
1718 SDValue InFlag;
1719 Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InFlag);
1720 InFlag = Chain.getValue(1);
1722 unsigned Flags =
1723 static_cast<const HexagonSubtarget &>(DAG.getSubtarget()).useLongCalls()
1724 ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
1725 : HexagonII::MO_GDPLT;
1727 return GetDynamicTLSAddr(DAG, Chain, GA, InFlag, PtrVT,
1728 Hexagon::R0, Flags);
1729 }
1732 // Lower TLS addresses.
1734 // For now, among the dynamic models we only support the general-dynamic model.
1736 SDValue
1737 HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1738 SelectionDAG &DAG) const {
1739 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
1741 switch (HTM.getTLSModel(GA->getGlobal())) {
1742 case TLSModel::GeneralDynamic:
1743 case TLSModel::LocalDynamic:
1744 return LowerToTLSGeneralDynamicModel(GA, DAG);
1745 case TLSModel::InitialExec:
1746 return LowerToTLSInitialExecModel(GA, DAG);
1747 case TLSModel::LocalExec:
1748 return LowerToTLSLocalExecModel(GA, DAG);
1750 llvm_unreachable("Bogus TLS model");
1753 //===----------------------------------------------------------------------===//
1754 // TargetLowering Implementation
1755 //===----------------------------------------------------------------------===//
1757 HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
1758 const HexagonSubtarget &ST)
1759 : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)),
1760 Subtarget(ST) {
1761 bool IsV4 = !Subtarget.hasV5TOps();
1762 auto &HRI = *Subtarget.getRegisterInfo();
1763 bool UseHVX = Subtarget.useHVXOps();
1764 bool UseHVXSgl = Subtarget.useHVXSglOps();
1765 bool UseHVXDbl = Subtarget.useHVXDblOps();
1767 setPrefLoopAlignment(4);
1768 setPrefFunctionAlignment(4);
1769 setMinFunctionAlignment(2);
1770 setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
1772 setMaxAtomicSizeInBitsSupported(64);
1773 setMinCmpXchgSizeInBits(32);
1775 if (EnableHexSDNodeSched)
1776 setSchedulingPreference(Sched::VLIW);
1777 else
1778 setSchedulingPreference(Sched::Source);
1780 // Limits for inline expansion of memcpy/memmove
1781 MaxStoresPerMemcpy = MaxStoresPerMemcpyCL;
1782 MaxStoresPerMemcpyOptSize = MaxStoresPerMemcpyOptSizeCL;
1783 MaxStoresPerMemmove = MaxStoresPerMemmoveCL;
1784 MaxStoresPerMemmoveOptSize = MaxStoresPerMemmoveOptSizeCL;
1785 MaxStoresPerMemset = MaxStoresPerMemsetCL;
1786 MaxStoresPerMemsetOptSize = MaxStoresPerMemsetOptSizeCL;
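// E.g., with the defaults above an inline memcpy expansion may use up to
// 6 stores (4 when optimizing for size) before falling back to a libcall.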
1789 // Set up register classes.
1792 addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
1793 addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass); // bbbbaaaa
1794 addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass); // ddccbbaa
1795 addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass); // hgfedcba
1796 addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
1797 addRegisterClass(MVT::v4i8, &Hexagon::IntRegsRegClass);
1798 addRegisterClass(MVT::v2i16, &Hexagon::IntRegsRegClass);
1799 addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
1800 addRegisterClass(MVT::v8i8, &Hexagon::DoubleRegsRegClass);
1801 addRegisterClass(MVT::v4i16, &Hexagon::DoubleRegsRegClass);
1802 addRegisterClass(MVT::v2i32, &Hexagon::DoubleRegsRegClass);
1804 if (Subtarget.hasV5TOps()) {
1805 addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
1806 addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
1807 }
1809 if (Subtarget.hasV60TOps()) {
1810 if (Subtarget.useHVXSglOps()) {
1811 addRegisterClass(MVT::v64i8, &Hexagon::VectorRegsRegClass);
1812 addRegisterClass(MVT::v32i16, &Hexagon::VectorRegsRegClass);
1813 addRegisterClass(MVT::v16i32, &Hexagon::VectorRegsRegClass);
1814 addRegisterClass(MVT::v8i64, &Hexagon::VectorRegsRegClass);
1815 addRegisterClass(MVT::v128i8, &Hexagon::VecDblRegsRegClass);
1816 addRegisterClass(MVT::v64i16, &Hexagon::VecDblRegsRegClass);
1817 addRegisterClass(MVT::v32i32, &Hexagon::VecDblRegsRegClass);
1818 addRegisterClass(MVT::v16i64, &Hexagon::VecDblRegsRegClass);
1819 addRegisterClass(MVT::v512i1, &Hexagon::VecPredRegsRegClass);
1820 } else if (Subtarget.useHVXDblOps()) {
1821 addRegisterClass(MVT::v128i8, &Hexagon::VectorRegs128BRegClass);
1822 addRegisterClass(MVT::v64i16, &Hexagon::VectorRegs128BRegClass);
1823 addRegisterClass(MVT::v32i32, &Hexagon::VectorRegs128BRegClass);
1824 addRegisterClass(MVT::v16i64, &Hexagon::VectorRegs128BRegClass);
1825 addRegisterClass(MVT::v256i8, &Hexagon::VecDblRegs128BRegClass);
1826 addRegisterClass(MVT::v128i16, &Hexagon::VecDblRegs128BRegClass);
1827 addRegisterClass(MVT::v64i32, &Hexagon::VecDblRegs128BRegClass);
1828 addRegisterClass(MVT::v32i64, &Hexagon::VecDblRegs128BRegClass);
1829 addRegisterClass(MVT::v1024i1, &Hexagon::VecPredRegs128BRegClass);
1830 }
1831 }
1834 // Handling of scalar operations.
1836 // All operations default to "legal", except:
1837 // - indexed loads and stores (pre-/post-incremented),
1838 // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS,
1839 // ConstantFP, DEBUGTRAP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN,
1840 // FLOG, FLOG2, FLOG10, FMAXNUM, FMINNUM, FNEARBYINT, FRINT, FROUND, TRAP,
1841 // FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, ZERO_EXTEND_VECTOR_INREG,
1842 // which default to "expand" for at least one type.
1845 setOperationAction(ISD::ConstantFP, MVT::f32, Legal); // Default: expand
1846 setOperationAction(ISD::ConstantFP, MVT::f64, Legal); // Default: expand
1848 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
1849 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
1850 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
1851 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1852 setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
1853 setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
1854 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1855 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1856 setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
1857 setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
1858 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
1859 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
1861 // Custom legalize GlobalAddress nodes into CONST32.
1862 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
1863 setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
1864 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1866 // Hexagon needs to optimize cases with negative constants.
1867 setOperationAction(ISD::SETCC, MVT::i8, Custom);
1868 setOperationAction(ISD::SETCC, MVT::i16, Custom);
1870 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1871 setOperationAction(ISD::VASTART, MVT::Other, Custom);
1872 setOperationAction(ISD::VAEND, MVT::Other, Expand);
1873 setOperationAction(ISD::VAARG, MVT::Other, Expand);
1875 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
1876 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
1877 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
1879 if (EmitJumpTables)
1880 setMinimumJumpTableEntries(MinimumJumpTables);
1881 else
1882 setMinimumJumpTableEntries(std::numeric_limits<int>::max());
1883 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1885 // Hexagon has instructions for add/sub with carry. The problem with
1886 // modeling these instructions is that they produce 2 results: Rdd and Px.
1887 // To model the update of Px, we will have to use Defs[p0..p3] which will
1888 // cause any predicate live range to spill. So, we pretend we don't have
1889 // these instructions.
1890 setOperationAction(ISD::ADDE, MVT::i8, Expand);
1891 setOperationAction(ISD::ADDE, MVT::i16, Expand);
1892 setOperationAction(ISD::ADDE, MVT::i32, Expand);
1893 setOperationAction(ISD::ADDE, MVT::i64, Expand);
1894 setOperationAction(ISD::SUBE, MVT::i8, Expand);
1895 setOperationAction(ISD::SUBE, MVT::i16, Expand);
1896 setOperationAction(ISD::SUBE, MVT::i32, Expand);
1897 setOperationAction(ISD::SUBE, MVT::i64, Expand);
1898 setOperationAction(ISD::ADDC, MVT::i8, Expand);
1899 setOperationAction(ISD::ADDC, MVT::i16, Expand);
1900 setOperationAction(ISD::ADDC, MVT::i32, Expand);
1901 setOperationAction(ISD::ADDC, MVT::i64, Expand);
1902 setOperationAction(ISD::SUBC, MVT::i8, Expand);
1903 setOperationAction(ISD::SUBC, MVT::i16, Expand);
1904 setOperationAction(ISD::SUBC, MVT::i32, Expand);
1905 setOperationAction(ISD::SUBC, MVT::i64, Expand);
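// Roughly, this means a wider-than-register add (e.g. i128) legalizes into
// plain adds plus explicit compares to reconstruct the carry, instead of
// ADDC/ADDE node pairs.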
1907 // Only add and sub that detect overflow are the saturating ones.
1908 for (MVT VT : MVT::integer_valuetypes()) {
1909 setOperationAction(ISD::UADDO, VT, Expand);
1910 setOperationAction(ISD::SADDO, VT, Expand);
1911 setOperationAction(ISD::USUBO, VT, Expand);
1912 setOperationAction(ISD::SSUBO, VT, Expand);
1913 }
1915 setOperationAction(ISD::CTLZ, MVT::i8, Promote);
1916 setOperationAction(ISD::CTLZ, MVT::i16, Promote);
1917 setOperationAction(ISD::CTTZ, MVT::i8, Promote);
1918 setOperationAction(ISD::CTTZ, MVT::i16, Promote);
1920 // In V5, popcount can count # of 1s in i64 but returns i32.
1921 // On V4 it will be expanded (set later).
1922 setOperationAction(ISD::CTPOP, MVT::i8, Promote);
1923 setOperationAction(ISD::CTPOP, MVT::i16, Promote);
1924 setOperationAction(ISD::CTPOP, MVT::i32, Promote);
1925 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
1927 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
1928 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
1929 setOperationAction(ISD::BSWAP, MVT::i32, Legal);
1930 setOperationAction(ISD::BSWAP, MVT::i64, Legal);
1932 // We custom lower i64 to i64 mul, so that it is not considered as a legal
1933 // operation. There is a pattern that will match i64 mul and transform it
1934 // to a series of instructions.
1935 setOperationAction(ISD::MUL, MVT::i64, Expand);
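// Schematically, the expansion computes the 64-bit product from 32-bit
// halves:
//   a*b = lo(a)*lo(b) + ((lo(a)*hi(b) + hi(a)*lo(b)) << 32)
// which maps onto the 32x32 multiply instructions.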
1937 for (unsigned IntExpOp :
1938 { ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM,
1939 ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR,
1940 ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS,
1941 ISD::SMUL_LOHI, ISD::UMUL_LOHI }) {
1942 setOperationAction(IntExpOp, MVT::i32, Expand);
1943 setOperationAction(IntExpOp, MVT::i64, Expand);
1944 }
1946 for (unsigned FPExpOp :
1947 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS,
1948 ISD::FPOW, ISD::FCOPYSIGN}) {
1949 setOperationAction(FPExpOp, MVT::f32, Expand);
1950 setOperationAction(FPExpOp, MVT::f64, Expand);
1951 }
1953 // No extending loads from i32.
1954 for (MVT VT : MVT::integer_valuetypes()) {
1955 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
1956 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
1957 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
1958 }
1959 // Turn FP truncstore into trunc + store.
1960 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1961 // Turn FP extload into load/fpextend.
1962 for (MVT VT : MVT::fp_valuetypes())
1963 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1965 // Expand BR_CC and SELECT_CC for all integer and fp types.
1966 for (MVT VT : MVT::integer_valuetypes()) {
1967 setOperationAction(ISD::BR_CC, VT, Expand);
1968 setOperationAction(ISD::SELECT_CC, VT, Expand);
1969 }
1970 for (MVT VT : MVT::fp_valuetypes()) {
1971 setOperationAction(ISD::BR_CC, VT, Expand);
1972 setOperationAction(ISD::SELECT_CC, VT, Expand);
1973 }
1974 setOperationAction(ISD::BR_CC, MVT::Other, Expand);
1977 // Handling of vector operations.
1980 // Custom-lower the v4i16 load only. Let the v4i16 store be
1981 // promoted for now.
1982 promoteLdStType(MVT::v4i8, MVT::i32);
1983 promoteLdStType(MVT::v2i16, MVT::i32);
1984 promoteLdStType(MVT::v8i8, MVT::i64);
1985 promoteLdStType(MVT::v2i32, MVT::i64);
1987 setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
1988 setOperationAction(ISD::STORE, MVT::v4i16, Promote);
1989 AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::i64);
1990 AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::i64);
1992 // Set the action for vector operations to "expand", then override it with
1993 // either "custom" or "legal" for specific cases.
1994 static const unsigned VectExpOps[] = {
1995 // Integer arithmetic:
1996 ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV,
1997 ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::ADDC,
1998 ISD::SUBC, ISD::SADDO, ISD::UADDO, ISD::SSUBO, ISD::USUBO,
1999 ISD::SMUL_LOHI, ISD::UMUL_LOHI,
2000 // Logical/bit:
2001 ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR,
2002 ISD::CTPOP, ISD::CTLZ, ISD::CTTZ,
2003 // Floating point arithmetic/math functions:
2004 ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, ISD::FDIV,
2005 ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN,
2006 ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2,
2007 ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, ISD::FTRUNC,
2008 ISD::FRINT, ISD::FNEARBYINT, ISD::FROUND, ISD::FFLOOR,
2009 ISD::FMINNUM, ISD::FMAXNUM, ISD::FSINCOS,
2010 // Misc:
2011 ISD::BR_CC, ISD::SELECT_CC, ISD::ConstantPool,
2012 // Vector:
2013 ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR,
2014 ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT,
2015 ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR,
2016 ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE
2017 };
2019 for (MVT VT : MVT::vector_valuetypes()) {
2020 for (unsigned VectExpOp : VectExpOps)
2021 setOperationAction(VectExpOp, VT, Expand);
2023 // Expand all extending loads and truncating stores:
2024 for (MVT TargetVT : MVT::vector_valuetypes()) {
2025 if (TargetVT == VT)
2026 continue;
2027 setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand);
2028 setLoadExtAction(ISD::ZEXTLOAD, TargetVT, VT, Expand);
2029 setLoadExtAction(ISD::SEXTLOAD, TargetVT, VT, Expand);
2030 setTruncStoreAction(VT, TargetVT, Expand);
2031 }
2033 // Normalize all inputs to SELECT to be vectors of i32.
2034 if (VT.getVectorElementType() != MVT::i32) {
2035 MVT VT32 = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
2036 setOperationAction(ISD::SELECT, VT, Promote);
2037 AddPromotedToType(ISD::SELECT, VT, VT32);
2038 }
2039 setOperationAction(ISD::SRA, VT, Custom);
2040 setOperationAction(ISD::SHL, VT, Custom);
2041 setOperationAction(ISD::SRL, VT, Custom);
2042 }
2044 // Types natively supported:
2045 for (MVT NativeVT : {MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v32i1, MVT::v64i1,
2046 MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v1i32,
2047 MVT::v2i32, MVT::v1i64}) {
2048 setOperationAction(ISD::BUILD_VECTOR, NativeVT, Custom);
2049 setOperationAction(ISD::EXTRACT_VECTOR_ELT, NativeVT, Custom);
2050 setOperationAction(ISD::INSERT_VECTOR_ELT, NativeVT, Custom);
2051 setOperationAction(ISD::EXTRACT_SUBVECTOR, NativeVT, Custom);
2052 setOperationAction(ISD::INSERT_SUBVECTOR, NativeVT, Custom);
2053 setOperationAction(ISD::CONCAT_VECTORS, NativeVT, Custom);
2055 setOperationAction(ISD::ADD, NativeVT, Legal);
2056 setOperationAction(ISD::SUB, NativeVT, Legal);
2057 setOperationAction(ISD::MUL, NativeVT, Legal);
2058 setOperationAction(ISD::AND, NativeVT, Legal);
2059 setOperationAction(ISD::OR, NativeVT, Legal);
2060 setOperationAction(ISD::XOR, NativeVT, Legal);
2061 }
2063 setOperationAction(ISD::SETCC, MVT::v2i16, Custom);
2064 setOperationAction(ISD::VSELECT, MVT::v2i16, Custom);
2065 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
2066 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
2068 if (UseHVX) {
2069 if (UseHVXSgl) {
2070 setOperationAction(ISD::CONCAT_VECTORS, MVT::v128i8, Custom);
2071 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i16, Custom);
2072 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i32, Custom);
2073 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i64, Custom);
2074 // We try to generate the vpack{e/o} instructions. If we fail
2075 // we fall back upon ExpandOp.
2076 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
2077 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
2078 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v64i8, Custom);
2079 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i16, Custom);
2080 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);
2081 } else if (UseHVXDbl) {
2082 setOperationAction(ISD::CONCAT_VECTORS, MVT::v256i8, Custom);
2083 setOperationAction(ISD::CONCAT_VECTORS, MVT::v128i16, Custom);
2084 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i32, Custom);
2085 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i64, Custom);
2086 // We try to generate the vpack{e/o} instructions. If we fail
2087 // we fall back upon ExpandOp.
2088 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v128i8, Custom);
2089 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i16, Custom);
2090 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
2091 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v128i8, Custom);
2092 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v64i16, Custom);
2093 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i32, Custom);
2095 llvm_unreachable("Unrecognized HVX mode");
2098 // Subtarget-specific operation actions.
2100 if (Subtarget.hasV5TOps()) {
2101 setOperationAction(ISD::FMA, MVT::f64, Expand);
2102 setOperationAction(ISD::FADD, MVT::f64, Expand);
2103 setOperationAction(ISD::FSUB, MVT::f64, Expand);
2104 setOperationAction(ISD::FMUL, MVT::f64, Expand);
2106 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
2107 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
2109 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
2110 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
2111 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
2112 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
2113 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
2114 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
2115 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
2116 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
2117 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
2118 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
2119 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
2120 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
2121 } else { // V4
2122 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
2123 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Expand);
2124 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
2125 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
2126 setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
2127 setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
2128 setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
2129 setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
2130 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
2132 setOperationAction(ISD::CTPOP, MVT::i8, Expand);
2133 setOperationAction(ISD::CTPOP, MVT::i16, Expand);
2134 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
2135 setOperationAction(ISD::CTPOP, MVT::i64, Expand);
2137 // Expand these operations for both f32 and f64:
2138 for (unsigned FPExpOpV4 :
2139 {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FABS, ISD::FNEG, ISD::FMA}) {
2140 setOperationAction(FPExpOpV4, MVT::f32, Expand);
2141 setOperationAction(FPExpOpV4, MVT::f64, Expand);
2142 }
2144 for (ISD::CondCode FPExpCCV4 :
2145 {ISD::SETOEQ, ISD::SETOGT, ISD::SETOLT, ISD::SETOGE, ISD::SETOLE,
2146 ISD::SETUO, ISD::SETO}) {
2147 setCondCodeAction(FPExpCCV4, MVT::f32, Expand);
2148 setCondCodeAction(FPExpCCV4, MVT::f64, Expand);
2149 }
2150 }
2152 // Handling of indexed loads/stores: default is "expand".
2154 for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
2155 setIndexedLoadAction(ISD::POST_INC, VT, Legal);
2156 setIndexedStoreAction(ISD::POST_INC, VT, Legal);
2157 }
2159 if (UseHVXSgl) {
2160 for (MVT VT : {MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
2161 MVT::v128i8, MVT::v64i16, MVT::v32i32, MVT::v16i64}) {
2162 setIndexedLoadAction(ISD::POST_INC, VT, Legal);
2163 setIndexedStoreAction(ISD::POST_INC, VT, Legal);
2164 }
2165 } else if (UseHVXDbl) {
2166 for (MVT VT : {MVT::v128i8, MVT::v64i16, MVT::v32i32, MVT::v16i64,
2167 MVT::v256i8, MVT::v128i16, MVT::v64i32, MVT::v32i64}) {
2168 setIndexedLoadAction(ISD::POST_INC, VT, Legal);
2169 setIndexedStoreAction(ISD::POST_INC, VT, Legal);
2170 }
2171 }
2173 computeRegisterProperties(&HRI);
2176 // Library calls for unsupported operations
2178 bool FastMath = EnableFastMath;
2180 setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
2181 setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
2182 setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
2183 setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
2184 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
2185 setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
2186 setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
2187 setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
2189 setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
2190 setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
2191 setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
2192 setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
2193 setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
2194 setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
2196 if (IsV4) {
2197 // Handle single-precision floating point operations on V4.
2198 if (FastMath) {
2199 setLibcallName(RTLIB::ADD_F32, "__hexagon_fast_addsf3");
2200 setLibcallName(RTLIB::SUB_F32, "__hexagon_fast_subsf3");
2201 setLibcallName(RTLIB::MUL_F32, "__hexagon_fast_mulsf3");
2202 setLibcallName(RTLIB::OGT_F32, "__hexagon_fast_gtsf2");
2203 setLibcallName(RTLIB::OLT_F32, "__hexagon_fast_ltsf2");
2204 // Double-precision compares.
2205 setLibcallName(RTLIB::OGT_F64, "__hexagon_fast_gtdf2");
2206 setLibcallName(RTLIB::OLT_F64, "__hexagon_fast_ltdf2");
2208 setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
2209 setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
2210 setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
2211 setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
2212 setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
2213 // Double-precision compares.
2214 setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
2215 setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
2216 }
2217 }
2219 // This is the only fast library function for sqrtd.
2220 if (FastMath && IsV4)
2221 setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");
2223 // Prefix is: nothing for "slow-math",
2224 // "fast2_" for V4 fast-math and V5+ fast-math double-precision
2225 // (actually, keep fast-math and fast-math2 separate for now)
2226 if (FastMath) {
2227 setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3");
2228 setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3");
2229 setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3");
2230 setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3");
2231 // Calling __hexagon_fast2_divsf3 with fast-math on V5 (ok).
2232 setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3");
2234 setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
2235 setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
2236 setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
2237 setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
2238 setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
2239 }
2241 if (Subtarget.hasV5TOps()) {
2242 if (FastMath)
2243 setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf");
2244 else
2245 setLibcallName(RTLIB::SQRT_F32, "__hexagon_sqrtf");
2246 } else {
2247 // V4
2248 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
2249 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
2250 setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
2251 setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
2252 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
2253 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
2254 setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
2255 setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
2256 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
2257 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
2258 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
2259 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
2260 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
2261 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
2262 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
2263 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
2264 setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
2265 setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
2266 setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
2267 setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
2268 setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
2269 setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
2270 setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
2271 setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
2272 setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
2273 setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
2274 setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
2275 setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
2276 setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
2277 setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
2278 }
2280 // These cause problems when the shift amount is non-constant.
2281 setLibcallName(RTLIB::SHL_I128, nullptr);
2282 setLibcallName(RTLIB::SRL_I128, nullptr);
2283 setLibcallName(RTLIB::SRA_I128, nullptr);
2284 }
2286 const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
2287 switch ((HexagonISD::NodeType)Opcode) {
2288 case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA";
2289 case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT";
2290 case HexagonISD::AT_PCREL: return "HexagonISD::AT_PCREL";
2291 case HexagonISD::BARRIER: return "HexagonISD::BARRIER";
2292 case HexagonISD::CALL: return "HexagonISD::CALL";
2293 case HexagonISD::CALLnr: return "HexagonISD::CALLnr";
2294 case HexagonISD::CALLR: return "HexagonISD::CALLR";
2295 case HexagonISD::COMBINE: return "HexagonISD::COMBINE";
2296 case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
2297 case HexagonISD::CONST32: return "HexagonISD::CONST32";
2298 case HexagonISD::CP: return "HexagonISD::CP";
2299 case HexagonISD::DCFETCH: return "HexagonISD::DCFETCH";
2300 case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
2301 case HexagonISD::EXTRACTU: return "HexagonISD::EXTRACTU";
2302 case HexagonISD::EXTRACTURP: return "HexagonISD::EXTRACTURP";
2303 case HexagonISD::INSERT: return "HexagonISD::INSERT";
2304 case HexagonISD::INSERTRP: return "HexagonISD::INSERTRP";
2305 case HexagonISD::JT: return "HexagonISD::JT";
2306 case HexagonISD::PACKHL: return "HexagonISD::PACKHL";
2307 case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
2308 case HexagonISD::SHUFFEB: return "HexagonISD::SHUFFEB";
2309 case HexagonISD::SHUFFEH: return "HexagonISD::SHUFFEH";
2310 case HexagonISD::SHUFFOB: return "HexagonISD::SHUFFOB";
2311 case HexagonISD::SHUFFOH: return "HexagonISD::SHUFFOH";
2312 case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
2313 case HexagonISD::VCMPBEQ: return "HexagonISD::VCMPBEQ";
2314 case HexagonISD::VCMPBGT: return "HexagonISD::VCMPBGT";
2315 case HexagonISD::VCMPBGTU: return "HexagonISD::VCMPBGTU";
2316 case HexagonISD::VCMPHEQ: return "HexagonISD::VCMPHEQ";
2317 case HexagonISD::VCMPHGT: return "HexagonISD::VCMPHGT";
2318 case HexagonISD::VCMPHGTU: return "HexagonISD::VCMPHGTU";
2319 case HexagonISD::VCMPWEQ: return "HexagonISD::VCMPWEQ";
2320 case HexagonISD::VCMPWGT: return "HexagonISD::VCMPWGT";
2321 case HexagonISD::VCMPWGTU: return "HexagonISD::VCMPWGTU";
2322 case HexagonISD::VCOMBINE: return "HexagonISD::VCOMBINE";
2323 case HexagonISD::VPACK: return "HexagonISD::VPACK";
2324 case HexagonISD::VSHLH: return "HexagonISD::VSHLH";
2325 case HexagonISD::VSHLW: return "HexagonISD::VSHLW";
2326 case HexagonISD::VSPLATB: return "HexagonISD::VSPLATB";
2327 case HexagonISD::VSPLATH: return "HexagonISD::VSPLATH";
2328 case HexagonISD::VSRAH: return "HexagonISD::VSRAH";
2329 case HexagonISD::VSRAW: return "HexagonISD::VSRAW";
2330 case HexagonISD::VSRLH: return "HexagonISD::VSRLH";
2331 case HexagonISD::VSRLW: return "HexagonISD::VSRLW";
2332 case HexagonISD::VSXTBH: return "HexagonISD::VSXTBH";
2333 case HexagonISD::VSXTBW: return "HexagonISD::VSXTBW";
2334 case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE";
2335 case HexagonISD::OP_END: break;
2336 }
2337 return nullptr;
2338 }
2340 bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
2341 EVT MTy1 = EVT::getEVT(Ty1);
2342 EVT MTy2 = EVT::getEVT(Ty2);
2343 if (!MTy1.isSimple() || !MTy2.isSimple())
2344 return false;
2345 return (MTy1.getSimpleVT() == MVT::i64) && (MTy2.getSimpleVT() == MVT::i32);
2346 }
2348 bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
2349 if (!VT1.isSimple() || !VT2.isSimple())
2350 return false;
2351 return (VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32);
2352 }
2354 bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
2355 return isOperationLegalOrCustom(ISD::FMA, VT);
2356 }
2358 // Should we expand the build vector with shuffles?
2359 bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT,
2360 unsigned DefinedValues) const {
2361 // Hexagon vector shuffle operates on element sizes of bytes or halfwords
2362 EVT EltVT = VT.getVectorElementType();
2363 int EltBits = EltVT.getSizeInBits();
2364 if ((EltBits != 8) && (EltBits != 16))
2365 return false;
2367 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
2368 }
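// Example masks for 8 elements: <0,2,4,6,8,10,12,14> is the "even" strided
// pattern and <1,3,5,7,9,11,13,15> the "odd" one; these are the shapes the
// vpack{e,o} lowering below looks for.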
2370 static StridedLoadKind isStridedLoad(const ArrayRef<int> &Mask) {
2371 int even_start = -2;
2372 int odd_start = -1;
2373 size_t mask_len = Mask.size();
2374 for (auto idx : Mask) {
2375 if ((idx - even_start) == 2)
2376 even_start = idx;
2377 else
2378 break;
2379 }
2380 if (even_start == (int)(mask_len * 2) - 2)
2381 return StridedLoadKind::Even;
2382 for (auto idx : Mask) {
2383 if ((idx - odd_start) == 2)
2384 odd_start = idx;
2385 else
2386 break;
2387 }
2388 if (odd_start == (int)(mask_len * 2) - 1)
2389 return StridedLoadKind::Odd;
2391 return StridedLoadKind::NoPattern;
2392 }
2394 bool HexagonTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
2395 EVT VT) const {
2396 if (Subtarget.useHVXOps())
2397 return isStridedLoad(Mask) != StridedLoadKind::NoPattern;
2398 return true;
2399 }
2401 // Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors
2402 // to select data from, V3 is the permutation.
2403 SDValue
2404 HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
2405 const {
2406 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2407 SDValue V1 = Op.getOperand(0);
2408 SDValue V2 = Op.getOperand(1);
2409 SDLoc dl(Op);
2410 EVT VT = Op.getValueType();
2411 bool UseHVX = Subtarget.useHVXOps();
2413 if (V2.isUndef())
2414 V2 = V1;
2416 if (SVN->isSplat()) {
2417 int Lane = SVN->getSplatIndex();
2418 if (Lane == -1) Lane = 0;
2420 // Test if V1 is a SCALAR_TO_VECTOR.
2421 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)
2422 return createSplat(DAG, dl, VT, V1.getOperand(0));
2424 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
2425 // (and probably will turn into a SCALAR_TO_VECTOR once legalization
2426 // reaches it).
2427 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
2428 !isa<ConstantSDNode>(V1.getOperand(0))) {
2429 bool IsScalarToVector = true;
2430 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) {
2431 if (!V1.getOperand(i).isUndef()) {
2432 IsScalarToVector = false;
2433 break;
2434 }
2435 }
2436 if (IsScalarToVector)
2437 return createSplat(DAG, dl, VT, V1.getOperand(0));
2438 }
2439 return createSplat(DAG, dl, VT, DAG.getConstant(Lane, dl, MVT::i32));
2440 }
2443 ArrayRef<int> Mask = SVN->getMask();
2444 size_t MaskLen = Mask.size();
2445 int ElemSizeInBits = VT.getScalarSizeInBits();
2446 if ((Subtarget.useHVXSglOps() && (ElemSizeInBits * MaskLen) == 64 * 8) ||
2447 (Subtarget.useHVXDblOps() && (ElemSizeInBits * MaskLen) == 128 * 8)) {
2448 // Return 1 for odd and 2 for even.
2449 StridedLoadKind Pattern = isStridedLoad(Mask);
2451 if (Pattern == StridedLoadKind::NoPattern)
2452 return SDValue();
2454 SDValue Vec0 = Op.getOperand(0);
2455 SDValue Vec1 = Op.getOperand(1);
2456 SDValue StridePattern = DAG.getConstant(Pattern, dl, MVT::i32);
2457 SDValue Ops[] = { Vec1, Vec0, StridePattern };
2458 return DAG.getNode(HexagonISD::VPACK, dl, VT, Ops);
2459 }
2460 // We used to assert in the "else" part here, but that is bad for Halide.
2461 // Halide creates intermediate double registers by interleaving two
2462 // concatenated vector registers. The interleaving requires vector_shuffle
2463 // nodes and we shouldn't barf on a double register result of a
2464 // vector_shuffle because it is most likely an intermediate result.
2466 // FIXME: We need to support more general vector shuffles. See
2467 // below the comment from the ARM backend that deals in the general
2468 // case with the vector shuffles. For now, let expand handle these.
2469 return SDValue();
2471 // If the shuffle is not directly supported and it has 4 elements, use
2472 // the PerfectShuffle-generated table to synthesize it from other shuffles.
2473 }
2475 // If a BUILD_VECTOR has the same base element repeated several times,
2476 // report true.
2477 static bool isCommonSplatElement(BuildVectorSDNode *BVN) {
2478 unsigned NElts = BVN->getNumOperands();
2479 SDValue V0 = BVN->getOperand(0);
2481 for (unsigned i = 1, e = NElts; i != e; ++i) {
2482 if (BVN->getOperand(i) != V0)
2483 return false;
2484 }
2485 return true;
2486 }
2488 // Lower a vector shift. Try to convert
2489 // <VT> = SHL/SRA/SRL <VT> by <VT> to Hexagon specific
2490 // <VT> = SHL/SRA/SRL <VT> by <IT/i32>.
2491 SDValue
2492 HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
2493 BuildVectorSDNode *BVN = nullptr;
2494 SDValue V1 = Op.getOperand(0);
2495 SDValue V2 = Op.getOperand(1);
2496 SDValue V3;
2497 SDLoc dl(Op);
2498 EVT VT = Op.getValueType();
2500 if ((BVN = dyn_cast<BuildVectorSDNode>(V1.getNode())) &&
2501 isCommonSplatElement(BVN))
2502 V3 = V2;
2503 else if ((BVN = dyn_cast<BuildVectorSDNode>(V2.getNode())) &&
2504 isCommonSplatElement(BVN))
2505 V3 = V1;
2506 else
2507 return SDValue();
2509 SDValue CommonSplat = BVN->getOperand(0);
2510 SDValue Result;
2512 if (VT.getSimpleVT() == MVT::v4i16) {
2513 switch (Op.getOpcode()) {
2514 case ISD::SRA:
2515 Result = DAG.getNode(HexagonISD::VSRAH, dl, VT, V3, CommonSplat);
2516 break;
2517 case ISD::SHL:
2518 Result = DAG.getNode(HexagonISD::VSHLH, dl, VT, V3, CommonSplat);
2519 break;
2520 case ISD::SRL:
2521 Result = DAG.getNode(HexagonISD::VSRLH, dl, VT, V3, CommonSplat);
2522 break;
2523 default:
2524 return SDValue();
2525 }
2526 } else if (VT.getSimpleVT() == MVT::v2i32) {
2527 switch (Op.getOpcode()) {
2528 case ISD::SRA:
2529 Result = DAG.getNode(HexagonISD::VSRAW, dl, VT, V3, CommonSplat);
2530 break;
2531 case ISD::SHL:
2532 Result = DAG.getNode(HexagonISD::VSHLW, dl, VT, V3, CommonSplat);
2533 break;
2534 case ISD::SRL:
2535 Result = DAG.getNode(HexagonISD::VSRLW, dl, VT, V3, CommonSplat);
2536 break;
2537 default:
2538 return SDValue();
2539 }
2540 } else {
2541 return SDValue();
2542 }
2544 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
2545 }
2547 SDValue
2548 HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
2549 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
2550 SDLoc dl(Op);
2551 EVT VT = Op.getValueType();
2553 unsigned Size = VT.getSizeInBits();
2555 // Only handle vectors of 64 bits or shorter.
2556 if (Size > 64)
2557 return SDValue();
2559 APInt APSplatBits, APSplatUndef;
2560 unsigned SplatBitSize;
2561 bool HasAnyUndefs;
2562 unsigned NElts = BVN->getNumOperands();
2564 // Try to generate a SPLAT instruction.
2565 if ((VT.getSimpleVT() == MVT::v4i8 || VT.getSimpleVT() == MVT::v4i16) &&
2566 (BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
2567 HasAnyUndefs, 0, true) && SplatBitSize <= 16)) {
2568 unsigned SplatBits = APSplatBits.getZExtValue();
2569 int32_t SextVal = ((int32_t) (SplatBits << (32 - SplatBitSize)) >>
2570 (32 - SplatBitSize));
2571 return createSplat(DAG, dl, VT, DAG.getConstant(SextVal, dl, MVT::i32));
2572 }
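// For example, a <4 x i8> build vector with all lanes 0xFF yields
// SplatBits == 0xFF and SplatBitSize == 8 above, so SextVal sign-extends
// to -1 and a single splat of -1 is generated.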
2574 // Try to generate COMBINE to build v2i32 vectors.
2575 if (VT.getSimpleVT() == MVT::v2i32) {
2576 SDValue V0 = BVN->getOperand(0);
2577 SDValue V1 = BVN->getOperand(1);
2579 if (V0.isUndef())
2580 V0 = DAG.getConstant(0, dl, MVT::i32);
2581 if (V1.isUndef())
2582 V1 = DAG.getConstant(0, dl, MVT::i32);
2584 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(V0);
2585 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(V1);
2586 // If the element isn't a constant, it is in a register:
2587 // generate a COMBINE Register Register instruction.
2588 if (!C0 || !C1)
2589 return DAG.getNode(HexagonISD::COMBINE, dl, VT, V1, V0);
2591 // If one of the operands is an 8 bit integer constant, generate
2592 // a COMBINE Immediate Immediate instruction.
2593 if (isInt<8>(C0->getSExtValue()) ||
2594 isInt<8>(C1->getSExtValue()))
2595 return DAG.getNode(HexagonISD::COMBINE, dl, VT, V1, V0);
2596 }
2598 // Try to generate a S2_packhl to build v2i16 vectors.
2599 if (VT.getSimpleVT() == MVT::v2i16) {
2600 for (unsigned i = 0, e = NElts; i != e; ++i) {
2601 if (BVN->getOperand(i).isUndef())
2602 continue;
2603 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(BVN->getOperand(i));
2604 // If the element isn't a constant, it is in a register:
2605 // generate a S2_packhl instruction.
2606 if (!Cst) {
2607 SDValue pack = DAG.getNode(HexagonISD::PACKHL, dl, MVT::v4i16,
2608 BVN->getOperand(1), BVN->getOperand(0));
2610 return DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::v2i16,
2611 pack);
2612 }
2613 }
2614 }
2616 // In the general case, generate a CONST32 or a CONST64 for constant vectors,
2617 // and insert_vector_elt for all the other cases.
2618 uint64_t Res = 0;
2619 unsigned EltSize = Size / NElts;
2620 SDValue ConstVal;
2621 uint64_t Mask = ~uint64_t(0ULL) >> (64 - EltSize);
2622 bool HasNonConstantElements = false;
2624 for (unsigned i = 0, e = NElts; i != e; ++i) {
2625 // LLVM's BUILD_VECTOR operands are in Little Endian mode, whereas Hexagon's
2626 // combine, const64, etc. are Big Endian.
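// E.g., for v4i16 {e0,e1,e2,e3}, Res is packed as e3:e2:e1:e0 from the
// most significant halfword down to the least.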
2627 unsigned OpIdx = NElts - i - 1;
2628 SDValue Operand = BVN->getOperand(OpIdx);
2629 if (Operand.isUndef())
2630 continue;
2632 int64_t Val = 0;
2633 if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Operand))
2634 Val = Cst->getSExtValue();
2635 else
2636 HasNonConstantElements = true;
2638 Val &= Mask;
2639 Res = (Res << EltSize) | Val;
2640 }
2645 if (Size > 32)
2646 ConstVal = DAG.getConstant(Res, dl, MVT::i64);
2647 else
2648 ConstVal = DAG.getConstant(Res, dl, MVT::i32);
2650 // When there are non constant operands, add them with INSERT_VECTOR_ELT to
2651 // ConstVal, the constant part of the vector.
2652 if (HasNonConstantElements) {
2653 EVT EltVT = VT.getVectorElementType();
2654 SDValue Width = DAG.getConstant(EltVT.getSizeInBits(), dl, MVT::i64);
2655 SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
2656 DAG.getConstant(32, dl, MVT::i64));
2658 for (unsigned i = 0, e = NElts; i != e; ++i) {
2659 // LLVM's BUILD_VECTOR operands are in Little Endian mode, whereas Hexagon's
2660 // combine, const64, etc. are Big Endian.
2661 unsigned OpIdx = NElts - i - 1;
2662 SDValue Operand = BVN->getOperand(OpIdx);
2663 if (isa<ConstantSDNode>(Operand))
2664 // This operand is already in ConstVal.
2665 continue;
2667 if (VT.getSizeInBits() == 64 &&
2668 Operand.getValueSizeInBits() == 32) {
2669 SDValue C = DAG.getConstant(0, dl, MVT::i32);
2670 Operand = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Operand);
2671 }
2673 SDValue Idx = DAG.getConstant(OpIdx, dl, MVT::i64);
2674 SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i64, Idx, Width);
2675 SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
2676 const SDValue Ops[] = {ConstVal, Operand, Combined};
2678 if (VT.getSizeInBits() == 32)
2679 ConstVal = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i32, Ops);
2680 else
2681 ConstVal = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i64, Ops);
2682 }
2683 }
2685 return DAG.getNode(ISD::BITCAST, dl, VT, ConstVal);
2686 }
2688 SDValue
2689 HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
2690 SelectionDAG &DAG) const {
2691 SDLoc dl(Op);
2692 bool UseHVX = Subtarget.useHVXOps();
2693 EVT VT = Op.getValueType();
2694 unsigned NElts = Op.getNumOperands();
2695 SDValue Vec0 = Op.getOperand(0);
2696 EVT VecVT = Vec0.getValueType();
2697 unsigned Width = VecVT.getSizeInBits();
2699 if (NElts == 2) {
2700 MVT ST = VecVT.getSimpleVT();
2701 // We are trying to concat two v2i16 to a single v4i16, or two v4i8
2702 // into a single v8i8.
2703 if (ST == MVT::v2i16 || ST == MVT::v4i8)
2704 return DAG.getNode(HexagonISD::COMBINE, dl, VT, Op.getOperand(1), Vec0);
2706 if (UseHVX) {
2707 assert((Width == 64*8 && Subtarget.useHVXSglOps()) ||
2708 (Width == 128*8 && Subtarget.useHVXDblOps()));
2709 SDValue Vec1 = Op.getOperand(1);
2710 MVT OpTy = Subtarget.useHVXSglOps() ? MVT::v16i32 : MVT::v32i32;
2711 MVT ReTy = Subtarget.useHVXSglOps() ? MVT::v32i32 : MVT::v64i32;
2712 SDValue B0 = DAG.getNode(ISD::BITCAST, dl, OpTy, Vec0);
2713 SDValue B1 = DAG.getNode(ISD::BITCAST, dl, OpTy, Vec1);
2714 SDValue VC = DAG.getNode(HexagonISD::VCOMBINE, dl, ReTy, B1, B0);
2715 return DAG.getNode(ISD::BITCAST, dl, VT, VC);
2716 }
2717 }
2719 if (VT.getSizeInBits() != 32 && VT.getSizeInBits() != 64)
2720 return SDValue();
2722 SDValue C0 = DAG.getConstant(0, dl, MVT::i64);
2723 SDValue C32 = DAG.getConstant(32, dl, MVT::i64);
2724 SDValue W = DAG.getConstant(Width, dl, MVT::i64);
2725 // Create the "width" part of the argument to insert_rp/insertp_rp.
2726 SDValue S = DAG.getNode(ISD::SHL, dl, MVT::i64, W, C32);
2727 SDValue V = C0;
2729 for (unsigned i = 0, e = NElts; i != e; ++i) {
2730 unsigned N = NElts-i-1;
2731 SDValue OpN = Op.getOperand(N);
2733 if (VT.getSizeInBits() == 64 && OpN.getValueSizeInBits() == 32) {
2734 SDValue C = DAG.getConstant(0, dl, MVT::i32);
2735 OpN = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, OpN);
2736 }
2737 SDValue Idx = DAG.getConstant(N, dl, MVT::i64);
2738 SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i64, Idx, W);
2739 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, S, Offset);
2740 if (VT.getSizeInBits() == 32)
2741 V = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i32, {V, OpN, Or});
2742 else if (VT.getSizeInBits() == 64)
2743 V = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i64, {V, OpN, Or});
2744 else
2745 return SDValue();
2746 }
2748 return DAG.getNode(ISD::BITCAST, dl, VT, V);
2749 }
2751 SDValue
2752 HexagonTargetLowering::LowerEXTRACT_SUBVECTOR_HVX(SDValue Op,
2753 SelectionDAG &DAG) const {
2754 EVT VT = Op.getOperand(0).getValueType();
2755 SDLoc dl(Op);
2756 bool UseHVX = Subtarget.useHVXOps();
2757 bool UseHVXSgl = Subtarget.useHVXSglOps();
2760 if (!VT.isVector() || !UseHVX)
2761 return SDValue();
2763 EVT ResVT = Op.getValueType();
2764 unsigned ResSize = ResVT.getSizeInBits();
2765 unsigned VectorSizeInBits = UseHVXSgl ? (64 * 8) : (128 * 8);
2766 unsigned OpSize = VT.getSizeInBits();
2768 // We deal only with cases where the result is the vector size
2769 // and the vector operand is a double register.
2770 if (!(ResVT.isByteSized() && ResSize == VectorSizeInBits) ||
2771 !(VT.isByteSized() && OpSize == 2 * VectorSizeInBits))
2772 return SDValue();
2774 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2775 if (!Cst)
2776 return SDValue();
2777 unsigned Val = Cst->getZExtValue();
2779 // These two will get lowered to an appropriate EXTRACT_SUBREG in ISel.
2780 if (Val == 0) {
2781 SDValue Vec = Op.getOperand(0);
2782 return DAG.getTargetExtractSubreg(Hexagon::vsub_lo, dl, ResVT, Vec);
2783 }
2785 if (ResVT.getVectorNumElements() == Val) {
2786 SDValue Vec = Op.getOperand(0);
2787 return DAG.getTargetExtractSubreg(Hexagon::vsub_hi, dl, ResVT, Vec);
2788 }
2790 return SDValue();
2791 }
2793 SDValue
2794 HexagonTargetLowering::LowerEXTRACT_VECTOR(SDValue Op,
2795 SelectionDAG &DAG) const {
2796 // If we are dealing with EXTRACT_SUBVECTOR on a HVX type, we may
2797 // be able to simplify it to an EXTRACT_SUBREG.
2798 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR && Subtarget.useHVXOps() &&
2799 isHvxVectorType(Op.getValueType().getSimpleVT()))
2800 return LowerEXTRACT_SUBVECTOR_HVX(Op, DAG);
2802 EVT VT = Op.getValueType();
2803 int VTN = VT.isVector() ? VT.getVectorNumElements() : 1;
2804 SDLoc dl(Op);
2805 SDValue Idx = Op.getOperand(1);
2806 SDValue Vec = Op.getOperand(0);
2807 EVT VecVT = Vec.getValueType();
2808 EVT EltVT = VecVT.getVectorElementType();
2809 int EltSize = EltVT.getSizeInBits();
2810 SDValue Width = DAG.getConstant(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT ?
2811 EltSize : VTN * EltSize, dl, MVT::i64);
2813 // Constant element number.
2814 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Idx)) {
2815 uint64_t X = CI->getZExtValue();
2816 SDValue Offset = DAG.getConstant(X * EltSize, dl, MVT::i32);
2817 const SDValue Ops[] = {Vec, Width, Offset};
2819 ConstantSDNode *CW = dyn_cast<ConstantSDNode>(Width);
2820 assert(CW && "Non-constant width in LowerEXTRACT_VECTOR");
2822 SDValue N;
2823 MVT SVT = VecVT.getSimpleVT();
2824 uint64_t W = CW->getZExtValue();
2826 if (W == 32) {
2827 // Translate this node into EXTRACT_SUBREG.
2828 unsigned Subreg = (X == 0) ? Hexagon::isub_lo : 0;
2830 if (X == 0)
2831 Subreg = Hexagon::isub_lo;
2832 else if (SVT == MVT::v2i32 && X == 1)
2833 Subreg = Hexagon::isub_hi;
2834 else if (SVT == MVT::v4i16 && X == 2)
2835 Subreg = Hexagon::isub_hi;
2836 else if (SVT == MVT::v8i8 && X == 4)
2837 Subreg = Hexagon::isub_hi;
2838 else
2839 llvm_unreachable("Bad offset");
2840 N = DAG.getTargetExtractSubreg(Subreg, dl, MVT::i32, Vec);
2842 } else if (SVT.getSizeInBits() == 32) {
2843 N = DAG.getNode(HexagonISD::EXTRACTU, dl, MVT::i32, Ops);
2844 } else if (SVT.getSizeInBits() == 64) {
2845 N = DAG.getNode(HexagonISD::EXTRACTU, dl, MVT::i64, Ops);
2846 if (VT.getSizeInBits() == 32)
2847 N = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, N);
2848 } else
2849 return SDValue();
2851 return DAG.getNode(ISD::BITCAST, dl, VT, N);
2852 }
2854 // Variable element number.
2855 SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i32, Idx,
2856 DAG.getConstant(EltSize, dl, MVT::i32));
2857 SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
2858 DAG.getConstant(32, dl, MVT::i64));
2859 SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
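// Note: Combined packs the access descriptor as (width << 32) | bit-offset,
// the i64 shape the EXTRACTURP/INSERTRP nodes expect.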
2861 const SDValue Ops[] = {Vec, Combined};
2863 SDValue N;
2864 if (VecVT.getSizeInBits() == 32) {
2865 N = DAG.getNode(HexagonISD::EXTRACTURP, dl, MVT::i32, Ops);
2866 } else {
2867 N = DAG.getNode(HexagonISD::EXTRACTURP, dl, MVT::i64, Ops);
2868 if (VT.getSizeInBits() == 32)
2869 N = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, N);
2870 }
2871 return DAG.getNode(ISD::BITCAST, dl, VT, N);
2872 }
2874 SDValue
2875 HexagonTargetLowering::LowerINSERT_VECTOR(SDValue Op,
2876 SelectionDAG &DAG) const {
2877 EVT VT = Op.getValueType();
2878 int VTN = VT.isVector() ? VT.getVectorNumElements() : 1;
2879 SDLoc dl(Op);
2880 SDValue Vec = Op.getOperand(0);
2881 SDValue Val = Op.getOperand(1);
2882 SDValue Idx = Op.getOperand(2);
2883 EVT VecVT = Vec.getValueType();
2884 EVT EltVT = VecVT.getVectorElementType();
2885 int EltSize = EltVT.getSizeInBits();
2886 SDValue Width = DAG.getConstant(Op.getOpcode() == ISD::INSERT_VECTOR_ELT ?
2887 EltSize : VTN * EltSize, dl, MVT::i64);
2889 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Idx)) {
2890 SDValue Offset = DAG.getConstant(C->getSExtValue() * EltSize, dl, MVT::i32);
2891 const SDValue Ops[] = {Vec, Val, Width, Offset};
2893 SDValue N;
2894 if (VT.getSizeInBits() == 32)
2895 N = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32, Ops);
2896 else if (VT.getSizeInBits() == 64)
2897 N = DAG.getNode(HexagonISD::INSERT, dl, MVT::i64, Ops);
2898 else
2899 return SDValue();
2901 return DAG.getNode(ISD::BITCAST, dl, VT, N);
2902 }
2904 // Variable element number.
2905 SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i32, Idx,
2906 DAG.getConstant(EltSize, dl, MVT::i32));
2907 SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
2908 DAG.getConstant(32, dl, MVT::i64));
2909 SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
2911 if (VT.getSizeInBits() == 64 && Val.getValueSizeInBits() == 32) {
2912 SDValue C = DAG.getConstant(0, dl, MVT::i32);
2913 Val = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Val);
2914 }
2916 const SDValue Ops[] = {Vec, Val, Combined};
2918 SDValue N;
2919 if (VT.getSizeInBits() == 32)
2920 N = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i32, Ops);
2921 else if (VT.getSizeInBits() == 64)
2922 N = DAG.getNode(HexagonISD::INSERTRP, dl, MVT::i64, Ops);
2923 else
2924 return SDValue();
2926 return DAG.getNode(ISD::BITCAST, dl, VT, N);
2927 }
2929 bool
2930 HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
2931 // Assuming the caller does not have either a signext or zeroext modifier, and
2932 // only one value is accepted, any reasonable truncation is allowed.
2933 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
2934 return false;
2936 // FIXME: in principle up to 64-bit could be made safe, but it would be very
2937 // fragile at the moment: any support for multiple value returns would be
2938 // liable to disallow tail calls involving i64 -> iN truncation in many cases.
2939 return Ty1->getPrimitiveSizeInBits() <= 32;
2940 }
2942 SDValue
2943 HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
2944 SDValue Chain = Op.getOperand(0);
2945 SDValue Offset = Op.getOperand(1);
2946 SDValue Handler = Op.getOperand(2);
2947 SDLoc dl(Op);
2948 auto PtrVT = getPointerTy(DAG.getDataLayout());
2950 // Mark function as containing a call to EH_RETURN.
2951 HexagonMachineFunctionInfo *FuncInfo =
2952 DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
2953 FuncInfo->setHasEHReturn();
2955 unsigned OffsetReg = Hexagon::R28;
2957 SDValue StoreAddr =
2958 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT),
2959 DAG.getIntPtrConstant(4, dl));
2960 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
2961 Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
2963 // Not needed; we already use it as an explicit input to EH_RETURN.
2964 // MF.getRegInfo().addLiveOut(OffsetReg);
2966 return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
2967 }
2969 SDValue
2970 HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2971 unsigned Opc = Op.getOpcode();
2972 switch (Opc) {
2973 default:
2974 #ifndef NDEBUG
2975 Op.getNode()->dumpr(&DAG);
2976 if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END)
2977 errs() << "Check for a non-legal type in this operation\n";
2978 #endif
2979 llvm_unreachable("Should not custom lower this!");
2980 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
2981 case ISD::INSERT_SUBVECTOR: return LowerINSERT_VECTOR(Op, DAG);
2982 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR(Op, DAG);
2983 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_VECTOR(Op, DAG);
2984 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR(Op, DAG);
2985 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
2986 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
2987 case ISD::SRA:
2988 case ISD::SHL:
2989 case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG);
2990 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
2991 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
2992 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
2993 // Frame & Return address. Currently unimplemented.
2994 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
2995 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
2996 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
2997 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
2998 case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
2999 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3000 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
3001 case ISD::VASTART: return LowerVASTART(Op, DAG);
3002 // Custom lower some vector loads.
3003 case ISD::LOAD: return LowerLOAD(Op, DAG);
3004 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
3005 case ISD::SETCC: return LowerSETCC(Op, DAG);
3006 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
3007 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3008 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3009 case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
3010 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG);
3011 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
3012 }
3013 }
3015 /// Returns relocation base for the given PIC jumptable.
3016 SDValue
3017 HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3018 SelectionDAG &DAG) const {
3019 int Idx = cast<JumpTableSDNode>(Table)->getIndex();
3020 EVT VT = Table.getValueType();
3021 SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
3022 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Table), VT, T);
3023 }
3025 //===----------------------------------------------------------------------===//
3026 // Inline Assembly Support
3027 //===----------------------------------------------------------------------===//
3029 TargetLowering::ConstraintType
3030 HexagonTargetLowering::getConstraintType(StringRef Constraint) const {
3031 if (Constraint.size() == 1) {
3032 switch (Constraint[0]) {
3033 case 'q':
3034 case 'v':
3035 if (Subtarget.useHVXOps())
3036 return C_RegisterClass;
3037 break;
3038 }
3039 }
3040 return TargetLowering::getConstraintType(Constraint);
3041 }
3043 std::pair<unsigned, const TargetRegisterClass*>
3044 HexagonTargetLowering::getRegForInlineAsmConstraint(
3045 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
3046 bool UseHVX = Subtarget.useHVXOps(), UseHVXDbl = Subtarget.useHVXDblOps();
3048 if (Constraint.size() == 1) {
3049 switch (Constraint[0]) {
3050 case 'r': // R0-R31
3051 switch (VT.SimpleTy) {
3052 default:
3053 llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type");
3054 case MVT::i1:
3055 case MVT::i8:
3056 case MVT::i16:
3057 case MVT::i32:
3058 case MVT::f32:
3059 return std::make_pair(0U, &Hexagon::IntRegsRegClass);
3060 case MVT::i64:
3061 case MVT::f64:
3062 return std::make_pair(0U, &Hexagon::DoubleRegsRegClass);
3063 }
3064 case 'q': // q0-q3
3065 switch (VT.getSizeInBits()) {
3067 llvm_unreachable("getRegForInlineAsmConstraint Unhandled vector size");
3069 return std::make_pair(0U, &Hexagon::VecPredRegsRegClass);
3071 return std::make_pair(0U, &Hexagon::VecPredRegs128BRegClass);
3074 switch (VT.getSizeInBits()) {
3076 llvm_unreachable("getRegForInlineAsmConstraint Unhandled vector size");
3078 return std::make_pair(0U, &Hexagon::VectorRegsRegClass);
3080 if (Subtarget.hasV60TOps() && UseHVX && UseHVXDbl)
3081 return std::make_pair(0U, &Hexagon::VectorRegs128BRegClass);
3082 return std::make_pair(0U, &Hexagon::VecDblRegsRegClass);
3083 case 2048:
3084 return std::make_pair(0U, &Hexagon::VecDblRegs128BRegClass);
3085 }
3087 default:
3088 llvm_unreachable("Unknown asm register class");
3089 }
3090 }
3092 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3093 }
3095 /// isFPImmLegal - Returns true if the target can instruction select the
3096 /// specified FP immediate natively. If false, the legalizer will
3097 /// materialize the FP immediate as a load from a constant pool.
3098 bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3099 return Subtarget.hasV5TOps();
3100 }
3102 /// isLegalAddressingMode - Return true if the addressing mode represented by
3103 /// AM is legal for this target, for a load/store of the specified type.
3104 bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
3105 const AddrMode &AM, Type *Ty,
3106 unsigned AS) const {
3107 if (Ty->isSized()) {
3108 // When LSR detects uses of the same base address to access different
3109 // types (e.g. unions), it will assume a conservative type for these
3110 // uses:
3111 // LSR Use: Kind=Address of void in addrspace(4294967295), ...
3112 // The type Ty passed here would then be "void". Skip the alignment
3113 // checks, but do not return false right away, since that confuses
3114 // LSR into crashing.
3115 unsigned A = DL.getABITypeAlignment(Ty);
3116 // The base offset must be a multiple of the alignment.
3117 if ((AM.BaseOffs % A) != 0)
3118 return false;
3119 // The shifted offset must fit in 11 bits.
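// E.g., for an i32 access (ABI alignment 4) this accepts offsets that are
// multiples of 4 in roughly [-4096, 4092]: an 11-bit signed field scaled
// by the alignment.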
3120 if (!isInt<11>(AM.BaseOffs >> Log2_32(A)))
3121 return false;
3122 }
3124 // No global is ever allowed as a base.
3125 if (AM.BaseGV)
3126 return false;
3128 int Scale = AM.Scale;
3129 if (Scale < 0)
3130 Scale = -Scale;
3131 switch (Scale) {
3132 case 0: // No scale reg, "r+i", "r", or just "i".
3133 break;
3134 default: // No scaled addressing mode.
3135 return false;
3136 }
3137 return true;
3138 }
3140 /// Return true if folding a constant offset with the given GlobalAddress is
3141 /// legal. It is frequently not legal in PIC relocation models.
3142 bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
3143 const {
3144 return HTM.getRelocationModel() == Reloc::Static;
3145 }
3147 /// isLegalICmpImmediate - Return true if the specified immediate is legal
3148 /// icmp immediate, that is the target has icmp instructions which can compare
3149 /// a register against the immediate without having to materialize the
3150 /// immediate into a register.
3151 bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3152 return Imm >= -512 && Imm <= 511;
3153 }
3155 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3156 /// for tail call optimization. Targets which want to do tail call
3157 /// optimization should implement this function.
3158 bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
3159 SDValue Callee,
3160 CallingConv::ID CalleeCC,
3161 bool isVarArg,
3162 bool isCalleeStructRet,
3163 bool isCallerStructRet,
3164 const SmallVectorImpl<ISD::OutputArg> &Outs,
3165 const SmallVectorImpl<SDValue> &OutVals,
3166 const SmallVectorImpl<ISD::InputArg> &Ins,
3167 SelectionDAG& DAG) const {
3168 const Function *CallerF = DAG.getMachineFunction().getFunction();
3169 CallingConv::ID CallerCC = CallerF->getCallingConv();
3170 bool CCMatch = CallerCC == CalleeCC;
3172 // ***************************************************************************
3173 // Look for obvious safe cases to perform tail call optimization that do not
3174 // require ABI changes.
3175 // ***************************************************************************
3177 // If this is a tail call via a function pointer, then don't do it!
3178 if (!isa<GlobalAddressSDNode>(Callee) &&
3179 !isa<ExternalSymbolSDNode>(Callee)) {
3180 return false;
3181 }
3183 // Do not optimize if the calling conventions do not match and the conventions
3184 // used are not C or Fast.
3185 if (!CCMatch) {
3186 bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast);
3187 bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast);
3188 // If R & E, then ok.
3189 if (!R || !E)
3190 return false;
3191 }
3193 // Do not tail call optimize vararg calls.
3194 if (isVarArg)
3195 return false;
3197 // Also avoid tail call optimization if either caller or callee uses struct
3198 // return semantics.
3199 if (isCalleeStructRet || isCallerStructRet)
3200 return false;
3202 // In addition to the cases above, we also disable Tail Call Optimization if
3203 // the calling convention code that at least one outgoing argument needs to
3204 // go on the stack. We cannot check that here because at this point that
3205 // information is not available.
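// For example, a direct call to a known function with matching C calling
// conventions, no varargs, and no sret arguments passes all of the checks
// above, while any call through a function pointer is rejected up front
// because the callee is neither a GlobalAddressSDNode nor an
// ExternalSymbolSDNode.
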
/// Returns the target specific optimal type for load and store operations as
/// a result of memset, memcpy, and memmove lowering.
///
/// If DstAlign is zero, it is safe to assume the destination alignment can
/// satisfy any constraint. Similarly, if SrcAlign is zero, there is no need
/// to check it against an alignment requirement, probably because the
/// source does not need to be loaded. If 'IsMemset' is true, that means it's
/// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
/// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
/// does not need to be loaded. It returns EVT::Other if the type should be
/// determined using generic target-independent logic.
EVT HexagonTargetLowering::getOptimalMemOpType(uint64_t Size,
      unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset,
      bool MemcpyStrSrc, MachineFunction &MF) const {

  auto Aligned = [](unsigned GivenA, unsigned MinA) -> bool {
    return (GivenA % MinA) == 0;
  };

  if (Size >= 8 && Aligned(DstAlign, 8) && (IsMemset || Aligned(SrcAlign, 8)))
    return MVT::i64;
  if (Size >= 4 && Aligned(DstAlign, 4) && (IsMemset || Aligned(SrcAlign, 4)))
    return MVT::i32;
  if (Size >= 2 && Aligned(DstAlign, 2) && (IsMemset || Aligned(SrcAlign, 2)))
    return MVT::i16;

  return MVT::Other;
}

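// For example, copying 32 bytes between 8-byte-aligned pointers yields
// MVT::i64, so the copy can be emitted as doubleword loads and stores
// (subject to the target's inline-store limits); with only 2-byte alignment
// the same copy falls through to MVT::i16.
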
bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
      unsigned AS, unsigned Align, bool *Fast) const {
  if (Fast)
    *Fast = false;

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return false;
  case MVT::v64i8:
  case MVT::v128i8:
  case MVT::v256i8:
  case MVT::v32i16:
  case MVT::v64i16:
  case MVT::v128i16:
  case MVT::v16i32:
  case MVT::v32i32:
  case MVT::v64i32:
  case MVT::v8i64:
  case MVT::v16i64:
  case MVT::v32i64:
    return true;
  }
  return false;
}

std::pair<const TargetRegisterClass*, uint8_t>
HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
      MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;

  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(TRI, VT);
  // 64-byte HVX single vectors.
  case MVT::v64i8:
  case MVT::v32i16:
  case MVT::v16i32:
  case MVT::v8i64:
    RRC = &Hexagon::VectorRegsRegClass;
    break;
  // 128-byte vectors: a single 128B HVX vector in double-vector mode,
  // otherwise a pair of 64-byte vectors.
  case MVT::v128i8:
  case MVT::v64i16:
  case MVT::v32i32:
  case MVT::v16i64:
    if (Subtarget.hasV60TOps() && Subtarget.useHVXOps() &&
        Subtarget.useHVXDblOps())
      RRC = &Hexagon::VectorRegs128BRegClass;
    else
      RRC = &Hexagon::VecDblRegsRegClass;
    break;
  // 256-byte vectors: pairs of 128-byte HVX vectors.
  case MVT::v256i8:
  case MVT::v128i16:
  case MVT::v64i32:
  case MVT::v32i64:
    RRC = &Hexagon::VecDblRegs128BRegClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}

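// For example, v16i32 (one 64-byte HVX vector) maps to VectorRegsRegClass,
// while v32i32 maps to VectorRegs128BRegClass when 128-byte HVX mode is
// enabled and to VecDblRegsRegClass (a pair of 64-byte vectors) otherwise.
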
Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
      AtomicOrdering Ord) const {
  BasicBlock *BB = Builder.GetInsertBlock();
  Module *M = BB->getParent()->getParent();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  unsigned SZ = Ty->getPrimitiveSizeInBits();
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
                                   : Intrinsic::hexagon_L4_loadd_locked;
  Value *Fn = Intrinsic::getDeclaration(M, IntID);
  return Builder.CreateCall(Fn, Addr, "larx");
}

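// The call above produces IR such as
//   %larx = call i32 @llvm.hexagon.L2.loadw.locked(i32* %addr)
// which selects to the load-locked instruction (r0 = memw_locked(r1) for
// 32-bit values, memd_locked for 64-bit ones).
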
/// Perform a store-conditional operation to Addr. Return the status of the
/// store. This should be 0 if the store succeeded, non-zero otherwise.
Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
      Value *Val, Value *Addr, AtomicOrdering Ord) const {
  BasicBlock *BB = Builder.GetInsertBlock();
  Module *M = BB->getParent()->getParent();
  Type *Ty = Val->getType();
  unsigned SZ = Ty->getPrimitiveSizeInBits();
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
                                   : Intrinsic::hexagon_S4_stored_locked;
  Value *Fn = Intrinsic::getDeclaration(M, IntID);
  Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
  // The intrinsic returns a non-zero value when the store succeeds, so
  // invert it to match the 0-on-success contract documented above.
  Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
  Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
  return Ext;
}

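// AtomicExpand combines emitLoadLinked and emitStoreConditional into the
// usual LL/SC retry loop; roughly (sketch, types and ordering elided):
//   loop:
//     %v      = load-linked %addr
//     %newv   = <operate on %v>
//     %status = store-conditional %newv to %addr   ; 0 on success
//     branch to %loop if %status != 0, else to %done
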
TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  // Atomic loads of up to 64 bits are handled natively; only wider ones
  // are expanded.
  return LI->getType()->getPrimitiveSizeInBits() > 64
             ? AtomicExpansionKind::LLOnly
             : AtomicExpansionKind::None;
}

bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  // Atomic stores of up to 64 bits are handled natively; only wider ones
  // are expanded.
  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
}

bool HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
      AtomicCmpXchgInst *AI) const {
  const DataLayout &DL = AI->getModule()->getDataLayout();
  unsigned Size = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
  return Size >= 4 && Size <= 8;
}
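
// For example, a 4- or 8-byte cmpxchg is turned into an LL/SC loop built
// from emitLoadLinked and emitStoreConditional above; other sizes are not
// expanded here.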