1 //===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the interfaces that Hexagon uses to lower LLVM code
10 // into a selection DAG.
12 //===----------------------------------------------------------------------===//
14 #include "HexagonISelLowering.h"
16 #include "HexagonMachineFunctionInfo.h"
17 #include "HexagonRegisterInfo.h"
18 #include "HexagonSubtarget.h"
19 #include "HexagonTargetMachine.h"
20 #include "HexagonTargetObjectFile.h"
21 #include "llvm/ADT/APInt.h"
22 #include "llvm/ADT/ArrayRef.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/CodeGen/CallingConvLower.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/CodeGen/MachineFunction.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/RuntimeLibcalls.h"
31 #include "llvm/CodeGen/SelectionDAG.h"
32 #include "llvm/CodeGen/TargetCallingConv.h"
33 #include "llvm/CodeGen/ValueTypes.h"
34 #include "llvm/IR/BasicBlock.h"
35 #include "llvm/IR/CallingConv.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/DerivedTypes.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/GlobalValue.h"
40 #include "llvm/IR/InlineAsm.h"
41 #include "llvm/IR/Instructions.h"
42 #include "llvm/IR/IntrinsicInst.h"
43 #include "llvm/IR/Intrinsics.h"
44 #include "llvm/IR/IntrinsicsHexagon.h"
45 #include "llvm/IR/Module.h"
46 #include "llvm/IR/Type.h"
47 #include "llvm/IR/Value.h"
48 #include "llvm/MC/MCRegisterInfo.h"
49 #include "llvm/Support/Casting.h"
50 #include "llvm/Support/CodeGen.h"
51 #include "llvm/Support/CommandLine.h"
52 #include "llvm/Support/Debug.h"
53 #include "llvm/Support/ErrorHandling.h"
54 #include "llvm/Support/MathExtras.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include "llvm/Target/TargetMachine.h"
66 #define DEBUG_TYPE "hexagon-lowering"
68 static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
69 cl::init(true), cl::Hidden,
70 cl::desc("Control jump table emission on Hexagon target"));
72 static cl::opt<bool> EnableHexSDNodeSched("enable-hexagon-sdnode-sched",
73 cl::Hidden, cl::ZeroOrMore, cl::init(false),
74 cl::desc("Enable Hexagon SDNode scheduling"));
76 static cl::opt<bool> EnableFastMath("ffast-math",
77 cl::Hidden, cl::ZeroOrMore, cl::init(false),
78 cl::desc("Enable Fast Math processing"));
80 static cl::opt<int> MinimumJumpTables("minimum-jump-tables",
81 cl::Hidden, cl::ZeroOrMore, cl::init(5),
82 cl::desc("Set minimum jump tables"));
84 static cl::opt<int> MaxStoresPerMemcpyCL("max-store-memcpy",
85 cl::Hidden, cl::ZeroOrMore, cl::init(6),
86 cl::desc("Max #stores to inline memcpy"));
88 static cl::opt<int> MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os",
89 cl::Hidden, cl::ZeroOrMore, cl::init(4),
90 cl::desc("Max #stores to inline memcpy"));
92 static cl::opt<int> MaxStoresPerMemmoveCL("max-store-memmove",
93 cl::Hidden, cl::ZeroOrMore, cl::init(6),
94 cl::desc("Max #stores to inline memmove"));
96 static cl::opt<int> MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os",
97 cl::Hidden, cl::ZeroOrMore, cl::init(4),
98 cl::desc("Max #stores to inline memmove"));
100 static cl::opt<int> MaxStoresPerMemsetCL("max-store-memset",
101 cl::Hidden, cl::ZeroOrMore, cl::init(8),
102 cl::desc("Max #stores to inline memset"));
104 static cl::opt<int> MaxStoresPerMemsetOptSizeCL("max-store-memset-Os",
105 cl::Hidden, cl::ZeroOrMore, cl::init(4),
106 cl::desc("Max #stores to inline memset"));
108 static cl::opt<bool> AlignLoads("hexagon-align-loads",
109 cl::Hidden, cl::init(false),
110 cl::desc("Rewrite unaligned loads as a pair of aligned loads"));
113 DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden,
115 cl::desc("Disable minimum alignment of 1 for "
116 "arguments passed by value on stack"));
120 class HexagonCCState : public CCState {
121 unsigned NumNamedVarArgParams = 0;
124 HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
125 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
126 unsigned NumNamedArgs)
127 : CCState(CC, IsVarArg, MF, locs, C),
128 NumNamedVarArgParams(NumNamedArgs) {}
129 unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
132 } // end anonymous namespace
135 // Implement calling convention for Hexagon.
137 static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
138 CCValAssign::LocInfo &LocInfo,
139 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
140 static const MCPhysReg ArgRegs[] = {
141 Hexagon::R0, Hexagon::R1, Hexagon::R2,
142 Hexagon::R3, Hexagon::R4, Hexagon::R5
144 const unsigned NumArgRegs = array_lengthof(ArgRegs);
145 unsigned RegNum = State.getFirstUnallocated(ArgRegs);
147 // RegNum is an index into ArgRegs: skip a register if RegNum is odd.
148 if (RegNum != NumArgRegs && RegNum % 2 == 1)
149 State.AllocateReg(ArgRegs[RegNum]);
151 // Always return false here, as this function only makes sure that the first
152 // unallocated register has an even register number and does not actually
153 // allocate a register for the current argument.
157 #include "HexagonGenCallingConv.inc"
161 HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
166 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
167 /// by "Src" to address "Dst" of size "Size". Alignment information is
168 /// specified by the specific parameter attribute. The copy will be passed as
169 /// a byval function parameter. Sometimes what we are copying is the end of a
170 /// larger object, the part that does not fit in registers.
171 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
172 SDValue Chain, ISD::ArgFlagsTy Flags,
173 SelectionDAG &DAG, const SDLoc &dl) {
174 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
175 return DAG.getMemcpy(
176 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
177 /*isVolatile=*/false, /*AlwaysInline=*/false,
178 /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
182 HexagonTargetLowering::CanLowerReturn(
183 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
184 const SmallVectorImpl<ISD::OutputArg> &Outs,
185 LLVMContext &Context) const {
186 SmallVector<CCValAssign, 16> RVLocs;
187 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
189 if (MF.getSubtarget<HexagonSubtarget>().useHVXOps())
190 return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
191 return CCInfo.CheckReturn(Outs, RetCC_Hexagon);
194 // LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
195 // passed by value, the function prototype is modified to return void and
196 // the value is stored in memory pointed by a pointer passed by caller.
198 HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
200 const SmallVectorImpl<ISD::OutputArg> &Outs,
201 const SmallVectorImpl<SDValue> &OutVals,
202 const SDLoc &dl, SelectionDAG &DAG) const {
203 // CCValAssign - represent the assignment of the return value to locations.
204 SmallVector<CCValAssign, 16> RVLocs;
206 // CCState - Info about the registers and stack slot.
207 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
210 // Analyze return values of ISD::RET
211 if (Subtarget.useHVXOps())
212 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon_HVX);
214 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
217 SmallVector<SDValue, 4> RetOps(1, Chain);
219 // Copy the result values into the output registers.
220 for (unsigned i = 0; i != RVLocs.size(); ++i) {
221 CCValAssign &VA = RVLocs[i];
223 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
225 // Guarantee that all emitted copies are stuck together with flags.
226 Flag = Chain.getValue(1);
227 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
230 RetOps[0] = Chain; // Update chain.
232 // Add the flag if we have it.
234 RetOps.push_back(Flag);
236 return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
239 bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
240 // If either no tail call or told not to tail call at all, don't.
241 return CI->isTailCall();
244 Register HexagonTargetLowering::getRegisterByName(
245 const char* RegName, LLT VT, const MachineFunction &) const {
246 // Just support r19, the linux kernel uses it.
247 Register Reg = StringSwitch<Register>(RegName)
248 .Case("r0", Hexagon::R0)
249 .Case("r1", Hexagon::R1)
250 .Case("r2", Hexagon::R2)
251 .Case("r3", Hexagon::R3)
252 .Case("r4", Hexagon::R4)
253 .Case("r5", Hexagon::R5)
254 .Case("r6", Hexagon::R6)
255 .Case("r7", Hexagon::R7)
256 .Case("r8", Hexagon::R8)
257 .Case("r9", Hexagon::R9)
258 .Case("r10", Hexagon::R10)
259 .Case("r11", Hexagon::R11)
260 .Case("r12", Hexagon::R12)
261 .Case("r13", Hexagon::R13)
262 .Case("r14", Hexagon::R14)
263 .Case("r15", Hexagon::R15)
264 .Case("r16", Hexagon::R16)
265 .Case("r17", Hexagon::R17)
266 .Case("r18", Hexagon::R18)
267 .Case("r19", Hexagon::R19)
268 .Case("r20", Hexagon::R20)
269 .Case("r21", Hexagon::R21)
270 .Case("r22", Hexagon::R22)
271 .Case("r23", Hexagon::R23)
272 .Case("r24", Hexagon::R24)
273 .Case("r25", Hexagon::R25)
274 .Case("r26", Hexagon::R26)
275 .Case("r27", Hexagon::R27)
276 .Case("r28", Hexagon::R28)
277 .Case("r29", Hexagon::R29)
278 .Case("r30", Hexagon::R30)
279 .Case("r31", Hexagon::R31)
280 .Case("r1:0", Hexagon::D0)
281 .Case("r3:2", Hexagon::D1)
282 .Case("r5:4", Hexagon::D2)
283 .Case("r7:6", Hexagon::D3)
284 .Case("r9:8", Hexagon::D4)
285 .Case("r11:10", Hexagon::D5)
286 .Case("r13:12", Hexagon::D6)
287 .Case("r15:14", Hexagon::D7)
288 .Case("r17:16", Hexagon::D8)
289 .Case("r19:18", Hexagon::D9)
290 .Case("r21:20", Hexagon::D10)
291 .Case("r23:22", Hexagon::D11)
292 .Case("r25:24", Hexagon::D12)
293 .Case("r27:26", Hexagon::D13)
294 .Case("r29:28", Hexagon::D14)
295 .Case("r31:30", Hexagon::D15)
296 .Case("sp", Hexagon::R29)
297 .Case("fp", Hexagon::R30)
298 .Case("lr", Hexagon::R31)
299 .Case("p0", Hexagon::P0)
300 .Case("p1", Hexagon::P1)
301 .Case("p2", Hexagon::P2)
302 .Case("p3", Hexagon::P3)
303 .Case("sa0", Hexagon::SA0)
304 .Case("lc0", Hexagon::LC0)
305 .Case("sa1", Hexagon::SA1)
306 .Case("lc1", Hexagon::LC1)
307 .Case("m0", Hexagon::M0)
308 .Case("m1", Hexagon::M1)
309 .Case("usr", Hexagon::USR)
310 .Case("ugp", Hexagon::UGP)
311 .Default(Register());
315 report_fatal_error("Invalid register name global variable");
318 /// LowerCallResult - Lower the result values of an ISD::CALL into the
319 /// appropriate copies out of appropriate physical registers. This assumes that
320 /// Chain/Glue are the input chain/glue to use, and that TheCall is the call
321 /// being lowered. Returns a SDNode with the same number of values as the
323 SDValue HexagonTargetLowering::LowerCallResult(
324 SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg,
325 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
326 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
327 const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
328 // Assign locations to each value returned by this call.
329 SmallVector<CCValAssign, 16> RVLocs;
331 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
334 if (Subtarget.useHVXOps())
335 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon_HVX);
337 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
339 // Copy all of the result registers out of their specified physreg.
340 for (unsigned i = 0; i != RVLocs.size(); ++i) {
342 if (RVLocs[i].getValVT() == MVT::i1) {
343 // Return values of type MVT::i1 require special handling. The reason
344 // is that MVT::i1 is associated with the PredRegs register class, but
345 // values of that type are still returned in R0. Generate an explicit
346 // copy into a predicate register from R0, and treat the value of the
347 // predicate register as the call result.
348 auto &MRI = DAG.getMachineFunction().getRegInfo();
349 SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
351 // FR0 = (Value, Chain, Glue)
352 Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
353 SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR,
354 FR0.getValue(0), FR0.getValue(2));
355 // TPR = (Chain, Glue)
356 // Don't glue this CopyFromReg, because it copies from a virtual
357 // register. If it is glued to the call, InstrEmitter will add it
358 // as an implicit def to the call (EmitMachineNode).
359 RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1);
360 Glue = TPR.getValue(1);
361 Chain = TPR.getValue(0);
363 RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
364 RVLocs[i].getValVT(), Glue);
365 Glue = RetVal.getValue(2);
366 Chain = RetVal.getValue(1);
368 InVals.push_back(RetVal.getValue(0));
374 /// LowerCall - Functions arguments are copied from virtual regs to
375 /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
377 HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
378 SmallVectorImpl<SDValue> &InVals) const {
379 SelectionDAG &DAG = CLI.DAG;
381 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
382 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
383 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
384 SDValue Chain = CLI.Chain;
385 SDValue Callee = CLI.Callee;
386 CallingConv::ID CallConv = CLI.CallConv;
387 bool IsVarArg = CLI.IsVarArg;
388 bool DoesNotReturn = CLI.DoesNotReturn;
390 bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
391 MachineFunction &MF = DAG.getMachineFunction();
392 MachineFrameInfo &MFI = MF.getFrameInfo();
393 auto PtrVT = getPointerTy(MF.getDataLayout());
395 unsigned NumParams = CLI.CB ? CLI.CB->getFunctionType()->getNumParams() : 0;
396 if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee))
397 Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32);
399 // Linux ABI treats var-arg calls the same way as regular ones.
400 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;
402 // Analyze operands of the call, assigning locations to each operand.
403 SmallVector<CCValAssign, 16> ArgLocs;
404 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext(),
407 if (Subtarget.useHVXOps())
408 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
409 else if (DisableArgsMinAlignment)
410 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_Legacy);
412 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
414 if (CLI.IsTailCall) {
415 bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
416 CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
417 IsVarArg, IsStructRet, StructAttrFlag, Outs,
419 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
420 CCValAssign &VA = ArgLocs[i];
422 CLI.IsTailCall = false;
426 LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
427 : "Argument must be passed on stack. "
428 "Not eligible for Tail Call\n"));
430 // Get a count of how many bytes are to be pushed on the stack.
431 unsigned NumBytes = CCInfo.getNextStackOffset();
432 SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
433 SmallVector<SDValue, 8> MemOpChains;
435 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
437 DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);
439 bool NeedsArgAlign = false;
440 Align LargestAlignSeen;
441 // Walk the register/memloc assignments, inserting copies/loads.
442 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
443 CCValAssign &VA = ArgLocs[i];
444 SDValue Arg = OutVals[i];
445 ISD::ArgFlagsTy Flags = Outs[i].Flags;
446 // Record if we need > 8 byte alignment on an argument.
447 bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT());
448 NeedsArgAlign |= ArgAlign;
450 // Promote the value if needed.
451 switch (VA.getLocInfo()) {
453 // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
454 llvm_unreachable("Unknown loc info!");
455 case CCValAssign::Full:
457 case CCValAssign::BCvt:
458 Arg = DAG.getBitcast(VA.getLocVT(), Arg);
460 case CCValAssign::SExt:
461 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
463 case CCValAssign::ZExt:
464 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
466 case CCValAssign::AExt:
467 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
472 unsigned LocMemOffset = VA.getLocMemOffset();
473 SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
474 StackPtr.getValueType());
475 MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
477 LargestAlignSeen = std::max(
478 LargestAlignSeen, Align(VA.getLocVT().getStoreSizeInBits() / 8));
479 if (Flags.isByVal()) {
480 // The argument is a struct passed by value. According to LLVM, "Arg"
482 MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
485 MachinePointerInfo LocPI = MachinePointerInfo::getStack(
486 DAG.getMachineFunction(), LocMemOffset);
487 SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
488 MemOpChains.push_back(S);
493 // Arguments that can be passed on register must be kept at RegsToPass
496 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
499 if (NeedsArgAlign && Subtarget.hasV60Ops()) {
500 LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
501 Align VecAlign(HRI.getSpillAlignment(Hexagon::HvxVRRegClass));
502 LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
503 MFI.ensureMaxAlignment(LargestAlignSeen);
505 // Transform all store nodes into one single node because all store
506 // nodes are independent of each other.
507 if (!MemOpChains.empty())
508 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
511 if (!CLI.IsTailCall) {
512 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
513 Glue = Chain.getValue(1);
516 // Build a sequence of copy-to-reg nodes chained together with token
517 // chain and flag operands which copy the outgoing args into registers.
518 // The Glue is necessary since all emitted instructions must be
520 if (!CLI.IsTailCall) {
521 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
522 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
523 RegsToPass[i].second, Glue);
524 Glue = Chain.getValue(1);
527 // For tail calls lower the arguments to the 'real' stack slot.
529 // Force all the incoming stack arguments to be loaded from the stack
530 // before any new outgoing arguments are stored to the stack, because the
531 // outgoing stack slots may alias the incoming argument stack slots, and
532 // the alias isn't otherwise explicit. This is slightly more conservative
533 // than necessary, because it means that each store effectively depends
534 // on every argument instead of just those arguments it would clobber.
536 // Do not flag preceding copytoreg stuff together with the following stuff.
538 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
539 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
540 RegsToPass[i].second, Glue);
541 Glue = Chain.getValue(1);
546 bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls();
547 unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0;
549 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
550 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
551 // node so that legalize doesn't hack it.
552 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
553 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags);
554 } else if (ExternalSymbolSDNode *S =
555 dyn_cast<ExternalSymbolSDNode>(Callee)) {
556 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags);
559 // Returns a chain & a flag for retval copy to use.
560 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
561 SmallVector<SDValue, 8> Ops;
562 Ops.push_back(Chain);
563 Ops.push_back(Callee);
565 // Add argument registers to the end of the list so that they are
566 // known live into the call.
567 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
568 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
569 RegsToPass[i].second.getValueType()));
572 const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
573 assert(Mask && "Missing call preserved mask for calling convention");
574 Ops.push_back(DAG.getRegisterMask(Mask));
579 if (CLI.IsTailCall) {
580 MFI.setHasTailCall();
581 return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
584 // Set this here because we need to know this for "hasFP" in frame lowering.
585 // The target-independent code calls getFrameRegister before setting it, and
586 // getFrameRegister uses hasFP to determine whether the function has FP.
587 MFI.setHasCalls(true);
589 unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
590 Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
591 Glue = Chain.getValue(1);
593 // Create the CALLSEQ_END node.
594 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
595 DAG.getIntPtrConstant(0, dl, true), Glue, dl);
596 Glue = Chain.getValue(1);
598 // Handle result values, copying them out of physregs into vregs that we
600 return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
601 InVals, OutVals, Callee);
604 /// Returns true by value, base pointer and offset pointer and addressing
605 /// mode by reference if this node can be combined with a load / store to
606 /// form a post-indexed load / store.
607 bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
608 SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM,
609 SelectionDAG &DAG) const {
610 LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(N);
613 EVT VT = LSN->getMemoryVT();
616 bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
617 VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
618 VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
619 VT == MVT::v4i16 || VT == MVT::v8i8 ||
620 Subtarget.isHVXVectorType(VT.getSimpleVT());
624 if (Op->getOpcode() != ISD::ADD)
626 Base = Op->getOperand(0);
627 Offset = Op->getOperand(1);
628 if (!isa<ConstantSDNode>(Offset.getNode()))
632 int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
633 return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V);
637 HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
638 MachineFunction &MF = DAG.getMachineFunction();
639 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
640 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
641 unsigned LR = HRI.getRARegister();
643 if ((Op.getOpcode() != ISD::INLINEASM &&
644 Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR())
647 unsigned NumOps = Op.getNumOperands();
648 if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
649 --NumOps; // Ignore the flag operand.
651 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
652 unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue();
653 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
654 ++i; // Skip the ID value.
656 switch (InlineAsm::getKind(Flags)) {
658 llvm_unreachable("Bad flags!");
659 case InlineAsm::Kind_RegUse:
660 case InlineAsm::Kind_Imm:
661 case InlineAsm::Kind_Mem:
664 case InlineAsm::Kind_Clobber:
665 case InlineAsm::Kind_RegDef:
666 case InlineAsm::Kind_RegDefEarlyClobber: {
667 for (; NumVals; --NumVals, ++i) {
668 unsigned Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
671 HMFI.setHasClobberLR(true);
682 // Need to transform ISD::PREFETCH into something that doesn't inherit
683 // all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
685 SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
686 SelectionDAG &DAG) const {
687 SDValue Chain = Op.getOperand(0);
688 SDValue Addr = Op.getOperand(1);
689 // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in,
690 // if the "reg" is fed by an "add".
692 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
693 return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
696 // Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
697 // is marked as having side-effects, while the register read on Hexagon does
698 // not have any. TableGen refuses to accept the direct pattern from that node
700 SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
701 SelectionDAG &DAG) const {
702 SDValue Chain = Op.getOperand(0);
704 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
705 return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
708 SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
709 SelectionDAG &DAG) const {
710 SDValue Chain = Op.getOperand(0);
711 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
712 // Lower the hexagon_prefetch builtin to DCFETCH, as above.
713 if (IntNo == Intrinsic::hexagon_prefetch) {
714 SDValue Addr = Op.getOperand(2);
716 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
717 return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
723 HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
724 SelectionDAG &DAG) const {
725 SDValue Chain = Op.getOperand(0);
726 SDValue Size = Op.getOperand(1);
727 SDValue Align = Op.getOperand(2);
730 ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Align);
731 assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");
733 unsigned A = AlignConst->getSExtValue();
734 auto &HFI = *Subtarget.getFrameLowering();
735 // "Zero" means natural stack alignment.
737 A = HFI.getStackAlign().value();
740 dbgs () << __func__ << " Align: " << A << " Size: ";
741 Size.getNode()->dump(&DAG);
745 SDValue AC = DAG.getConstant(A, dl, MVT::i32);
746 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
747 SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);
749 DAG.ReplaceAllUsesOfValueWith(Op, AA);
753 SDValue HexagonTargetLowering::LowerFormalArguments(
754 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
755 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
756 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
757 MachineFunction &MF = DAG.getMachineFunction();
758 MachineFrameInfo &MFI = MF.getFrameInfo();
759 MachineRegisterInfo &MRI = MF.getRegInfo();
761 // Linux ABI treats var-arg calls the same way as regular ones.
762 bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;
764 // Assign locations to all of the incoming arguments.
765 SmallVector<CCValAssign, 16> ArgLocs;
766 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,
768 MF.getFunction().getFunctionType()->getNumParams());
770 if (Subtarget.useHVXOps())
771 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
772 else if (DisableArgsMinAlignment)
773 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_Legacy);
775 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
777 // For LLVM, in the case when returning a struct by value (>8byte),
778 // the first argument is a pointer that points to the location on caller's
779 // stack where the return value will be stored. For Hexagon, the location on
780 // caller's stack is passed only when the struct size is smaller than (and
781 // equal to) 8 bytes. If not, no address will be passed into callee and
782 // callee return the result direclty through R0/R1.
783 auto NextSingleReg = [] (const TargetRegisterClass &RC, unsigned Reg) {
784 switch (RC.getID()) {
785 case Hexagon::IntRegsRegClassID:
786 return Reg - Hexagon::R0 + 1;
787 case Hexagon::DoubleRegsRegClassID:
788 return (Reg - Hexagon::D0 + 1) * 2;
789 case Hexagon::HvxVRRegClassID:
790 return Reg - Hexagon::V0 + 1;
791 case Hexagon::HvxWRRegClassID:
792 return (Reg - Hexagon::W0 + 1) * 2;
794 llvm_unreachable("Unexpected register class");
797 auto &HFL = const_cast<HexagonFrameLowering&>(*Subtarget.getFrameLowering());
798 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
799 HFL.FirstVarArgSavedReg = 0;
800 HMFI.setFirstNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));
802 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
803 CCValAssign &VA = ArgLocs[i];
804 ISD::ArgFlagsTy Flags = Ins[i].Flags;
805 bool ByVal = Flags.isByVal();
807 // Arguments passed in registers:
808 // 1. 32- and 64-bit values and HVX vectors are passed directly,
809 // 2. Large structs are passed via an address, and the address is
810 // passed in a register.
811 if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
812 llvm_unreachable("ByValSize must be bigger than 8 bytes");
814 bool InReg = VA.isRegLoc() &&
815 (!ByVal || (ByVal && Flags.getByValSize() > 8));
818 MVT RegVT = VA.getLocVT();
819 if (VA.getLocInfo() == CCValAssign::BCvt)
820 RegVT = VA.getValVT();
822 const TargetRegisterClass *RC = getRegClassFor(RegVT);
823 Register VReg = MRI.createVirtualRegister(RC);
824 SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
826 // Treat values of type MVT::i1 specially: they are passed in
827 // registers of type i32, but they need to remain as values of
828 // type i1 for consistency of the argument lowering.
829 if (VA.getValVT() == MVT::i1) {
830 assert(RegVT.getSizeInBits() <= 32);
831 SDValue T = DAG.getNode(ISD::AND, dl, RegVT,
832 Copy, DAG.getConstant(1, dl, RegVT));
833 Copy = DAG.getSetCC(dl, MVT::i1, T, DAG.getConstant(0, dl, RegVT),
837 unsigned RegSize = RegVT.getSizeInBits();
838 assert(RegSize == 32 || RegSize == 64 ||
839 Subtarget.isHVXVectorType(RegVT));
842 InVals.push_back(Copy);
843 MRI.addLiveIn(VA.getLocReg(), VReg);
844 HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());
846 assert(VA.isMemLoc() && "Argument should be passed in memory");
848 // If it's a byval parameter, then we need to compute the
849 // "real" size, not the size of the pointer.
850 unsigned ObjSize = Flags.isByVal()
851 ? Flags.getByValSize()
852 : VA.getLocVT().getStoreSizeInBits() / 8;
854 // Create the frame index object for this incoming parameter.
855 int Offset = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
856 int FI = MFI.CreateFixedObject(ObjSize, Offset, true);
857 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
859 if (Flags.isByVal()) {
860 // If it's a pass-by-value aggregate, then do not dereference the stack
861 // location. Instead, we should generate a reference to the stack
863 InVals.push_back(FIN);
865 SDValue L = DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
866 MachinePointerInfo::getFixedStack(MF, FI, 0));
872 if (IsVarArg && Subtarget.isEnvironmentMusl()) {
873 for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
874 MRI.addLiveIn(Hexagon::R0+i);
877 if (IsVarArg && Subtarget.isEnvironmentMusl()) {
878 HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
879 HMFI.setLastNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));
881 // Create Frame index for the start of register saved area.
882 int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
883 bool RequiresPadding = (NumVarArgRegs & 1);
884 int RegSaveAreaSizePlusPadding = RequiresPadding
885 ? (NumVarArgRegs + 1) * 4
888 if (RegSaveAreaSizePlusPadding > 0) {
889 // The offset to saved register area should be 8 byte aligned.
890 int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
891 if (!(RegAreaStart % 8))
892 RegAreaStart = (RegAreaStart + 7) & -8;
894 int RegSaveAreaFrameIndex =
895 MFI.CreateFixedObject(RegSaveAreaSizePlusPadding, RegAreaStart, true);
896 HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);
898 // This will point to the next argument passed via stack.
899 int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
900 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
901 HMFI.setVarArgsFrameIndex(FI);
903 // This will point to the next argument passed via stack, when
904 // there is no saved register area.
905 int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
906 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
907 HMFI.setRegSavedAreaStartFrameIndex(FI);
908 HMFI.setVarArgsFrameIndex(FI);
913 if (IsVarArg && !Subtarget.isEnvironmentMusl()) {
914 // This will point to the next argument passed via stack.
915 int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
916 int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
917 HMFI.setVarArgsFrameIndex(FI);
924 HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
925 // VASTART stores the address of the VarArgsFrameIndex slot into the
926 // memory location argument.
927 MachineFunction &MF = DAG.getMachineFunction();
928 HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
929 SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
930 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
932 if (!Subtarget.isEnvironmentMusl()) {
933 return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),
934 MachinePointerInfo(SV));
936 auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
937 auto &HFL = *Subtarget.getFrameLowering();
939 SmallVector<SDValue, 8> MemOps;
941 // Get frame index of va_list.
942 SDValue FIN = Op.getOperand(1);
944 // If first Vararg register is odd, add 4 bytes to start of
945 // saved register area to point to the first register location.
946 // This is because the saved register area has to be 8 byte aligned.
947 // Incase of an odd start register, there will be 4 bytes of padding in
948 // the beginning of saved register area. If all registers area used up,
949 // the following condition will handle it correctly.
950 SDValue SavedRegAreaStartFrameIndex =
951 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);
953 auto PtrVT = getPointerTy(DAG.getDataLayout());
955 if (HFL.FirstVarArgSavedReg & 1)
956 SavedRegAreaStartFrameIndex =
957 DAG.getNode(ISD::ADD, DL, PtrVT,
958 DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),
960 DAG.getIntPtrConstant(4, DL));
962 // Store the saved register area start pointer.
964 DAG.getStore(Op.getOperand(0), DL,
965 SavedRegAreaStartFrameIndex,
966 FIN, MachinePointerInfo(SV));
967 MemOps.push_back(Store);
969 // Store saved register area end pointer.
970 FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
971 FIN, DAG.getIntPtrConstant(4, DL));
972 Store = DAG.getStore(Op.getOperand(0), DL,
973 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
975 FIN, MachinePointerInfo(SV, 4));
976 MemOps.push_back(Store);
978 // Store overflow area pointer.
979 FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
980 FIN, DAG.getIntPtrConstant(4, DL));
981 Store = DAG.getStore(Op.getOperand(0), DL,
982 DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
984 FIN, MachinePointerInfo(SV, 8));
985 MemOps.push_back(Store);
987 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
991 HexagonTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
992 // Assert that the linux ABI is enabled for the current compilation.
993 assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled");
994 SDValue Chain = Op.getOperand(0);
995 SDValue DestPtr = Op.getOperand(1);
996 SDValue SrcPtr = Op.getOperand(2);
997 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
998 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
1000 // Size of the va_list is 12 bytes as it has 3 pointers. Therefore,
1001 // we need to memcopy 12 bytes from va_list to another similar list.
1002 return DAG.getMemcpy(Chain, DL, DestPtr, SrcPtr,
1003 DAG.getIntPtrConstant(12, DL), Align(4),
1004 /*isVolatile*/ false, false, false,
1005 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
1008 SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
1009 const SDLoc &dl(Op);
1010 SDValue LHS = Op.getOperand(0);
1011 SDValue RHS = Op.getOperand(1);
1012 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
1016 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1017 MVT ElemTy = OpTy.getVectorElementType();
1018 assert(ElemTy.isScalarInteger());
1019 MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
1020 OpTy.getVectorNumElements());
1021 return DAG.getSetCC(dl, ResTy,
1022 DAG.getSExtOrTrunc(LHS, SDLoc(LHS), WideTy),
1023 DAG.getSExtOrTrunc(RHS, SDLoc(RHS), WideTy), CC);
1026 // Treat all other vector types as legal.
1027 if (ResTy.isVector())
1030 // Comparisons of short integers should use sign-extend, not zero-extend,
1031 // since we can represent small negative values in the compare instructions.
1032 // The LLVM default is to use zero-extend arbitrarily in these cases.
1033 auto isSExtFree = [this](SDValue N) {
1034 switch (N.getOpcode()) {
1035 case ISD::TRUNCATE: {
1036 // A sign-extend of a truncate of a sign-extend is free.
1037 SDValue Op = N.getOperand(0);
1038 if (Op.getOpcode() != ISD::AssertSext)
1040 EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT();
1041 unsigned ThisBW = ty(N).getSizeInBits();
1042 unsigned OrigBW = OrigTy.getSizeInBits();
1043 // The type that was sign-extended to get the AssertSext must be
1044 // narrower than the type of N (so that N has still the same value
1045 // as the original).
1046 return ThisBW >= OrigBW;
1049 // We have sign-extended loads.
1055 if (OpTy == MVT::i8 || OpTy == MVT::i16) {
1056 ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS);
1057 bool IsNegative = C && C->getAPIntValue().isNegative();
1058 if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
1059 return DAG.getSetCC(dl, ResTy,
1060 DAG.getSExtOrTrunc(LHS, SDLoc(LHS), MVT::i32),
1061 DAG.getSExtOrTrunc(RHS, SDLoc(RHS), MVT::i32), CC);
1068 HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
1069 SDValue PredOp = Op.getOperand(0);
1070 SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
1072 const SDLoc &dl(Op);
1074 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1075 MVT ElemTy = OpTy.getVectorElementType();
1076 assert(ElemTy.isScalarInteger());
1077 MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
1078 OpTy.getVectorNumElements());
1079 // Generate (trunc (select (_, sext, sext))).
1080 return DAG.getSExtOrTrunc(
1081 DAG.getSelect(dl, WideTy, PredOp,
1082 DAG.getSExtOrTrunc(Op1, dl, WideTy),
1083 DAG.getSExtOrTrunc(Op2, dl, WideTy)),
1091 HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
1092 EVT ValTy = Op.getValueType();
1093 ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Op);
1094 Constant *CVal = nullptr;
1095 bool isVTi1Type = false;
1096 if (auto *CV = dyn_cast<ConstantVector>(CPN->getConstVal())) {
1097 if (cast<VectorType>(CV->getType())->getElementType()->isIntegerTy(1)) {
1098 IRBuilder<> IRB(CV->getContext());
1099 SmallVector<Constant*, 128> NewConst;
1100 unsigned VecLen = CV->getNumOperands();
1101 assert(isPowerOf2_32(VecLen) &&
1102 "conversion only supported for pow2 VectorSize");
1103 for (unsigned i = 0; i < VecLen; ++i)
1104 NewConst.push_back(IRB.getInt8(CV->getOperand(i)->isZeroValue()));
1106 CVal = ConstantVector::get(NewConst);
1110 Align Alignment = CPN->getAlign();
1111 bool IsPositionIndependent = isPositionIndependent();
1112 unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0;
1114 unsigned Offset = 0;
1116 if (CPN->isMachineConstantPoolEntry())
1117 T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Alignment,
1119 else if (isVTi1Type)
1120 T = DAG.getTargetConstantPool(CVal, ValTy, Alignment, Offset, TF);
1122 T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Alignment, Offset,
1125 assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF &&
1126 "Inconsistent target flag encountered");
1128 if (IsPositionIndependent)
1129 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), ValTy, T);
1130 return DAG.getNode(HexagonISD::CP, SDLoc(Op), ValTy, T);
1134 HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
1135 EVT VT = Op.getValueType();
1136 int Idx = cast<JumpTableSDNode>(Op)->getIndex();
1137 if (isPositionIndependent()) {
1138 SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
1139 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), VT, T);
1142 SDValue T = DAG.getTargetJumpTable(Idx, VT);
1143 return DAG.getNode(HexagonISD::JT, SDLoc(Op), VT, T);
1147 HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
1148 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1149 MachineFunction &MF = DAG.getMachineFunction();
1150 MachineFrameInfo &MFI = MF.getFrameInfo();
1151 MFI.setReturnAddressIsTaken(true);
1153 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
1156 EVT VT = Op.getValueType();
1158 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1160 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
1161 SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
1162 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
1163 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
1164 MachinePointerInfo());
1167 // Return LR, which contains the return address. Mark it an implicit live-in.
1168 unsigned Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
1169 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
1173 HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
1174 const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
1175 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1176 MFI.setFrameAddressIsTaken(true);
1178 EVT VT = Op.getValueType();
1180 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1181 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
1182 HRI.getFrameRegister(), VT);
1184 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
1185 MachinePointerInfo());
1190 HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const {
1192 return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
1196 HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
1198 auto *GAN = cast<GlobalAddressSDNode>(Op);
1199 auto PtrVT = getPointerTy(DAG.getDataLayout());
1200 auto *GV = GAN->getGlobal();
1201 int64_t Offset = GAN->getOffset();
1203 auto &HLOF = *HTM.getObjFileLowering();
1204 Reloc::Model RM = HTM.getRelocationModel();
1206 if (RM == Reloc::Static) {
1207 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
1208 const GlobalObject *GO = GV->getBaseObject();
1209 if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
1210 return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA);
1211 return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA);
1214 bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
1216 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset,
1217 HexagonII::MO_PCREL);
1218 return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, GA);
1222 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
1223 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, HexagonII::MO_GOT);
1224 SDValue Off = DAG.getConstant(Offset, dl, MVT::i32);
1225 return DAG.getNode(HexagonISD::AT_GOT, dl, PtrVT, GOT, GA, Off);
1228 // Specifies that for loads and stores VT can be promoted to PromotedLdStVT.
1230 HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1231 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1233 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1235 Reloc::Model RM = HTM.getRelocationModel();
1236 if (RM == Reloc::Static) {
1237 SDValue A = DAG.getTargetBlockAddress(BA, PtrVT);
1238 return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A);
1241 SDValue A = DAG.getTargetBlockAddress(BA, PtrVT, 0, HexagonII::MO_PCREL);
1242 return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, A);
1246 HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG)
1248 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1249 SDValue GOTSym = DAG.getTargetExternalSymbol(HEXAGON_GOT_SYM_NAME, PtrVT,
1250 HexagonII::MO_PCREL);
1251 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), PtrVT, GOTSym);
1255 HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
1256 GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg,
1257 unsigned char OperandFlags) const {
1258 MachineFunction &MF = DAG.getMachineFunction();
1259 MachineFrameInfo &MFI = MF.getFrameInfo();
1260 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1262 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
1263 GA->getValueType(0),
1266 // Create Operands for the call.The Operands should have the following:
1268 // 2. Callee which in this case is the Global address value.
1269 // 3. Registers live into the call.In this case its R0, as we
1270 // have just one argument to be passed.
1272 // Note: The order is important.
1274 const auto &HRI = *Subtarget.getRegisterInfo();
1275 const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C);
1276 assert(Mask && "Missing call preserved mask for calling convention");
1277 SDValue Ops[] = { Chain, TGA, DAG.getRegister(Hexagon::R0, PtrVT),
1278 DAG.getRegisterMask(Mask), Glue };
1279 Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);
1281 // Inform MFI that function has calls.
1282 MFI.setAdjustsStack(true);
1284 Glue = Chain.getValue(1);
1285 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
1289 // Lower using the intial executable model for TLS addresses
1292 HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
1293 SelectionDAG &DAG) const {
1295 int64_t Offset = GA->getOffset();
1296 auto PtrVT = getPointerTy(DAG.getDataLayout());
1298 // Get the thread pointer.
1299 SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
1301 bool IsPositionIndependent = isPositionIndependent();
1303 IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE;
1305 // First generate the TLS symbol address
1306 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT,
1309 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
1311 if (IsPositionIndependent) {
1312 // Generate the GOT pointer in case of position independent code
1313 SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Sym, DAG);
1315 // Add the TLS Symbol address to GOT pointer.This gives
1316 // GOT relative relocation for the symbol.
1317 Sym = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
1320 // Load the offset value for TLS symbol.This offset is relative to
1322 SDValue LoadOffset =
1323 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Sym, MachinePointerInfo());
1325 // Address of the thread local variable is the add of thread
1326 // pointer and the offset of the variable.
1327 return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset);
1331 // Lower using the local executable model for TLS addresses
1334 HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
1335 SelectionDAG &DAG) const {
1337 int64_t Offset = GA->getOffset();
1338 auto PtrVT = getPointerTy(DAG.getDataLayout());
1340 // Get the thread pointer.
1341 SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
1342 // Generate the TLS symbol address
1343 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
1344 HexagonII::MO_TPREL);
1345 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
1347 // Address of the thread local variable is the add of thread
1348 // pointer and the offset of the variable.
1349 return DAG.getNode(ISD::ADD, dl, PtrVT, TP, Sym);
1353 // Lower using the general dynamic model for TLS addresses
1356 HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
1357 SelectionDAG &DAG) const {
1359 int64_t Offset = GA->getOffset();
1360 auto PtrVT = getPointerTy(DAG.getDataLayout());
1362 // First generate the TLS symbol address
1363 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
1364 HexagonII::MO_GDGOT);
1366 // Then, generate the GOT pointer
1367 SDValue GOT = LowerGLOBAL_OFFSET_TABLE(TGA, DAG);
1369 // Add the TLS symbol and the GOT pointer
1370 SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
1371 SDValue Chain = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
1373 // Copy over the argument to R0
1375 Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InFlag);
1376 InFlag = Chain.getValue(1);
1379 static_cast<const HexagonSubtarget &>(DAG.getSubtarget()).useLongCalls()
1380 ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
1381 : HexagonII::MO_GDPLT;
1383 return GetDynamicTLSAddr(DAG, Chain, GA, InFlag, PtrVT,
1384 Hexagon::R0, Flags);
1388 // Lower TLS addresses.
1390 // For now for dynamic models, we only support the general dynamic model.
1393 HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1394 SelectionDAG &DAG) const {
1395 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
1397 switch (HTM.getTLSModel(GA->getGlobal())) {
1398 case TLSModel::GeneralDynamic:
1399 case TLSModel::LocalDynamic:
1400 return LowerToTLSGeneralDynamicModel(GA, DAG);
1401 case TLSModel::InitialExec:
1402 return LowerToTLSInitialExecModel(GA, DAG);
1403 case TLSModel::LocalExec:
1404 return LowerToTLSLocalExecModel(GA, DAG);
1406 llvm_unreachable("Bogus TLS model");
1409 //===----------------------------------------------------------------------===//
1410 // TargetLowering Implementation
1411 //===----------------------------------------------------------------------===//
1413 HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
1414 const HexagonSubtarget &ST)
1415 : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)),
1417 auto &HRI = *Subtarget.getRegisterInfo();
1419 setPrefLoopAlignment(Align(16));
1420 setMinFunctionAlignment(Align(4));
1421 setPrefFunctionAlignment(Align(16));
1422 setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
1423 setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
1424 setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);
1426 setMaxAtomicSizeInBitsSupported(64);
1427 setMinCmpXchgSizeInBits(32);
1429 if (EnableHexSDNodeSched)
1430 setSchedulingPreference(Sched::VLIW);
1432 setSchedulingPreference(Sched::Source);
1434 // Limits for inline expansion of memcpy/memmove
1435 MaxStoresPerMemcpy = MaxStoresPerMemcpyCL;
1436 MaxStoresPerMemcpyOptSize = MaxStoresPerMemcpyOptSizeCL;
1437 MaxStoresPerMemmove = MaxStoresPerMemmoveCL;
1438 MaxStoresPerMemmoveOptSize = MaxStoresPerMemmoveOptSizeCL;
1439 MaxStoresPerMemset = MaxStoresPerMemsetCL;
1440 MaxStoresPerMemsetOptSize = MaxStoresPerMemsetOptSizeCL;
1443 // Set up register classes.
1446 addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
1447 addRegisterClass(MVT::v2i1, &Hexagon::PredRegsRegClass); // bbbbaaaa
1448 addRegisterClass(MVT::v4i1, &Hexagon::PredRegsRegClass); // ddccbbaa
1449 addRegisterClass(MVT::v8i1, &Hexagon::PredRegsRegClass); // hgfedcba
1450 addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
1451 addRegisterClass(MVT::v2i16, &Hexagon::IntRegsRegClass);
1452 addRegisterClass(MVT::v4i8, &Hexagon::IntRegsRegClass);
1453 addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
1454 addRegisterClass(MVT::v8i8, &Hexagon::DoubleRegsRegClass);
1455 addRegisterClass(MVT::v4i16, &Hexagon::DoubleRegsRegClass);
1456 addRegisterClass(MVT::v2i32, &Hexagon::DoubleRegsRegClass);
1458 addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
1459 addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
1462 // Handling of scalar operations.
1464 // All operations default to "legal", except:
1465 // - indexed loads and stores (pre-/post-incremented),
1466 // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS,
1467 // ConstantFP, DEBUGTRAP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN,
1468 // FLOG, FLOG2, FLOG10, FMAXNUM, FMINNUM, FNEARBYINT, FRINT, FROUND, TRAP,
1469 // FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, ZERO_EXTEND_VECTOR_INREG,
1470 // which default to "expand" for at least one type.
1473 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
1474 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
1475 setOperationAction(ISD::TRAP, MVT::Other, Legal);
1476 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
1477 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
1478 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
1479 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1480 setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
1481 setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom);
1482 setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
1483 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1484 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1485 setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
1486 setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
1487 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
1488 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
1490 // Custom legalize GlobalAddress nodes into CONST32.
1491 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
1492 setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
1493 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1495 // Hexagon needs to optimize cases with negative constants.
1496 setOperationAction(ISD::SETCC, MVT::i8, Custom);
1497 setOperationAction(ISD::SETCC, MVT::i16, Custom);
1498 setOperationAction(ISD::SETCC, MVT::v4i8, Custom);
1499 setOperationAction(ISD::SETCC, MVT::v2i16, Custom);
1501 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1502 setOperationAction(ISD::VASTART, MVT::Other, Custom);
1503 setOperationAction(ISD::VAEND, MVT::Other, Expand);
1504 setOperationAction(ISD::VAARG, MVT::Other, Expand);
1505 if (Subtarget.isEnvironmentMusl())
1506 setOperationAction(ISD::VACOPY, MVT::Other, Custom);
1508 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
1510 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
1511 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
1512 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
1515 setMinimumJumpTableEntries(MinimumJumpTables);
1517 setMinimumJumpTableEntries(std::numeric_limits<unsigned>::max());
1518 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1520 setOperationAction(ISD::ABS, MVT::i32, Legal);
1521 setOperationAction(ISD::ABS, MVT::i64, Legal);
1523 // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit,
1524 // but they only operate on i64.
1525 for (MVT VT : MVT::integer_valuetypes()) {
1526 setOperationAction(ISD::UADDO, VT, Custom);
1527 setOperationAction(ISD::USUBO, VT, Custom);
1528 setOperationAction(ISD::SADDO, VT, Expand);
1529 setOperationAction(ISD::SSUBO, VT, Expand);
1530 setOperationAction(ISD::ADDCARRY, VT, Expand);
1531 setOperationAction(ISD::SUBCARRY, VT, Expand);
1533 setOperationAction(ISD::ADDCARRY, MVT::i64, Custom);
1534 setOperationAction(ISD::SUBCARRY, MVT::i64, Custom);
1536 setOperationAction(ISD::CTLZ, MVT::i8, Promote);
1537 setOperationAction(ISD::CTLZ, MVT::i16, Promote);
1538 setOperationAction(ISD::CTTZ, MVT::i8, Promote);
1539 setOperationAction(ISD::CTTZ, MVT::i16, Promote);
1541 // Popcount can count # of 1s in i64 but returns i32.
1542 setOperationAction(ISD::CTPOP, MVT::i8, Promote);
1543 setOperationAction(ISD::CTPOP, MVT::i16, Promote);
1544 setOperationAction(ISD::CTPOP, MVT::i32, Promote);
1545 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
1547 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
1548 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
1549 setOperationAction(ISD::BSWAP, MVT::i32, Legal);
1550 setOperationAction(ISD::BSWAP, MVT::i64, Legal);
1552 setOperationAction(ISD::FSHL, MVT::i32, Legal);
1553 setOperationAction(ISD::FSHL, MVT::i64, Legal);
1554 setOperationAction(ISD::FSHR, MVT::i32, Legal);
1555 setOperationAction(ISD::FSHR, MVT::i64, Legal);
1557 for (unsigned IntExpOp :
1558 {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM,
1559 ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR,
1560 ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS,
1561 ISD::SMUL_LOHI, ISD::UMUL_LOHI}) {
1562 for (MVT VT : MVT::integer_valuetypes())
1563 setOperationAction(IntExpOp, VT, Expand);
1566 for (unsigned FPExpOp :
1567 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS,
1568 ISD::FPOW, ISD::FCOPYSIGN}) {
1569 for (MVT VT : MVT::fp_valuetypes())
1570 setOperationAction(FPExpOp, VT, Expand);
1573 // No extending loads from i32.
1574 for (MVT VT : MVT::integer_valuetypes()) {
1575 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
1576 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
1577 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
1579 // Turn FP truncstore into trunc + store.
1580 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1581 // Turn FP extload into load/fpextend.
1582 for (MVT VT : MVT::fp_valuetypes())
1583 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1585 // Expand BR_CC and SELECT_CC for all integer and fp types.
1586 for (MVT VT : MVT::integer_valuetypes()) {
1587 setOperationAction(ISD::BR_CC, VT, Expand);
1588 setOperationAction(ISD::SELECT_CC, VT, Expand);
1590 for (MVT VT : MVT::fp_valuetypes()) {
1591 setOperationAction(ISD::BR_CC, VT, Expand);
1592 setOperationAction(ISD::SELECT_CC, VT, Expand);
1594 setOperationAction(ISD::BR_CC, MVT::Other, Expand);
1597 // Handling of vector operations.
1600 // Set the action for vector operations to "expand", then override it with
1601 // either "custom" or "legal" for specific cases.
1602 static const unsigned VectExpOps[] = {
1603 // Integer arithmetic:
1604 ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV,
1605 ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::SADDO,
1606 ISD::UADDO, ISD::SSUBO, ISD::USUBO, ISD::SMUL_LOHI, ISD::UMUL_LOHI,
1608 ISD::AND, ISD::OR, ISD::XOR, ISD::ROTL, ISD::ROTR,
1609 ISD::CTPOP, ISD::CTLZ, ISD::CTTZ,
1610 // Floating point arithmetic/math functions:
1611 ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FMA, ISD::FDIV,
1612 ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN,
1613 ISD::FCOS, ISD::FPOW, ISD::FLOG, ISD::FLOG2,
1614 ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FCEIL, ISD::FTRUNC,
1615 ISD::FRINT, ISD::FNEARBYINT, ISD::FROUND, ISD::FFLOOR,
1616 ISD::FMINNUM, ISD::FMAXNUM, ISD::FSINCOS,
1618 ISD::BR_CC, ISD::SELECT_CC, ISD::ConstantPool,
1620 ISD::BUILD_VECTOR, ISD::SCALAR_TO_VECTOR,
1621 ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT,
1622 ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR,
1623 ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE
1626 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
1627 for (unsigned VectExpOp : VectExpOps)
1628 setOperationAction(VectExpOp, VT, Expand);
1630 // Expand all extending loads and truncating stores:
1631 for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) {
1634 setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand);
1635 setLoadExtAction(ISD::ZEXTLOAD, TargetVT, VT, Expand);
1636 setLoadExtAction(ISD::SEXTLOAD, TargetVT, VT, Expand);
1637 setTruncStoreAction(VT, TargetVT, Expand);
1640 // Normalize all inputs to SELECT to be vectors of i32.
1641 if (VT.getVectorElementType() != MVT::i32) {
1642 MVT VT32 = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
1643 setOperationAction(ISD::SELECT, VT, Promote);
1644 AddPromotedToType(ISD::SELECT, VT, VT32);
1646 setOperationAction(ISD::SRA, VT, Custom);
1647 setOperationAction(ISD::SHL, VT, Custom);
1648 setOperationAction(ISD::SRL, VT, Custom);
1651 // Extending loads from (native) vectors of i8 into (native) vectors of i16
1653 setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1654 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1655 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
1656 setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1657 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1658 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
1660 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
1661 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1662 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1664 // Types natively supported:
1665 for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
1666 MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1667 setOperationAction(ISD::BUILD_VECTOR, NativeVT, Custom);
1668 setOperationAction(ISD::EXTRACT_VECTOR_ELT, NativeVT, Custom);
1669 setOperationAction(ISD::INSERT_VECTOR_ELT, NativeVT, Custom);
1670 setOperationAction(ISD::EXTRACT_SUBVECTOR, NativeVT, Custom);
1671 setOperationAction(ISD::INSERT_SUBVECTOR, NativeVT, Custom);
1672 setOperationAction(ISD::CONCAT_VECTORS, NativeVT, Custom);
1674 setOperationAction(ISD::ADD, NativeVT, Legal);
1675 setOperationAction(ISD::SUB, NativeVT, Legal);
1676 setOperationAction(ISD::MUL, NativeVT, Legal);
1677 setOperationAction(ISD::AND, NativeVT, Legal);
1678 setOperationAction(ISD::OR, NativeVT, Legal);
1679 setOperationAction(ISD::XOR, NativeVT, Legal);
1682 // Custom lower unaligned loads.
1683 // Also, for both loads and stores, verify the alignment of the address
1684 // in case it is a compile-time constant. This is a usability feature to
1685 // provide a meaningful error message to users.
1686 for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
1687 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1688 setOperationAction(ISD::LOAD, VT, Custom);
1689 setOperationAction(ISD::STORE, VT, Custom);
1692 for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16,
1694 setCondCodeAction(ISD::SETNE, VT, Expand);
1695 setCondCodeAction(ISD::SETLE, VT, Expand);
1696 setCondCodeAction(ISD::SETGE, VT, Expand);
1697 setCondCodeAction(ISD::SETLT, VT, Expand);
1698 setCondCodeAction(ISD::SETULE, VT, Expand);
1699 setCondCodeAction(ISD::SETUGE, VT, Expand);
1700 setCondCodeAction(ISD::SETULT, VT, Expand);
1703 // Custom-lower bitcasts from i8 to v8i1.
1704 setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1705 setOperationAction(ISD::SETCC, MVT::v2i16, Custom);
1706 setOperationAction(ISD::VSELECT, MVT::v4i8, Custom);
1707 setOperationAction(ISD::VSELECT, MVT::v2i16, Custom);
1708 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom);
1709 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
1710 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
1713 setOperationAction(ISD::FMA, MVT::f64, Expand);
1714 setOperationAction(ISD::FADD, MVT::f64, Expand);
1715 setOperationAction(ISD::FSUB, MVT::f64, Expand);
1716 setOperationAction(ISD::FMUL, MVT::f64, Expand);
1718 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1719 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1721 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
1722 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
1723 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
1724 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
1725 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
1726 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
1727 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
1728 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
1729 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
1730 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
1731 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
1732 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
1734 // Handling of indexed loads/stores: default is "expand".
1736 for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
1737 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {
1738 setIndexedLoadAction(ISD::POST_INC, VT, Legal);
1739 setIndexedStoreAction(ISD::POST_INC, VT, Legal);
1742 // Subtarget-specific operation actions.
1744 if (Subtarget.hasV60Ops()) {
1745 setOperationAction(ISD::ROTL, MVT::i32, Legal);
1746 setOperationAction(ISD::ROTL, MVT::i64, Legal);
1747 setOperationAction(ISD::ROTR, MVT::i32, Legal);
1748 setOperationAction(ISD::ROTR, MVT::i64, Legal);
1750 if (Subtarget.hasV66Ops()) {
1751 setOperationAction(ISD::FADD, MVT::f64, Legal);
1752 setOperationAction(ISD::FSUB, MVT::f64, Legal);
1754 if (Subtarget.hasV67Ops()) {
1755 setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1756 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1757 setOperationAction(ISD::FMUL, MVT::f64, Legal);
1760 setTargetDAGCombine(ISD::VSELECT);
1762 if (Subtarget.useHVXOps())
1763 initializeHVXLowering();
1765 computeRegisterProperties(&HRI);
1768 // Library calls for unsupported operations
1770 bool FastMath = EnableFastMath;
1772 setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1773 setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1774 setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1775 setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1776 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1777 setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1778 setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1779 setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1781 setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1782 setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1783 setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1784 setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1785 setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1786 setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1788 // This is the only fast library function for sqrtd.
1790 setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");
1792 // Prefix is: nothing for "slow-math",
1793 // "fast2_" for V5+ fast-math double-precision
1794 // (actually, keep fast-math and fast-math2 separate for now)
1796 setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3");
1797 setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3");
1798 setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3");
1799 setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3");
1800 setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3");
1802 setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
1803 setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1804 setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
1805 setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
1806 setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
1810 setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf");
1812 setLibcallName(RTLIB::SQRT_F32, "__hexagon_sqrtf");
1814 // These cause problems when the shift amount is non-constant.
1815 setLibcallName(RTLIB::SHL_I128, nullptr);
1816 setLibcallName(RTLIB::SRL_I128, nullptr);
1817 setLibcallName(RTLIB::SRA_I128, nullptr);
1820 const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
1821 switch ((HexagonISD::NodeType)Opcode) {
1822 case HexagonISD::ADDC: return "HexagonISD::ADDC";
1823 case HexagonISD::SUBC: return "HexagonISD::SUBC";
1824 case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA";
1825 case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT";
1826 case HexagonISD::AT_PCREL: return "HexagonISD::AT_PCREL";
1827 case HexagonISD::BARRIER: return "HexagonISD::BARRIER";
1828 case HexagonISD::CALL: return "HexagonISD::CALL";
1829 case HexagonISD::CALLnr: return "HexagonISD::CALLnr";
1830 case HexagonISD::CALLR: return "HexagonISD::CALLR";
1831 case HexagonISD::COMBINE: return "HexagonISD::COMBINE";
1832 case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
1833 case HexagonISD::CONST32: return "HexagonISD::CONST32";
1834 case HexagonISD::CP: return "HexagonISD::CP";
1835 case HexagonISD::DCFETCH: return "HexagonISD::DCFETCH";
1836 case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
1837 case HexagonISD::TSTBIT: return "HexagonISD::TSTBIT";
1838 case HexagonISD::EXTRACTU: return "HexagonISD::EXTRACTU";
1839 case HexagonISD::INSERT: return "HexagonISD::INSERT";
1840 case HexagonISD::JT: return "HexagonISD::JT";
1841 case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
1842 case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
1843 case HexagonISD::VASL: return "HexagonISD::VASL";
1844 case HexagonISD::VASR: return "HexagonISD::VASR";
1845 case HexagonISD::VLSR: return "HexagonISD::VLSR";
1846 case HexagonISD::VSPLAT: return "HexagonISD::VSPLAT";
1847 case HexagonISD::VEXTRACTW: return "HexagonISD::VEXTRACTW";
1848 case HexagonISD::VINSERTW0: return "HexagonISD::VINSERTW0";
1849 case HexagonISD::VROR: return "HexagonISD::VROR";
1850 case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE";
1851 case HexagonISD::PTRUE: return "HexagonISD::PTRUE";
1852 case HexagonISD::PFALSE: return "HexagonISD::PFALSE";
1853 case HexagonISD::VZERO: return "HexagonISD::VZERO";
1854 case HexagonISD::VSPLATW: return "HexagonISD::VSPLATW";
1855 case HexagonISD::D2P: return "HexagonISD::D2P";
1856 case HexagonISD::P2D: return "HexagonISD::P2D";
1857 case HexagonISD::V2Q: return "HexagonISD::V2Q";
1858 case HexagonISD::Q2V: return "HexagonISD::Q2V";
1859 case HexagonISD::QCAT: return "HexagonISD::QCAT";
1860 case HexagonISD::QTRUE: return "HexagonISD::QTRUE";
1861 case HexagonISD::QFALSE: return "HexagonISD::QFALSE";
1862 case HexagonISD::TYPECAST: return "HexagonISD::TYPECAST";
1863 case HexagonISD::VALIGN: return "HexagonISD::VALIGN";
1864 case HexagonISD::VALIGNADDR: return "HexagonISD::VALIGNADDR";
1865 case HexagonISD::OP_END: break;
1871 HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, const SDLoc &dl,
1872 unsigned NeedAlign) const {
1873 auto *CA = dyn_cast<ConstantSDNode>(Ptr);
1876 unsigned Addr = CA->getZExtValue();
1877 unsigned HaveAlign = Addr != 0 ? 1u << countTrailingZeros(Addr) : NeedAlign;
1878 if (HaveAlign < NeedAlign) {
1880 raw_string_ostream O(ErrMsg);
1881 O << "Misaligned constant address: " << format_hex(Addr, 10)
1882 << " has alignment " << HaveAlign
1883 << ", but the memory access requires " << NeedAlign;
1884 if (DebugLoc DL = dl.getDebugLoc())
1885 DL.print(O << ", at ");
1886 report_fatal_error(O.str());
1890 // Bit-reverse Load Intrinsic: Check if the instruction is a bit reverse load
1892 static bool isBrevLdIntrinsic(const Value *Inst) {
1893 unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
1894 return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
1895 ID == Intrinsic::hexagon_L2_loadri_pbr ||
1896 ID == Intrinsic::hexagon_L2_loadrh_pbr ||
1897 ID == Intrinsic::hexagon_L2_loadruh_pbr ||
1898 ID == Intrinsic::hexagon_L2_loadrb_pbr ||
1899 ID == Intrinsic::hexagon_L2_loadrub_pbr);
1902 // Bit-reverse Load Intrinsic :Crawl up and figure out the object from previous
1903 // instruction. So far we only handle bitcast, extract value and bit reverse
1904 // load intrinsic instructions. Should we handle CGEP ?
1905 static Value *getBrevLdObject(Value *V) {
1906 if (Operator::getOpcode(V) == Instruction::ExtractValue ||
1907 Operator::getOpcode(V) == Instruction::BitCast)
1908 V = cast<Operator>(V)->getOperand(0);
1909 else if (isa<IntrinsicInst>(V) && isBrevLdIntrinsic(V))
1910 V = cast<Instruction>(V)->getOperand(0);
1914 // Bit-reverse Load Intrinsic: For a PHI Node return either an incoming edge or
1915 // a back edge. If the back edge comes from the intrinsic itself, the incoming
1916 // edge is returned.
1917 static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) {
1918 const BasicBlock *Parent = PN->getParent();
1920 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) {
1921 BasicBlock *Blk = PN->getIncomingBlock(i);
1922 // Determine if the back edge is originated from intrinsic.
1923 if (Blk == Parent) {
1924 Value *BackEdgeVal = PN->getIncomingValue(i);
1926 // Loop over till we return the same Value or we hit the IntrBaseVal.
1928 BaseVal = BackEdgeVal;
1929 BackEdgeVal = getBrevLdObject(BackEdgeVal);
1930 } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
1931 // If the getBrevLdObject returns IntrBaseVal, we should return the
1933 if (IntrBaseVal == BackEdgeVal)
1937 } else // Set the node to incoming edge.
1940 assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");
1941 return PN->getIncomingValue(Idx);
1944 // Bit-reverse Load Intrinsic: Figure out the underlying object the base
1945 // pointer points to, for the bit-reverse load intrinsic. Setting this to
1946 // memoperand might help alias analysis to figure out the dependencies.
1947 static Value *getUnderLyingObjectForBrevLdIntr(Value *V) {
1948 Value *IntrBaseVal = V;
1950 // Loop over till we return the same Value, implies we either figure out
1951 // the object or we hit a PHI
1954 V = getBrevLdObject(V);
1955 } while (BaseVal != V);
1957 // Identify the object from PHINode.
1958 if (const PHINode *PN = dyn_cast<PHINode>(V))
1959 return returnEdge(PN, IntrBaseVal);
1960 // For non PHI nodes, the object is the last value returned by getBrevLdObject
1965 /// Given an intrinsic, checks if on the target the intrinsic will need to map
1966 /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
1967 /// true and store the intrinsic information into the IntrinsicInfo that was
1968 /// passed to the function.
1969 bool HexagonTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1971 MachineFunction &MF,
1972 unsigned Intrinsic) const {
1973 switch (Intrinsic) {
1974 case Intrinsic::hexagon_L2_loadrd_pbr:
1975 case Intrinsic::hexagon_L2_loadri_pbr:
1976 case Intrinsic::hexagon_L2_loadrh_pbr:
1977 case Intrinsic::hexagon_L2_loadruh_pbr:
1978 case Intrinsic::hexagon_L2_loadrb_pbr:
1979 case Intrinsic::hexagon_L2_loadrub_pbr: {
1980 Info.opc = ISD::INTRINSIC_W_CHAIN;
1981 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
1982 auto &Cont = I.getCalledFunction()->getParent()->getContext();
1983 // The intrinsic function call is of the form { ElTy, i8* }
1984 // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type
1985 // should be derived from ElTy.
1986 Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(0);
1987 Info.memVT = MVT::getVT(ElTy);
1988 llvm::Value *BasePtrVal = I.getOperand(0);
1989 Info.ptrVal = getUnderLyingObjectForBrevLdIntr(BasePtrVal);
1990 // The offset value comes through Modifier register. For now, assume the
1993 Info.align = DL.getABITypeAlign(Info.memVT.getTypeForEVT(Cont));
1994 Info.flags = MachineMemOperand::MOLoad;
1997 case Intrinsic::hexagon_V6_vgathermw:
1998 case Intrinsic::hexagon_V6_vgathermw_128B:
1999 case Intrinsic::hexagon_V6_vgathermh:
2000 case Intrinsic::hexagon_V6_vgathermh_128B:
2001 case Intrinsic::hexagon_V6_vgathermhw:
2002 case Intrinsic::hexagon_V6_vgathermhw_128B:
2003 case Intrinsic::hexagon_V6_vgathermwq:
2004 case Intrinsic::hexagon_V6_vgathermwq_128B:
2005 case Intrinsic::hexagon_V6_vgathermhq:
2006 case Intrinsic::hexagon_V6_vgathermhq_128B:
2007 case Intrinsic::hexagon_V6_vgathermhwq:
2008 case Intrinsic::hexagon_V6_vgathermhwq_128B: {
2009 const Module &M = *I.getParent()->getParent()->getParent();
2010 Info.opc = ISD::INTRINSIC_W_CHAIN;
2011 Type *VecTy = I.getArgOperand(1)->getType();
2012 Info.memVT = MVT::getVT(VecTy);
2013 Info.ptrVal = I.getArgOperand(0);
2016 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);
2017 Info.flags = MachineMemOperand::MOLoad |
2018 MachineMemOperand::MOStore |
2019 MachineMemOperand::MOVolatile;
2028 bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
2029 return X.getValueType().isScalarInteger(); // 'tstbit'
2032 bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
2033 return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2));
2036 bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
2037 if (!VT1.isSimple() || !VT2.isSimple())
2039 return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32;
2042 bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd(
2043 const MachineFunction &MF, EVT VT) const {
2044 return isOperationLegalOrCustom(ISD::FMA, VT);
2047 // Should we expand the build vector with shuffles?
2048 bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT,
2049 unsigned DefinedValues) const {
2053 bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask,
2058 TargetLoweringBase::LegalizeTypeAction
2059 HexagonTargetLowering::getPreferredVectorAction(MVT VT) const {
2060 unsigned VecLen = VT.getVectorNumElements();
2061 MVT ElemTy = VT.getVectorElementType();
2063 if (VecLen == 1 || VT.isScalableVector())
2064 return TargetLoweringBase::TypeScalarizeVector;
2066 if (Subtarget.useHVXOps()) {
2067 unsigned HwLen = Subtarget.getVectorLength();
2068 // If the size of VT is at least half of the vector length,
2069 // widen the vector. Note: the threshold was not selected in
2070 // any scientific way.
2071 ArrayRef<MVT> Tys = Subtarget.getHVXElementTypes();
2072 if (llvm::find(Tys, ElemTy) != Tys.end()) {
2073 unsigned HwWidth = 8*HwLen;
2074 unsigned VecWidth = VT.getSizeInBits();
2075 if (VecWidth >= HwWidth/2 && VecWidth < HwWidth)
2076 return TargetLoweringBase::TypeWidenVector;
2078 // Split vectors of i1 that correspond to (byte) vector pairs.
2079 if (ElemTy == MVT::i1 && VecLen == 2*HwLen)
2080 return TargetLoweringBase::TypeSplitVector;
2083 // Always widen (remaining) vectors of i1.
2084 if (ElemTy == MVT::i1)
2085 return TargetLoweringBase::TypeWidenVector;
2087 return TargetLoweringBase::TypeSplitVector;
2090 std::pair<SDValue, int>
2091 HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {
2092 if (Addr.getOpcode() == ISD::ADD) {
2093 SDValue Op1 = Addr.getOperand(1);
2094 if (auto *CN = dyn_cast<const ConstantSDNode>(Op1.getNode()))
2095 return { Addr.getOperand(0), CN->getSExtValue() };
2100 // Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors
2101 // to select data from, V3 is the permutation.
2103 HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
2105 const auto *SVN = cast<ShuffleVectorSDNode>(Op);
2106 ArrayRef<int> AM = SVN->getMask();
2107 assert(AM.size() <= 8 && "Unexpected shuffle mask");
2108 unsigned VecLen = AM.size();
2111 assert(!Subtarget.isHVXVectorType(VecTy, true) &&
2112 "HVX shuffles should be legal");
2113 assert(VecTy.getSizeInBits() <= 64 && "Unexpected vector length");
2115 SDValue Op0 = Op.getOperand(0);
2116 SDValue Op1 = Op.getOperand(1);
2117 const SDLoc &dl(Op);
2119 // If the inputs are not the same as the output, bail. This is not an
2120 // error situation, but complicates the handling and the default expansion
2121 // (into BUILD_VECTOR) should be adequate.
2122 if (ty(Op0) != VecTy || ty(Op1) != VecTy)
2125 // Normalize the mask so that the first non-negative index comes from
2126 // the first operand.
2127 SmallVector<int,8> Mask(AM.begin(), AM.end());
2128 unsigned F = llvm::find_if(AM, [](int M) { return M >= 0; }) - AM.data();
2130 return DAG.getUNDEF(VecTy);
2131 if (AM[F] >= int(VecLen)) {
2132 ShuffleVectorSDNode::commuteMask(Mask);
2133 std::swap(Op0, Op1);
2136 // Express the shuffle mask in terms of bytes.
2137 SmallVector<int,8> ByteMask;
2138 unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8;
2139 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
2142 for (unsigned j = 0; j != ElemBytes; ++j)
2143 ByteMask.push_back(-1);
2145 for (unsigned j = 0; j != ElemBytes; ++j)
2146 ByteMask.push_back(M*ElemBytes + j);
2149 assert(ByteMask.size() <= 8);
2151 // All non-undef (non-negative) indexes are well within [0..127], so they
2152 // fit in a single byte. Build two 64-bit words:
2153 // - MaskIdx where each byte is the corresponding index (for non-negative
2154 // indexes), and 0xFF for negative indexes, and
2155 // - MaskUnd that has 0xFF for each negative index.
2156 uint64_t MaskIdx = 0;
2157 uint64_t MaskUnd = 0;
2158 for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) {
2160 uint64_t M = ByteMask[i] & 0xFF;
2166 if (ByteMask.size() == 4) {
2168 if (MaskIdx == (0x03020100 | MaskUnd))
2171 if (MaskIdx == (0x00010203 | MaskUnd)) {
2172 SDValue T0 = DAG.getBitcast(MVT::i32, Op0);
2173 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i32, T0);
2174 return DAG.getBitcast(VecTy, T1);
2178 SDValue Concat10 = DAG.getNode(HexagonISD::COMBINE, dl,
2179 typeJoin({ty(Op1), ty(Op0)}), {Op1, Op0});
2180 if (MaskIdx == (0x06040200 | MaskUnd))
2181 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
2182 if (MaskIdx == (0x07050301 | MaskUnd))
2183 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);
2185 SDValue Concat01 = DAG.getNode(HexagonISD::COMBINE, dl,
2186 typeJoin({ty(Op0), ty(Op1)}), {Op0, Op1});
2187 if (MaskIdx == (0x02000604 | MaskUnd))
2188 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
2189 if (MaskIdx == (0x03010705 | MaskUnd))
2190 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
2193 if (ByteMask.size() == 8) {
2195 if (MaskIdx == (0x0706050403020100ull | MaskUnd))
2198 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
2199 SDValue T0 = DAG.getBitcast(MVT::i64, Op0);
2200 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i64, T0);
2201 return DAG.getBitcast(VecTy, T1);
2205 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
2206 return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
2207 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
2208 return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
2209 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
2210 return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
2211 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
2212 return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
2213 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
2214 VectorPair P = opSplit(Op0, dl, DAG);
2215 return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG);
2219 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
2220 return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
2221 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
2222 return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
2228 // Create a Hexagon-specific node for shifting a vector by an integer.
2230 HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG)
2232 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) {
2233 if (SDValue S = BVN->getSplatValue()) {
2235 switch (Op.getOpcode()) {
2237 NewOpc = HexagonISD::VASL;
2240 NewOpc = HexagonISD::VASR;
2243 NewOpc = HexagonISD::VLSR;
2246 llvm_unreachable("Unexpected shift opcode");
2248 return DAG.getNode(NewOpc, SDLoc(Op), ty(Op), Op.getOperand(0), S);
2256 HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
2257 return getVectorShiftByInt(Op, DAG);
2261 HexagonTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
2262 if (isa<ConstantSDNode>(Op.getOperand(1).getNode()))
2268 HexagonTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
2270 SDValue InpV = Op.getOperand(0);
2271 MVT InpTy = ty(InpV);
2272 assert(ResTy.getSizeInBits() == InpTy.getSizeInBits());
2273 const SDLoc &dl(Op);
2275 // Handle conversion from i8 to v8i1.
2276 if (InpTy == MVT::i8) {
2277 if (ResTy == MVT::v8i1) {
2278 SDValue Sc = DAG.getBitcast(tyScalar(InpTy), InpV);
2279 SDValue Ext = DAG.getZExtOrTrunc(Sc, dl, MVT::i32);
2280 return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
2289 HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values,
2290 MVT VecTy, SelectionDAG &DAG,
2291 MutableArrayRef<ConstantInt*> Consts) const {
2292 MVT ElemTy = VecTy.getVectorElementType();
2293 unsigned ElemWidth = ElemTy.getSizeInBits();
2294 IntegerType *IntTy = IntegerType::get(*DAG.getContext(), ElemWidth);
2295 bool AllConst = true;
2297 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2298 SDValue V = Values[i];
2300 Consts[i] = ConstantInt::get(IntTy, 0);
2303 // Make sure to always cast to IntTy.
2304 if (auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) {
2305 const ConstantInt *CI = CN->getConstantIntValue();
2306 Consts[i] = ConstantInt::get(IntTy, CI->getValue().getSExtValue());
2307 } else if (auto *CN = dyn_cast<ConstantFPSDNode>(V.getNode())) {
2308 const ConstantFP *CF = CN->getConstantFPValue();
2309 APInt A = CF->getValueAPF().bitcastToAPInt();
2310 Consts[i] = ConstantInt::get(IntTy, A.getZExtValue());
2319 HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl,
2320 MVT VecTy, SelectionDAG &DAG) const {
2321 MVT ElemTy = VecTy.getVectorElementType();
2322 assert(VecTy.getVectorNumElements() == Elem.size());
2324 SmallVector<ConstantInt*,4> Consts(Elem.size());
2325 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2327 unsigned First, Num = Elem.size();
2328 for (First = 0; First != Num; ++First)
2329 if (!isUndef(Elem[First]))
2332 return DAG.getUNDEF(VecTy);
2335 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
2336 return getZero(dl, VecTy, DAG);
2338 if (ElemTy == MVT::i16) {
2339 assert(Elem.size() == 2);
2341 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
2342 Consts[1]->getZExtValue() << 16;
2343 return DAG.getBitcast(MVT::v2i16, DAG.getConstant(V, dl, MVT::i32));
2345 SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32,
2346 {Elem[1], Elem[0]}, DAG);
2347 return DAG.getBitcast(MVT::v2i16, N);
2350 if (ElemTy == MVT::i8) {
2351 // First try generating a constant.
2353 int32_t V = (Consts[0]->getZExtValue() & 0xFF) |
2354 (Consts[1]->getZExtValue() & 0xFF) << 8 |
2355 (Consts[1]->getZExtValue() & 0xFF) << 16 |
2356 Consts[2]->getZExtValue() << 24;
2357 return DAG.getBitcast(MVT::v4i8, DAG.getConstant(V, dl, MVT::i32));
2361 bool IsSplat = true;
2362 for (unsigned i = 0; i != Num; ++i) {
2365 if (Elem[i] == Elem[First] || isUndef(Elem[i]))
2371 // Legalize the operand to VSPLAT.
2372 SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32);
2373 return DAG.getNode(HexagonISD::VSPLAT, dl, VecTy, Ext);
2377 // (zxtb(Elem[0]) | (zxtb(Elem[1]) << 8)) |
2378 // (zxtb(Elem[2]) | (zxtb(Elem[3]) << 8)) << 16
2379 assert(Elem.size() == 4);
2381 for (unsigned i = 0; i != 4; ++i) {
2382 Vs[i] = DAG.getZExtOrTrunc(Elem[i], dl, MVT::i32);
2383 Vs[i] = DAG.getZeroExtendInReg(Vs[i], dl, MVT::i8);
2385 SDValue S8 = DAG.getConstant(8, dl, MVT::i32);
2386 SDValue T0 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[1], S8});
2387 SDValue T1 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[3], S8});
2388 SDValue B0 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[0], T0});
2389 SDValue B1 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[2], T1});
2391 SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);
2392 return DAG.getBitcast(MVT::v4i8, R);
2396 dbgs() << "VecTy: " << EVT(VecTy).getEVTString() << '\n';
2398 llvm_unreachable("Unexpected vector element type");
2402 HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl,
2403 MVT VecTy, SelectionDAG &DAG) const {
2404 MVT ElemTy = VecTy.getVectorElementType();
2405 assert(VecTy.getVectorNumElements() == Elem.size());
2407 SmallVector<ConstantInt*,8> Consts(Elem.size());
2408 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2410 unsigned First, Num = Elem.size();
2411 for (First = 0; First != Num; ++First)
2412 if (!isUndef(Elem[First]))
2415 return DAG.getUNDEF(VecTy);
2418 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
2419 return getZero(dl, VecTy, DAG);
2421 // First try splat if possible.
2422 if (ElemTy == MVT::i16) {
2423 bool IsSplat = true;
2424 for (unsigned i = 0; i != Num; ++i) {
2427 if (Elem[i] == Elem[First] || isUndef(Elem[i]))
2433 // Legalize the operand to VSPLAT.
2434 SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32);
2435 return DAG.getNode(HexagonISD::VSPLAT, dl, VecTy, Ext);
2439 // Then try constant.
2442 unsigned W = ElemTy.getSizeInBits();
2443 uint64_t Mask = (ElemTy == MVT::i8) ? 0xFFull
2444 : (ElemTy == MVT::i16) ? 0xFFFFull : 0xFFFFFFFFull;
2445 for (unsigned i = 0; i != Num; ++i)
2446 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask);
2447 SDValue V0 = DAG.getConstant(Val, dl, MVT::i64);
2448 return DAG.getBitcast(VecTy, V0);
2451 // Build two 32-bit vectors and concatenate.
2452 MVT HalfTy = MVT::getVectorVT(ElemTy, Num/2);
2453 SDValue L = (ElemTy == MVT::i32)
2455 : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG);
2456 SDValue H = (ElemTy == MVT::i32)
2458 : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG);
2459 return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, {H, L});
2463 HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,
2464 const SDLoc &dl, MVT ValTy, MVT ResTy,
2465 SelectionDAG &DAG) const {
2466 MVT VecTy = ty(VecV);
2467 assert(!ValTy.isVector() ||
2468 VecTy.getVectorElementType() == ValTy.getVectorElementType());
2469 unsigned VecWidth = VecTy.getSizeInBits();
2470 unsigned ValWidth = ValTy.getSizeInBits();
2471 unsigned ElemWidth = VecTy.getVectorElementType().getSizeInBits();
2472 assert((VecWidth % ElemWidth) == 0);
2473 auto *IdxN = dyn_cast<ConstantSDNode>(IdxV);
2475 // Special case for v{8,4,2}i1 (the only boolean vectors legal in Hexagon
2476 // without any coprocessors).
2477 if (ElemWidth == 1) {
2478 assert(VecWidth == VecTy.getVectorNumElements() && "Sanity failure");
2479 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
2480 // Check if this is an extract of the lowest bit.
2482 // Extracting the lowest bit is a no-op, but it changes the type,
2483 // so it must be kept as an operation to avoid errors related to
2485 if (IdxN->isNullValue() && ValTy.getSizeInBits() == 1)
2486 return DAG.getNode(HexagonISD::TYPECAST, dl, MVT::i1, VecV);
2489 // If the value extracted is a single bit, use tstbit.
2490 if (ValWidth == 1) {
2491 SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2492 SDValue M0 = DAG.getConstant(8 / VecWidth, dl, MVT::i32);
2493 SDValue I0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, M0);
2494 return DAG.getNode(HexagonISD::TSTBIT, dl, MVT::i1, A0, I0);
2497 // Each bool vector (v2i1, v4i1, v8i1) always occupies 8 bits in
2498 // a predicate register. The elements of the vector are repeated
2499 // in the register (if necessary) so that the total number is 8.
2500 // The extracted subvector will need to be expanded in such a way.
2501 unsigned Scale = VecWidth / ValWidth;
2503 // Generate (p2d VecV) >> 8*Idx to move the interesting bytes to
2505 assert(ty(IdxV) == MVT::i32);
2506 unsigned VecRep = 8 / VecWidth;
2507 SDValue S0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2508 DAG.getConstant(8*VecRep, dl, MVT::i32));
2509 SDValue T0 = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
2510 SDValue T1 = DAG.getNode(ISD::SRL, dl, MVT::i64, T0, S0);
2512 // The longest possible subvector is at most 32 bits, so it is always
2513 // contained in the low subregister.
2514 T1 = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, T1);
2515 T1 = expandPredicate(T1, dl, DAG);
2519 return DAG.getNode(HexagonISD::D2P, dl, ResTy, T1);
2522 assert(VecWidth == 32 || VecWidth == 64);
2524 // Cast everything to scalar integer types.
2525 MVT ScalarTy = tyScalar(VecTy);
2526 VecV = DAG.getBitcast(ScalarTy, VecV);
2528 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
2532 unsigned Off = IdxN->getZExtValue() * ElemWidth;
2533 if (VecWidth == 64 && ValWidth == 32) {
2534 assert(Off == 0 || Off == 32);
2535 unsigned SubIdx = Off == 0 ? Hexagon::isub_lo : Hexagon::isub_hi;
2536 ExtV = DAG.getTargetExtractSubreg(SubIdx, dl, MVT::i32, VecV);
2537 } else if (Off == 0 && (ValWidth % 8) == 0) {
2538 ExtV = DAG.getZeroExtendInReg(VecV, dl, tyScalar(ValTy));
2540 SDValue OffV = DAG.getConstant(Off, dl, MVT::i32);
2541 // The return type of EXTRACTU must be the same as the type of the
2543 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
2544 {VecV, WidthV, OffV});
2547 if (ty(IdxV) != MVT::i32)
2548 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
2549 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2550 DAG.getConstant(ElemWidth, dl, MVT::i32));
2551 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
2552 {VecV, WidthV, OffV});
2555 // Cast ExtV to the requested result type.
2556 ExtV = DAG.getZExtOrTrunc(ExtV, dl, tyScalar(ResTy));
2557 ExtV = DAG.getBitcast(ResTy, ExtV);
2562 HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
2563 const SDLoc &dl, MVT ValTy,
2564 SelectionDAG &DAG) const {
2565 MVT VecTy = ty(VecV);
2566 if (VecTy.getVectorElementType() == MVT::i1) {
2567 MVT ValTy = ty(ValV);
2568 assert(ValTy.getVectorElementType() == MVT::i1);
2569 SDValue ValR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, ValV);
2570 unsigned VecLen = VecTy.getVectorNumElements();
2571 unsigned Scale = VecLen / ValTy.getVectorNumElements();
2574 for (unsigned R = Scale; R > 1; R /= 2) {
2575 ValR = contractPredicate(ValR, dl, DAG);
2576 ValR = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2577 DAG.getUNDEF(MVT::i32), ValR);
2579 // The longest possible subvector is at most 32 bits, so it is always
2580 // contained in the low subregister.
2581 ValR = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, ValR);
2583 unsigned ValBytes = 64 / Scale;
2584 SDValue Width = DAG.getConstant(ValBytes*8, dl, MVT::i32);
2585 SDValue Idx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2586 DAG.getConstant(8, dl, MVT::i32));
2587 SDValue VecR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
2588 SDValue Ins = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32,
2589 {VecR, ValR, Width, Idx});
2590 return DAG.getNode(HexagonISD::D2P, dl, VecTy, Ins);
2593 unsigned VecWidth = VecTy.getSizeInBits();
2594 unsigned ValWidth = ValTy.getSizeInBits();
2595 assert(VecWidth == 32 || VecWidth == 64);
2596 assert((VecWidth % ValWidth) == 0);
2598 // Cast everything to scalar integer types.
2599 MVT ScalarTy = MVT::getIntegerVT(VecWidth);
2600 // The actual type of ValV may be different than ValTy (which is related
2601 // to the vector type).
2602 unsigned VW = ty(ValV).getSizeInBits();
2603 ValV = DAG.getBitcast(MVT::getIntegerVT(VW), ValV);
2604 VecV = DAG.getBitcast(ScalarTy, VecV);
2606 ValV = DAG.getAnyExtOrTrunc(ValV, dl, ScalarTy);
2608 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
2611 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(IdxV)) {
2612 unsigned W = C->getZExtValue() * ValWidth;
2613 SDValue OffV = DAG.getConstant(W, dl, MVT::i32);
2614 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
2615 {VecV, ValV, WidthV, OffV});
2617 if (ty(IdxV) != MVT::i32)
2618 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
2619 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, WidthV);
2620 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
2621 {VecV, ValV, WidthV, OffV});
2624 return DAG.getNode(ISD::BITCAST, dl, VecTy, InsV);
2628 HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,
2629 SelectionDAG &DAG) const {
2630 assert(ty(Vec32).getSizeInBits() == 32);
2632 return DAG.getUNDEF(MVT::i64);
2633 return getInstr(Hexagon::S2_vsxtbh, dl, MVT::i64, {Vec32}, DAG);
2637 HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,
2638 SelectionDAG &DAG) const {
2639 assert(ty(Vec64).getSizeInBits() == 64);
2641 return DAG.getUNDEF(MVT::i32);
2642 return getInstr(Hexagon::S2_vtrunehb, dl, MVT::i32, {Vec64}, DAG);
2646 HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG)
2648 if (Ty.isVector()) {
2649 assert(Ty.isInteger() && "Only integer vectors are supported here");
2650 unsigned W = Ty.getSizeInBits();
2652 return DAG.getBitcast(Ty, DAG.getConstant(0, dl, MVT::getIntegerVT(W)));
2653 return DAG.getNode(HexagonISD::VZERO, dl, Ty);
2657 return DAG.getConstant(0, dl, Ty);
2658 if (Ty.isFloatingPoint())
2659 return DAG.getConstantFP(0.0, dl, Ty);
2660 llvm_unreachable("Invalid type for zero");
2664 HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
2666 unsigned BW = VecTy.getSizeInBits();
2667 const SDLoc &dl(Op);
2668 SmallVector<SDValue,8> Ops;
2669 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i)
2670 Ops.push_back(Op.getOperand(i));
2673 return buildVector32(Ops, dl, VecTy, DAG);
2675 return buildVector64(Ops, dl, VecTy, DAG);
2677 if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
2678 // Check if this is a special case or all-0 or all-1.
2679 bool All0 = true, All1 = true;
2680 for (SDValue P : Ops) {
2681 auto *CN = dyn_cast<ConstantSDNode>(P.getNode());
2682 if (CN == nullptr) {
2683 All0 = All1 = false;
2686 uint32_t C = CN->getZExtValue();
2691 return DAG.getNode(HexagonISD::PFALSE, dl, VecTy);
2693 return DAG.getNode(HexagonISD::PTRUE, dl, VecTy);
2695 // For each i1 element in the resulting predicate register, put 1
2696 // shifted by the index of the element into a general-purpose register,
2697 // then or them together and transfer it back into a predicate register.
2699 SDValue Z = getZero(dl, MVT::i32, DAG);
2700 // Always produce 8 bits, repeat inputs if necessary.
2701 unsigned Rep = 8 / VecTy.getVectorNumElements();
2702 for (unsigned i = 0; i != 8; ++i) {
2703 SDValue S = DAG.getConstant(1ull << i, dl, MVT::i32);
2704 Rs[i] = DAG.getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);
2706 for (ArrayRef<SDValue> A(Rs); A.size() != 1; A = A.drop_back(A.size()/2)) {
2707 for (unsigned i = 0, e = A.size()/2; i != e; ++i)
2708 Rs[i] = DAG.getNode(ISD::OR, dl, MVT::i32, Rs[2*i], Rs[2*i+1]);
2710 // Move the value directly to a predicate register.
2711 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
2718 HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
2719 SelectionDAG &DAG) const {
2721 const SDLoc &dl(Op);
2722 if (VecTy.getSizeInBits() == 64) {
2723 assert(Op.getNumOperands() == 2);
2724 return DAG.getNode(HexagonISD::COMBINE, dl, VecTy, Op.getOperand(1),
2728 MVT ElemTy = VecTy.getVectorElementType();
2729 if (ElemTy == MVT::i1) {
2730 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
2731 MVT OpTy = ty(Op.getOperand(0));
2732 // Scale is how many times the operands need to be contracted to match
2733 // the representation in the target register.
2734 unsigned Scale = VecTy.getVectorNumElements() / OpTy.getVectorNumElements();
2735 assert(Scale == Op.getNumOperands() && Scale > 1);
2737 // First, convert all bool vectors to integers, then generate pairwise
2738 // inserts to form values of doubled length. Up until there are only
2739 // two values left to concatenate, all of these values will fit in a
2740 // 32-bit integer, so keep them as i32 to use 32-bit inserts.
2741 SmallVector<SDValue,4> Words[2];
2744 for (SDValue P : Op.getNode()->op_values()) {
2745 SDValue W = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, P);
2746 for (unsigned R = Scale; R > 1; R /= 2) {
2747 W = contractPredicate(W, dl, DAG);
2748 W = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2749 DAG.getUNDEF(MVT::i32), W);
2751 W = DAG.getTargetExtractSubreg(Hexagon::isub_lo, dl, MVT::i32, W);
2752 Words[IdxW].push_back(W);
2756 SDValue WidthV = DAG.getConstant(64 / Scale, dl, MVT::i32);
2757 Words[IdxW ^ 1].clear();
2759 for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {
2760 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
2761 // Insert W1 into W0 right next to the significant bits of W0.
2762 SDValue T = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32,
2763 {W0, W1, WidthV, WidthV});
2764 Words[IdxW ^ 1].push_back(T);
2770 // Another sanity check. At this point there should only be two words
2771 // left, and Scale should be 2.
2772 assert(Scale == 2 && Words[IdxW].size() == 2);
2774 SDValue WW = DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64,
2775 Words[IdxW][1], Words[IdxW][0]);
2776 return DAG.getNode(HexagonISD::D2P, dl, VecTy, WW);
2783 HexagonTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
2784 SelectionDAG &DAG) const {
2785 SDValue Vec = Op.getOperand(0);
2786 MVT ElemTy = ty(Vec).getVectorElementType();
2787 return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG);
2791 HexagonTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
2792 SelectionDAG &DAG) const {
2793 return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op),
2794 ty(Op), ty(Op), DAG);
2798 HexagonTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
2799 SelectionDAG &DAG) const {
2800 return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2),
2801 SDLoc(Op), ty(Op).getVectorElementType(), DAG);
2805 HexagonTargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
2806 SelectionDAG &DAG) const {
2807 SDValue ValV = Op.getOperand(1);
2808 return insertVector(Op.getOperand(0), ValV, Op.getOperand(2),
2809 SDLoc(Op), ty(ValV), DAG);
2813 HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
2814 // Assuming the caller does not have either a signext or zeroext modifier, and
2815 // only one value is accepted, any reasonable truncation is allowed.
2816 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
2819 // FIXME: in principle up to 64-bit could be made safe, but it would be very
2820 // fragile at the moment: any support for multiple value returns would be
2821 // liable to disallow tail calls involving i64 -> iN truncation in many cases.
2822 return Ty1->getPrimitiveSizeInBits() <= 32;
2826 HexagonTargetLowering::LowerLoad(SDValue Op, SelectionDAG &DAG) const {
2827 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
2828 unsigned ClaimAlign = LN->getAlignment();
2829 validateConstPtrAlignment(LN->getBasePtr(), SDLoc(Op), ClaimAlign);
2830 // Call LowerUnalignedLoad for all loads, it recognizes loads that
2831 // don't need extra aligning.
2832 return LowerUnalignedLoad(Op, DAG);
2836 HexagonTargetLowering::LowerStore(SDValue Op, SelectionDAG &DAG) const {
2837 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
2838 unsigned ClaimAlign = SN->getAlignment();
2839 SDValue Ptr = SN->getBasePtr();
2840 const SDLoc &dl(Op);
2841 validateConstPtrAlignment(Ptr, dl, ClaimAlign);
2843 MVT StoreTy = SN->getMemoryVT().getSimpleVT();
2844 unsigned NeedAlign = Subtarget.getTypeAlignment(StoreTy);
2845 if (ClaimAlign < NeedAlign)
2846 return expandUnalignedStore(SN, DAG);
2851 HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
2853 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
2854 MVT LoadTy = ty(Op);
2855 unsigned NeedAlign = Subtarget.getTypeAlignment(LoadTy);
2856 unsigned HaveAlign = LN->getAlignment();
2857 if (HaveAlign >= NeedAlign)
2860 const SDLoc &dl(Op);
2861 const DataLayout &DL = DAG.getDataLayout();
2862 LLVMContext &Ctx = *DAG.getContext();
2864 // If the load aligning is disabled or the load can be broken up into two
2865 // smaller legal loads, do the default (target-independent) expansion.
2866 bool DoDefault = false;
2867 // Handle it in the default way if this is an indexed load.
2868 if (!LN->isUnindexed())
2872 if (allowsMemoryAccessForAlignment(Ctx, DL, LN->getMemoryVT(),
2873 *LN->getMemOperand()))
2877 if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
2878 // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
2879 MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8 * HaveAlign)
2880 : MVT::getVectorVT(MVT::i8, HaveAlign);
2882 allowsMemoryAccessForAlignment(Ctx, DL, PartTy, *LN->getMemOperand());
2885 std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);
2886 return DAG.getMergeValues({P.first, P.second}, dl);
2889 // The code below generates two loads, both aligned as NeedAlign, and
2890 // with the distance of NeedAlign between them. For that to cover the
2891 // bits that need to be loaded (and without overlapping), the size of
2892 // the loads should be equal to NeedAlign. This is true for all loadable
2893 // types, but add an assertion in case something changes in the future.
2894 assert(LoadTy.getSizeInBits() == 8*NeedAlign);
2896 unsigned LoadLen = NeedAlign;
2897 SDValue Base = LN->getBasePtr();
2898 SDValue Chain = LN->getChain();
2899 auto BO = getBaseAndOffset(Base);
2900 unsigned BaseOpc = BO.first.getOpcode();
2901 if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0)
2904 if (BO.second % LoadLen != 0) {
2905 BO.first = DAG.getNode(ISD::ADD, dl, MVT::i32, BO.first,
2906 DAG.getConstant(BO.second % LoadLen, dl, MVT::i32));
2907 BO.second -= BO.second % LoadLen;
2909 SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR)
2910 ? DAG.getNode(HexagonISD::VALIGNADDR, dl, MVT::i32, BO.first,
2911 DAG.getConstant(NeedAlign, dl, MVT::i32))
2913 SDValue Base0 = DAG.getMemBasePlusOffset(BaseNoOff, BO.second, dl);
2914 SDValue Base1 = DAG.getMemBasePlusOffset(BaseNoOff, BO.second+LoadLen, dl);
2916 MachineMemOperand *WideMMO = nullptr;
2917 if (MachineMemOperand *MMO = LN->getMemOperand()) {
2918 MachineFunction &MF = DAG.getMachineFunction();
2919 WideMMO = MF.getMachineMemOperand(
2920 MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen),
2921 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
2922 MMO->getOrdering(), MMO->getFailureOrdering());
2925 SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);
2926 SDValue Load1 = DAG.getLoad(LoadTy, dl, Chain, Base1, WideMMO);
2928 SDValue Aligned = DAG.getNode(HexagonISD::VALIGN, dl, LoadTy,
2929 {Load1, Load0, BaseNoOff.getOperand(0)});
2930 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2931 Load0.getValue(1), Load1.getValue(1));
2932 SDValue M = DAG.getMergeValues({Aligned, NewChain}, dl);
2937 HexagonTargetLowering::LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const {
2938 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
2939 auto *CY = dyn_cast<ConstantSDNode>(Y);
2943 const SDLoc &dl(Op);
2944 SDVTList VTs = Op.getNode()->getVTList();
2945 assert(VTs.NumVTs == 2);
2946 assert(VTs.VTs[1] == MVT::i1);
2947 unsigned Opc = Op.getOpcode();
2950 uint32_t VY = CY->getZExtValue();
2951 assert(VY != 0 && "This should have been folded");
2956 if (Opc == ISD::UADDO) {
2957 SDValue Op = DAG.getNode(ISD::ADD, dl, VTs.VTs[0], {X, Y});
2958 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op, getZero(dl, ty(Op), DAG),
2960 return DAG.getMergeValues({Op, Ov}, dl);
2962 if (Opc == ISD::USUBO) {
2963 SDValue Op = DAG.getNode(ISD::SUB, dl, VTs.VTs[0], {X, Y});
2964 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op,
2965 DAG.getConstant(-1, dl, ty(Op)), ISD::SETEQ);
2966 return DAG.getMergeValues({Op, Ov}, dl);
2974 HexagonTargetLowering::LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const {
2975 const SDLoc &dl(Op);
2976 unsigned Opc = Op.getOpcode();
2977 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), C = Op.getOperand(2);
2979 if (Opc == ISD::ADDCARRY)
2980 return DAG.getNode(HexagonISD::ADDC, dl, Op.getNode()->getVTList(),
2983 EVT CarryTy = C.getValueType();
2984 SDValue SubC = DAG.getNode(HexagonISD::SUBC, dl, Op.getNode()->getVTList(),
2985 { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
2986 SDValue Out[] = { SubC.getValue(0),
2987 DAG.getLogicalNOT(dl, SubC.getValue(1), CarryTy) };
2988 return DAG.getMergeValues(Out, dl);
2992 HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
2993 SDValue Chain = Op.getOperand(0);
2994 SDValue Offset = Op.getOperand(1);
2995 SDValue Handler = Op.getOperand(2);
2997 auto PtrVT = getPointerTy(DAG.getDataLayout());
2999 // Mark function as containing a call to EH_RETURN.
3000 HexagonMachineFunctionInfo *FuncInfo =
3001 DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
3002 FuncInfo->setHasEHReturn();
3004 unsigned OffsetReg = Hexagon::R28;
3007 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT),
3008 DAG.getIntPtrConstant(4, dl));
3009 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
3010 Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
3012 // Not needed we already use it as explict input to EH_RETURN.
3013 // MF.getRegInfo().addLiveOut(OffsetReg);
3015 return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
3019 HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3020 unsigned Opc = Op.getOpcode();
3022 // Handle INLINEASM first.
3023 if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR)
3024 return LowerINLINEASM(Op, DAG);
3026 if (isHvxOperation(Op)) {
3027 // If HVX lowering returns nothing, try the default lowering.
3028 if (SDValue V = LowerHvxOperation(Op, DAG))
3035 Op.getNode()->dumpr(&DAG);
3036 if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END)
3037 errs() << "Error: check for a non-legal type in this operation\n";
3039 llvm_unreachable("Should not custom lower this!");
3040 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
3041 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG);
3042 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
3043 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
3044 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3045 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
3046 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3047 case ISD::BITCAST: return LowerBITCAST(Op, DAG);
3048 case ISD::LOAD: return LowerLoad(Op, DAG);
3049 case ISD::STORE: return LowerStore(Op, DAG);
3051 case ISD::USUBO: return LowerUAddSubO(Op, DAG);
3053 case ISD::SUBCARRY: return LowerAddSubCarry(Op, DAG);
3056 case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG);
3057 case ISD::ROTL: return LowerROTL(Op, DAG);
3058 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3059 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
3060 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
3061 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
3062 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
3063 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3064 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
3065 case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
3066 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3067 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
3068 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
3069 case ISD::VASTART: return LowerVASTART(Op, DAG);
3070 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
3071 case ISD::SETCC: return LowerSETCC(Op, DAG);
3072 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
3073 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3074 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3075 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG);
3076 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
3084 HexagonTargetLowering::LowerOperationWrapper(SDNode *N,
3085 SmallVectorImpl<SDValue> &Results,
3086 SelectionDAG &DAG) const {
3087 if (isHvxOperation(N)) {
3088 LowerHvxOperationWrapper(N, Results, DAG);
3089 if (!Results.empty())
3093 // We are only custom-lowering stores to verify the alignment of the
3094 // address if it is a compile-time constant. Since a store can be modified
3095 // during type-legalization (the value being stored may need legalization),
3096 // return empty Results here to indicate that we don't really make any
3097 // changes in the custom lowering.
3098 if (N->getOpcode() != ISD::STORE)
3099 return TargetLowering::LowerOperationWrapper(N, Results, DAG);
3103 HexagonTargetLowering::ReplaceNodeResults(SDNode *N,
3104 SmallVectorImpl<SDValue> &Results,
3105 SelectionDAG &DAG) const {
3106 if (isHvxOperation(N)) {
3107 ReplaceHvxNodeResults(N, Results, DAG);
3108 if (!Results.empty())
3113 switch (N->getOpcode()) {
3119 // Handle a bitcast from v8i1 to i8.
3120 if (N->getValueType(0) == MVT::i8) {
3121 SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
3122 N->getOperand(0), DAG);
3123 SDValue T = DAG.getAnyExtOrTrunc(P, dl, MVT::i8);
3124 Results.push_back(T);
3131 HexagonTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
3134 if (isHvxOperation(Op)) {
3135 if (SDValue V = PerformHvxDAGCombine(N, DCI))
3140 const SDLoc &dl(Op);
3141 unsigned Opc = Op.getOpcode();
3143 if (Opc == HexagonISD::P2D) {
3144 SDValue P = Op.getOperand(0);
3145 switch (P.getOpcode()) {
3146 case HexagonISD::PTRUE:
3147 return DCI.DAG.getConstant(-1, dl, ty(Op));
3148 case HexagonISD::PFALSE:
3149 return getZero(dl, ty(Op), DCI.DAG);
3153 } else if (Opc == ISD::VSELECT) {
3154 // This is pretty much duplicated in HexagonISelLoweringHVX...
3156 // (vselect (xor x, ptrue), v0, v1) -> (vselect x, v1, v0)
3157 SDValue Cond = Op.getOperand(0);
3158 if (Cond->getOpcode() == ISD::XOR) {
3159 SDValue C0 = Cond.getOperand(0), C1 = Cond.getOperand(1);
3160 if (C1->getOpcode() == HexagonISD::PTRUE) {
3161 SDValue VSel = DCI.DAG.getNode(ISD::VSELECT, dl, ty(Op), C0,
3162 Op.getOperand(2), Op.getOperand(1));
3171 /// Returns relocation base for the given PIC jumptable.
3173 HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3174 SelectionDAG &DAG) const {
3175 int Idx = cast<JumpTableSDNode>(Table)->getIndex();
3176 EVT VT = Table.getValueType();
3177 SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
3178 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Table), VT, T);
3181 //===----------------------------------------------------------------------===//
3182 // Inline Assembly Support
3183 //===----------------------------------------------------------------------===//
3185 TargetLowering::ConstraintType
3186 HexagonTargetLowering::getConstraintType(StringRef Constraint) const {
3187 if (Constraint.size() == 1) {
3188 switch (Constraint[0]) {
3191 if (Subtarget.useHVXOps())
3192 return C_RegisterClass;
3195 return C_RegisterClass;
3200 return TargetLowering::getConstraintType(Constraint);
3203 std::pair<unsigned, const TargetRegisterClass*>
3204 HexagonTargetLowering::getRegForInlineAsmConstraint(
3205 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
3207 if (Constraint.size() == 1) {
3208 switch (Constraint[0]) {
3210 switch (VT.SimpleTy) {
3212 return {0u, nullptr};
3218 return {0u, &Hexagon::IntRegsRegClass};
3221 return {0u, &Hexagon::DoubleRegsRegClass};
3226 return {0u, nullptr};
3227 return {0u, &Hexagon::ModRegsRegClass};
3229 switch (VT.getSizeInBits()) {
3231 return {0u, nullptr};
3234 return {0u, &Hexagon::HvxQRRegClass};
3238 switch (VT.getSizeInBits()) {
3240 return {0u, nullptr};
3242 return {0u, &Hexagon::HvxVRRegClass};
3244 if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps())
3245 return {0u, &Hexagon::HvxVRRegClass};
3246 return {0u, &Hexagon::HvxWRRegClass};
3248 return {0u, &Hexagon::HvxWRRegClass};
3252 return {0u, nullptr};
3256 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3259 /// isFPImmLegal - Returns true if the target can instruction select the
3260 /// specified FP immediate natively. If false, the legalizer will
3261 /// materialize the FP immediate as a load from a constant pool.
3262 bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3263 bool ForCodeSize) const {
3267 /// isLegalAddressingMode - Return true if the addressing mode represented by
3268 /// AM is legal for this target, for a load/store of the specified type.
3269 bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
3270 const AddrMode &AM, Type *Ty,
3271 unsigned AS, Instruction *I) const {
3272 if (Ty->isSized()) {
3273 // When LSR detects uses of the same base address to access different
3274 // types (e.g. unions), it will assume a conservative type for these
3276 // LSR Use: Kind=Address of void in addrspace(4294967295), ...
3277 // The type Ty passed here would then be "void". Skip the alignment
3278 // checks, but do not return false right away, since that confuses
3279 // LSR into crashing.
3280 Align A = DL.getABITypeAlign(Ty);
3281 // The base offset must be a multiple of the alignment.
3282 if (!isAligned(A, AM.BaseOffs))
3284 // The shifted offset must fit in 11 bits.
3285 if (!isInt<11>(AM.BaseOffs >> Log2(A)))
3289 // No global is ever allowed as a base.
3293 int Scale = AM.Scale;
3297 case 0: // No scale reg, "r+i", "r", or just "i".
3299 default: // No scaled addressing mode.
3305 /// Return true if folding a constant offset with the given GlobalAddress is
3306 /// legal. It is frequently not legal in PIC relocation models.
3307 bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
3309 return HTM.getRelocationModel() == Reloc::Static;
3312 /// isLegalICmpImmediate - Return true if the specified immediate is legal
3313 /// icmp immediate, that is the target has icmp instructions which can compare
3314 /// a register against the immediate without having to materialize the
3315 /// immediate into a register.
3316 bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3317 return Imm >= -512 && Imm <= 511;
3320 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3321 /// for tail call optimization. Targets which want to do tail call
3322 /// optimization should implement this function.
3323 bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
3325 CallingConv::ID CalleeCC,
3327 bool IsCalleeStructRet,
3328 bool IsCallerStructRet,
3329 const SmallVectorImpl<ISD::OutputArg> &Outs,
3330 const SmallVectorImpl<SDValue> &OutVals,
3331 const SmallVectorImpl<ISD::InputArg> &Ins,
3332 SelectionDAG& DAG) const {
3333 const Function &CallerF = DAG.getMachineFunction().getFunction();
3334 CallingConv::ID CallerCC = CallerF.getCallingConv();
3335 bool CCMatch = CallerCC == CalleeCC;
3337 // ***************************************************************************
3338 // Look for obvious safe cases to perform tail call optimization that do not
3339 // require ABI changes.
3340 // ***************************************************************************
3342 // If this is a tail call via a function pointer, then don't do it!
3343 if (!isa<GlobalAddressSDNode>(Callee) &&
3344 !isa<ExternalSymbolSDNode>(Callee)) {
3348 // Do not optimize if the calling conventions do not match and the conventions
3349 // used are not C or Fast.
3351 bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast);
3352 bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast);
3353 // If R & E, then ok.
3358 // Do not tail call optimize vararg calls.
3362 // Also avoid tail call optimization if either caller or callee uses struct
3363 // return semantics.
3364 if (IsCalleeStructRet || IsCallerStructRet)
3367 // In addition to the cases above, we also disable Tail Call Optimization if
3368 // the calling convention code that at least one outgoing argument needs to
3369 // go on the stack. We cannot check that here because at this point that
3370 // information is not available.
3374 /// Returns the target specific optimal type for load and store operations as
3375 /// a result of memset, memcpy, and memmove lowering.
3377 /// If DstAlign is zero that means it's safe to destination alignment can
3378 /// satisfy any constraint. Similarly if SrcAlign is zero it means there isn't
3379 /// a need to check it against alignment requirement, probably because the
3380 /// source does not need to be loaded. If 'IsMemset' is true, that means it's
3381 /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
3382 /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
3383 /// does not need to be loaded. It returns EVT::Other if the type should be
3384 /// determined using generic target-independent logic.
3385 EVT HexagonTargetLowering::getOptimalMemOpType(
3386 const MemOp &Op, const AttributeList &FuncAttributes) const {
3387 if (Op.size() >= 8 && Op.isAligned(Align(8)))
3389 if (Op.size() >= 4 && Op.isAligned(Align(4)))
3391 if (Op.size() >= 2 && Op.isAligned(Align(2)))
3396 bool HexagonTargetLowering::allowsMemoryAccess(
3397 LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
3398 Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
3399 MVT SVT = VT.getSimpleVT();
3400 if (Subtarget.isHVXVectorType(SVT, true))
3401 return allowsHvxMemoryAccess(SVT, Flags, Fast);
3402 return TargetLoweringBase::allowsMemoryAccess(
3403 Context, DL, VT, AddrSpace, Alignment, Flags, Fast);
3406 bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
3407 EVT VT, unsigned AddrSpace, unsigned Alignment,
3408 MachineMemOperand::Flags Flags, bool *Fast) const {
3409 MVT SVT = VT.getSimpleVT();
3410 if (Subtarget.isHVXVectorType(SVT, true))
3411 return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
3417 std::pair<const TargetRegisterClass*, uint8_t>
3418 HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
3420 if (Subtarget.isHVXVectorType(VT, true)) {
3421 unsigned BitWidth = VT.getSizeInBits();
3422 unsigned VecWidth = Subtarget.getVectorLength() * 8;
3424 if (VT.getVectorElementType() == MVT::i1)
3425 return std::make_pair(&Hexagon::HvxQRRegClass, 1);
3426 if (BitWidth == VecWidth)
3427 return std::make_pair(&Hexagon::HvxVRRegClass, 1);
3428 assert(BitWidth == 2 * VecWidth);
3429 return std::make_pair(&Hexagon::HvxWRRegClass, 1);
3432 return TargetLowering::findRepresentativeClass(TRI, VT);
3435 bool HexagonTargetLowering::shouldReduceLoadWidth(SDNode *Load,
3436 ISD::LoadExtType ExtTy, EVT NewVT) const {
3437 // TODO: This may be worth removing. Check regression tests for diffs.
3438 if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
3441 auto *L = cast<LoadSDNode>(Load);
3442 std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr());
3443 // Small-data object, do not shrink.
3444 if (BO.first.getOpcode() == HexagonISD::CONST32_GP)
3446 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(BO.first)) {
3447 auto &HTM = static_cast<const HexagonTargetMachine&>(getTargetMachine());
3448 const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
3449 return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM);
3454 Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
3455 AtomicOrdering Ord) const {
3456 BasicBlock *BB = Builder.GetInsertBlock();
3457 Module *M = BB->getParent()->getParent();
3458 auto PT = cast<PointerType>(Addr->getType());
3459 Type *Ty = PT->getElementType();
3460 unsigned SZ = Ty->getPrimitiveSizeInBits();
3461 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
3462 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
3463 : Intrinsic::hexagon_L4_loadd_locked;
3464 Function *Fn = Intrinsic::getDeclaration(M, IntID);
3466 PointerType *NewPtrTy
3467 = Builder.getIntNTy(SZ)->getPointerTo(PT->getAddressSpace());
3468 Addr = Builder.CreateBitCast(Addr, NewPtrTy);
3470 Value *Call = Builder.CreateCall(Fn, Addr, "larx");
3472 return Builder.CreateBitCast(Call, Ty);
3475 /// Perform a store-conditional operation to Addr. Return the status of the
3476 /// store. This should be 0 if the store succeeded, non-zero otherwise.
3477 Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
3478 Value *Val, Value *Addr, AtomicOrdering Ord) const {
3479 BasicBlock *BB = Builder.GetInsertBlock();
3480 Module *M = BB->getParent()->getParent();
3481 Type *Ty = Val->getType();
3482 unsigned SZ = Ty->getPrimitiveSizeInBits();
3484 Type *CastTy = Builder.getIntNTy(SZ);
3485 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
3486 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
3487 : Intrinsic::hexagon_S4_stored_locked;
3488 Function *Fn = Intrinsic::getDeclaration(M, IntID);
3490 unsigned AS = Addr->getType()->getPointerAddressSpace();
3491 Addr = Builder.CreateBitCast(Addr, CastTy->getPointerTo(AS));
3492 Val = Builder.CreateBitCast(Val, CastTy);
3494 Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
3495 Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
3496 Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
3500 TargetLowering::AtomicExpansionKind
3501 HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
3502 // Do not expand loads and stores that don't exceed 64 bits.
3503 return LI->getType()->getPrimitiveSizeInBits() > 64
3504 ? AtomicExpansionKind::LLOnly
3505 : AtomicExpansionKind::None;
3508 bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
3509 // Do not expand loads and stores that don't exceed 64 bits.
3510 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
3513 TargetLowering::AtomicExpansionKind
3514 HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
3515 AtomicCmpXchgInst *AI) const {
3516 return AtomicExpansionKind::LLSC;