//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//

static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert (ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}

static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get first reg.
  if (unsigned Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign whole thing in stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8,4),
                                           LocVT, LocInfo));
    return true;
  }

  // Try to get second reg.
  if (unsigned Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(4,4),
                                           LocVT, LocInfo));
  return true;
}

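// For example, an i64 argument here splits into two custom i32 locations:
// if two registers are free, the halves travel in consecutive registers
// (say %i0/%i1 in the callee's window); if only one register is left, the
// high half takes it and the low half falls back to a 4-byte stack slot.
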
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };

  // Try to get first reg.
  if (unsigned Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get second reg.
  if (unsigned Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}

// Allocate a full-sized argument for the 64-bit ABI.
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size      = (LocVT == MVT::f128) ? 16 : 8;
  unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

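// For example, for f(i64 %a, double %b, float %c) the arguments get offsets
// 0, 8 and 16 in the argument array, so %a is promoted to %i0, %b to D1
// (the hardware %d2) and %c to %f5, while their three 8-byte slots remain
// reserved on the stack.
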
// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, 4);

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

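// For example, passing { float, int } by value allocates two 4-byte slots at
// offsets 0 and 4: the float is promoted to %f0, and the int shares the
// first integer register, landing in the low half of %i0 (offset 4 is not
// 8-byte aligned, so the Custom bit stays clear).
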
#include "SparcGenCallingConv.inc"

// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
                "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}

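// For example, a value that RetCC_Sparc32 assigns to %i0 in the callee's
// window is accessed by the caller as %o0.
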
SDValue
SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}

SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
      // happen by default if this wasn't a legal type)

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
  // If the function returns a struct, copy the SRetReturnReg to I0
  if (MF.getFunction()->hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    unsigned Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
  }

  RetOps[0] = Chain;  // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}

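// Note on RetAddrOffset: a plain SPARC return is "jmp %i7+8", skipping the
// call instruction and its delay slot. For struct-returning functions the
// caller is expected to follow the call with a "unimp <size>" word, so the
// callee returns to %i7+12 to skip that word as well.
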
// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
                                    bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SDLoc &DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}

SDValue SparcTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
                                   DL, DAG, InVals);
  return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
                                 DL, DAG, InVals);
}

/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
/// passed in either one or two GPRs, including FP values. TODO: we should
/// pass FP values in FP registers for fastcc functions.
SDValue SparcTargetLowering::LowerFormalArguments_32(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

  const unsigned StackOffset = 92;
  bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();

  unsigned InIdx = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
    CCValAssign &VA = ArgLocs[i];

    if (Ins[InIdx].Flags.isSRet()) {
      if (InIdx != 0)
        report_fatal_error("sparc only supports sret on the first parameter");

      // Get SRet from [%fp+64].
      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
      SDValue Arg =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      InVals.push_back(Arg);
      continue;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom()) {
        assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

        unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
        MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
        SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);

        assert(i+1 < e);
        CCValAssign &NextVA = ArgLocs[++i];

        SDValue LoVal;
        if (NextVA.isMemLoc()) {
          int FrameIdx = MF.getFrameInfo().
            CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
        } else {
          unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
                                        &SP::IntRegsRegClass);
          LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
        }

        if (IsLittleEndian)
          std::swap(LoVal, HiVal);

        SDValue WholeValue =
          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
        WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
        InVals.push_back(WholeValue);
        continue;
      }
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
      SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      if (VA.getLocVT() == MVT::f32)
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
      else if (VA.getLocVT() != MVT::i32) {
        Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
                          DAG.getValueType(VA.getLocVT()));
        Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
      }
      InVals.push_back(Arg);
      continue;
    }

    assert(VA.isMemLoc());

    unsigned Offset = VA.getLocMemOffset()+StackOffset;
    auto PtrVT = getPointerTy(DAG.getDataLayout());

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
      // If it is double-word aligned, just load.
      if (Offset % 8 == 0) {
        int FI = MF.getFrameInfo().CreateFixedObject(8,
                                                     Offset,
                                                     true);
        SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
        SDValue Load =
            DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
        InVals.push_back(Load);
        continue;
      }

      int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                   Offset,
                                                   true);
      SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
      SDValue HiVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
      int FI2 = MF.getFrameInfo().CreateFixedObject(4,
                                                    Offset+4,
                                                    true);
      SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);

      SDValue LoVal =
          DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());

      if (IsLittleEndian)
        std::swap(LoVal, HiVal);

      SDValue WholeValue =
        DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
      WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
      InVals.push_back(WholeValue);
      continue;
    }

    int FI = MF.getFrameInfo().CreateFixedObject(4,
                                                 Offset,
                                                 true);
    SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
    SDValue Load;
    if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
      Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
    } else if (VA.getValVT() == MVT::f128) {
      report_fatal_error("SPARCv8 does not handle f128 in calls; "
                         "pass indirectly");
    } else {
      // We shouldn't see any other value types here.
      llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
    }
    InVals.push_back(Load);
  }

  if (MF.getFunction()->hasStructRetAttr()) {
    // Copy the SRet Argument to SRetReturnReg.
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    unsigned Reg = SFI->getSRetReturnReg();
    if (!Reg) {
      Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
      SFI->setSRetReturnReg(Reg);
    }
    SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
  }

  // Store remaining ArgRegs to the stack if this is a varargs function.
  if (isVarArg) {
    static const MCPhysReg ArgRegs[] = {
      SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
    };
    unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
    const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
    unsigned ArgOffset = CCInfo.getNextStackOffset();
    if (NumAllocated == 6)
      ArgOffset += StackOffset;
    else {
      assert(!ArgOffset);
      ArgOffset = 68+4*NumAllocated;
    }

    // Remember the vararg offset for the va_start implementation.
    FuncInfo->setVarArgsFrameOffset(ArgOffset);

    std::vector<SDValue> OutChains;

    for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
      unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
      MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);

      int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
                                                         true);
      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);

      OutChains.push_back(
          DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
      ArgOffset += 4;
    }

    if (!OutChains.empty()) {
      OutChains.push_back(Chain);
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
    }
  }

  return Chain;
}

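// Illustrative 32-bit frame layout, as seen by the callee: the sret pointer
// lives at [%fp+64], the six register-argument slots at [%fp+68] through
// [%fp+88] mirror %i0-%i5, and overflow arguments start at [%fp+92], which
// is the StackOffset constant used above.
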
// Lower formal arguments for the 64-bit ABI.
SDValue SparcTargetLowering::LowerFormalArguments_64(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze arguments according to CC_Sparc64.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

  // The argument array begins at %fp+BIAS+128, after the register save area.
  const unsigned ArgArea = 128;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isRegLoc()) {
      // This argument is passed in a register.
      // All integer register arguments are promoted by the caller to i64.

      // Create a virtual register for the promoted live-in value.
      unsigned VReg = MF.addLiveIn(VA.getLocReg(),
                                   getRegClassFor(VA.getLocVT()));
      SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());

      // Get the high bits for i32 struct elements.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
                          DAG.getConstant(32, DL, MVT::i32));

      // The caller promoted the argument, so insert an Assert?ext SDNode so we
      // won't promote the value again in this function.
      switch (VA.getLocInfo()) {
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
                          DAG.getValueType(VA.getValVT()));
        break;
      default:
        break;
      }

      // Truncate the register down to the argument type.
      if (VA.isExtInLoc())
        Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

      InVals.push_back(Arg);
      continue;
    }

    // The registers are exhausted. This argument was passed on the stack.
    assert(VA.isMemLoc());
    // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
    // beginning of the arguments area at %fp+BIAS+128.
    unsigned Offset = VA.getLocMemOffset() + ArgArea;
    unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
    // Adjust offset for extended arguments, SPARC is big-endian.
    // The caller will have written the full slot with extended bytes, but we
    // prefer our own extending loads.
    if (VA.isExtInLoc())
      Offset += 8 - ValSize;
    int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
    InVals.push_back(
        DAG.getLoad(VA.getValVT(), DL, Chain,
                    DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
                    MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!IsVarArg)
    return Chain;

  // This function takes variable arguments, some of which may have been passed
  // in registers %i0-%i5. Variable floating point arguments are never passed
  // in floating point registers. They go on %i0-%i5 or on the stack like
  // integer arguments.
  //
  // The va_start intrinsic needs to know the offset to the first variable
  // argument.
  unsigned ArgOffset = CCInfo.getNextStackOffset();
  SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
  // Skip the 128 bytes of register save area.
  FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
                                  Subtarget->getStackPointerBias());

  // Save the variable arguments that were passed in registers.
  // The caller is required to reserve stack space for 6 arguments regardless
  // of how many arguments were actually passed.
  SmallVector<SDValue, 8> OutChains;
  for (; ArgOffset < 6*8; ArgOffset += 8) {
    unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
    SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
    int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    OutChains.push_back(
        DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
                     MachinePointerInfo::getFixedStack(MF, FI)));
  }

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);

  return Chain;
}

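// Illustrative example: for int f(const char *fmt, ...) called with no
// variadic operands, CC_Sparc64 leaves ArgOffset at 8, so the loop above
// saves %i1-%i5 into their reserved slots and va_arg can then walk the
// contiguous argument array starting at %fp+BIAS+136.
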
SDValue
SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget->is64Bit())
    return LowerCall_64(CLI, InVals);
  return LowerCall_32(CLI, InVals);
}

static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
                                ImmutableCallSite *CS) {
  if (CS)
    return CS->hasFnAttr(Attribute::ReturnsTwice);

  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function *Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn->getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
  }

  if (!CalleeFn)
    return false;
  return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
}

// Lower a call for the 32-bit ABI.
SDValue
SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // Sparc target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

  // Get the size of the outgoing arguments stack space requirement.
  unsigned ArgsSize = CCInfo.getNextStackOffset();

  // Keep stack frames 8-byte aligned.
  ArgsSize = (ArgsSize+7) & ~7;

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    unsigned Align = Flags.getByValAlign();

    if (Size > 0U) {
      int FI = MFI.CreateStackObject(Size, Align, false);
      SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);

      Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
                            false,        // isVolatile,
                            (Size <= 32), // AlwaysInline if size <= 32,
                            false,        // isTailCall
                            MachinePointerInfo(), MachinePointerInfo());
      ByValArgs.push_back(FIPtr);
    }
    else {
      SDValue nullVal;
      ByValArgs.push_back(nullVal);
    }
  }

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
                               dl);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const unsigned StackOffset = 92;
  bool hasStructRetAttr = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];

    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Use local copy if it is a byval arg.
    if (Flags.isByVal()) {
      Arg = ByValArgs[byvalArgIdx++];
      if (!Arg) {
        continue;
      }
    }

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (Flags.isSRet()) {
      assert(VA.needsCustom());
      // store SRet argument in %sp+64
      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
      SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
      MemOpChains.push_back(
          DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
      hasStructRetAttr = true;
      continue;
    }

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);

      if (VA.isMemLoc()) {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // if it is double-word aligned, just store.
        if (Offset % 8 == 0) {
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
          continue;
        }
      }

      if (VA.getLocVT() == MVT::f64) {
        // Move the float value from float registers into the
        // integer registers.
        //
        // TODO: The f64 -> v2i32 conversion is super-inefficient for
        // constants: it sticks them in the constant pool, then loads
        // to a fp register, then stores to temp memory, then loads to
        // integer registers.
        Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
      }

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  Arg,
                                  DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));

      if (VA.isRegLoc()) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
        assert(i+1 != e);
        CCValAssign &NextVA = ArgLocs[++i];
        if (NextVA.isRegLoc()) {
          RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
        } else {
          // Store the second part in stack.
          unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
          SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
          SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
          PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
          MemOpChains.push_back(
              DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
        }
      } else {
        unsigned Offset = VA.getLocMemOffset() + StackOffset;
        // Store the first part.
        SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
        // Store the second part.
        PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
        PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
        MemOpChains.push_back(
            DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
      }
      continue;
    }

    // Arguments that can be passed on register must be kept at
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      if (VA.getLocVT() != MVT::f32) {
        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
        continue;
      }
      Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
                                           dl);
    PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before any copies into physregs.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    unsigned Reg = toCallerWindow(RegsToPass[i].first);
    Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30 : 0;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);

  // Returns a chain & a flag for retval copy to use
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (hasStructRetAttr)
    Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice)
           ? TRI->getRTCallPreservedMask(CallConv)
           : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    if (RVLocs[i].getLocVT() == MVT::v2i32) {
      SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
      SDValue Lo = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
                        DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getCopyFromReg(
          Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
                        DAG.getConstant(1, dl, MVT::i32));
      InVals.push_back(Vec);
    } else {
      Chain =
          DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
                             RVLocs[i].getValVT(), InFlag)
              .getValue(1);
      InFlag = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    }
  }

  return Chain;
}

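// Illustrative result: a 32-bit call such as g(1, 2.0) passes the int in %o0
// and the two halves of the double in %o1/%o2 (seen by the callee as
// %i0-%i2); any assignments beyond the six registers become stores relative
// to %sp in the [%sp+92] overflow area.
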
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned SparcTargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                                SelectionDAG &DAG) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
    .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
    .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
    .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
    .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
    .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
    .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
    .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
    .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
    .Default(0);

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

// This function returns true if CalleeName is an ABI function that returns
// a long double (fp128).
static bool isFP128ABICall(const char *CalleeName)
{
  static const char *const ABICalls[] =
    {  "_Q_add", "_Q_sub", "_Q_mul", "_Q_div",
       "_Q_sqrt", "_Q_neg",
       "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
       "_Q_lltoq", "_Q_ulltoq",
       nullptr
    };
  for (const char * const *I = ABICalls; *I != nullptr; ++I)
    if (strcmp(CalleeName, *I) == 0)
      return true;
  return false;
}

unsigned
SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
{
  const Function *CalleeFn = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    CalleeFn = dyn_cast<Function>(G->getGlobal());
  } else if (ExternalSymbolSDNode *E =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const Function *Fn = DAG.getMachineFunction().getFunction();
    const Module *M = Fn->getParent();
    const char *CalleeName = E->getSymbol();
    CalleeFn = M->getFunction(CalleeName);
    if (!CalleeFn && isFP128ABICall(CalleeName))
      return 16; // Return sizeof(fp128)
  }

  if (!CalleeFn)
    return 0;

  // It would be nice to check for the sret attribute on CalleeFn here,
  // but since it is not part of the function type, any check will misfire.

  PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
  Type *ElementTy = Ty->getElementType();
  return DAG.getDataLayout().getTypeAllocSize(ElementTy);
}

// Fixup floating point arguments in the ... part of a varargs call.
//
// The SPARC v9 ABI requires that floating point arguments are treated the same
// as integers when calling a varargs function. This does not apply to the
// fixed arguments that are part of the function's prototype.
//
// This function post-processes a CCValAssign array created by
// AnalyzeCallOperands().
static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
                                   ArrayRef<ISD::OutputArg> Outs) {
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    MVT ValTy = VA.getLocVT();
    // FIXME: What about f32 arguments? C promotes them to f64 when calling
    // varargs functions.
    if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
      continue;
    // The fixed arguments to a varargs function still go in FP registers.
    if (Outs[VA.getValNo()].IsFixed)
      continue;

    // This floating point argument should be reassigned.
    CCValAssign NewVA;

    // Determine the offset into the argument array.
    unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
    unsigned argSize  = (ValTy == MVT::f64) ? 8 : 16;
    unsigned Offset = argSize * (VA.getLocReg() - firstReg);
    assert(Offset < 16*8 && "Offset out of range, bad register enum?");

    if (Offset < 6*8) {
      // This argument should go in %i0-%i5.
      unsigned IReg = SP::I0 + Offset/8;
      if (ValTy == MVT::f64)
        // Full register, just bitconvert into i64.
        NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                    IReg, MVT::i64, CCValAssign::BCvt);
      else {
        assert(ValTy == MVT::f128 && "Unexpected type!");
        // Full register, just bitconvert into i128 -- We will lower this into
        // two i64s in LowerCall_64.
        NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
                                          IReg, MVT::i128, CCValAssign::BCvt);
      }
    } else {
      // This needs to go to memory, we're out of integer registers.
      NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                                  Offset, VA.getLocVT(), VA.getLocInfo());
    }
    ArgLocs[i] = NewVA;
  }
}

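// For example, in printf("%f\n", x) the double x lands at offset 8 of the
// argument array and would normally travel in %d2 (LLVM's D1); because it is
// a non-fixed operand it is reassigned here to the integer register %i1
// (the caller's %o1) as a bit-converted i64.
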
// Lower a call for the 64-bit ABI.
SDValue
SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Sparc target does not yet support tail call optimization.
  CLI.IsTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

  // Get the size of the outgoing arguments stack space requirement.
  // The stack offset computed by CC_Sparc64 includes all arguments.
  // Called functions expect 6 argument words to exist in the stack frame, used
  // for their register window.
  unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());

  // Keep stack frames 16-byte aligned.
  ArgsSize = alignTo(ArgsSize, 16);

  // Varargs calls require special treatment.
  if (CLI.IsVarArg)
    fixupVariableFloatArgs(ArgLocs, CLI.Outs);

  // Adjust the stack pointer to make room for the arguments.
  // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
  // with more than 6 arguments.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
                               DL);

  // Collect the set of registers to pass to the function and their values.
  // This will be emitted as a sequence of CopyToReg nodes glued to the call
  // instruction.
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  // Collect chains from all the memory operations that copy arguments to the
  // stack. They must follow the stack pointer adjustment above and precede the
  // call instruction itself.
  SmallVector<SDValue, 8> MemOpChains;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    const CCValAssign &VA = ArgLocs[i];
    SDValue Arg = CLI.OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown location info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
      // SPARC does not support i128 natively. Lower it into two i64, see below.
      if (!VA.needsCustom() || VA.getValVT() != MVT::f128
          || VA.getLocVT() != MVT::i128)
        Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (VA.needsCustom() && VA.getValVT() == MVT::f128
          && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer register reg and reg+1.
        unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
        unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
        SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
        SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
        HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
        SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
        LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);

        // Store to %sp+BIAS+128+Offset
        SDValue Store =
            DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
        // Load into Reg and Reg+1
        SDValue Hi64 =
            DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
        SDValue Lo64 =
            DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
                                            Hi64));
        RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
                                            Lo64));
        continue;
      }

      // The custom bit on an i32 return value indicates that it should be
      // passed in the high bits of the register.
      if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
        Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
                          DAG.getConstant(32, DL, MVT::i32));

        // The next value may go in the low bits of the same register.
        // Handle both at once.
        if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
            ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
          SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
                                   CLI.OutVals[i+1]);
          Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
          // Skip the next value, it's already done.
          ++i;
        }
      }
      RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
      continue;
    }

    assert(VA.isMemLoc());

    // Create a store off the stack pointer for this argument.
    SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
    // The argument area starts at %fp+BIAS+128 in the callee frame,
    // %sp+BIAS+128 in ours.
    SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
                                           Subtarget->getStackPointerBias() +
                                           128, DL);
    PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
    MemOpChains.push_back(
        DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
  }

  // Emit all stores, make sure they occur before the call.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of CopyToReg nodes glued together with token chain and
  // glue operands which copy the outgoing args into registers. The InGlue is
  // necessary since all emitted instructions must be stuck together in order
  // to pass the live physical registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, DL,
                             RegsToPass[i].first, RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  SDValue Callee = CLI.Callee;
  bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
  unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30 : 0;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);

  // Build the operands for the call instruction itself.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *Mask =
      ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
                         : TRI->getCallPreservedMask(DAG.getMachineFunction(),
                                                     CLI.CallConv));
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Make sure the CopyToReg nodes are glued to the call instruction which
  // consumes the registers.
  if (InGlue.getNode())
    Ops.push_back(InGlue);

  // Now the call itself.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  // Revert the stack pointer immediately after the call.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
  InGlue = Chain.getValue(1);

  // Now extract the return values. This is more or less the same as
  // LowerFormalArguments_64.

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Set inreg flag manually for codegen generated library calls that
  // return float.
  if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == nullptr)
    CLI.Ins[0].Flags.setInReg();

  RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    unsigned Reg = toCallerWindow(VA.getLocReg());

    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
    // reside in the same register in the high and low bits. Reuse the
    // CopyFromReg previous node to avoid duplicate copies.
    SDValue RV;
    if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
      if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
        RV = Chain.getValue(0);

    // But usually we'll create a new CopyFromReg for a different register.
    if (!RV.getNode()) {
      RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
      Chain = RV.getValue(1);
      InGlue = Chain.getValue(2);
    }

    // Get the high bits for i32 struct elements.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom())
      RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
                       DAG.getConstant(32, DL, MVT::i32));

    // The callee promoted the return value, so insert an Assert?ext SDNode so
    // we won't promote the value again in this function.
    switch (VA.getLocInfo()) {
    case CCValAssign::SExt:
      RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    case CCValAssign::ZExt:
      RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
                       DAG.getValueType(VA.getValVT()));
      break;
    default:
      break;
    }

    // Truncate the register down to the return value type.
    if (VA.isExtInLoc())
      RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);

    InVals.push_back(RV);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

TargetLowering::AtomicExpansionKind
SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  if (AI->getOperation() == AtomicRMWInst::Xchg &&
      AI->getType()->getPrimitiveSizeInBits() == 32)
    return AtomicExpansionKind::None; // Uses xchg instruction

  return AtomicExpansionKind::CmpXChg;
}

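// As a result, "atomicrmw xchg" on i32 keeps its native form (SPARC's swap
// instruction), while all other atomicrmw operations are rewritten by
// AtomicExpandPass into compare-and-swap loops.
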
/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown integer condition code!");
  case ISD::SETEQ:  return SPCC::ICC_E;
  case ISD::SETNE:  return SPCC::ICC_NE;
  case ISD::SETLT:  return SPCC::ICC_L;
  case ISD::SETGT:  return SPCC::ICC_G;
  case ISD::SETLE:  return SPCC::ICC_LE;
  case ISD::SETGE:  return SPCC::ICC_GE;
  case ISD::SETULT: return SPCC::ICC_CS;
  case ISD::SETULE: return SPCC::ICC_LEU;
  case ISD::SETUGT: return SPCC::ICC_GU;
  case ISD::SETUGE: return SPCC::ICC_CC;
  }
}

/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETEQ:
  case ISD::SETOEQ: return SPCC::FCC_E;
  case ISD::SETNE:
  case ISD::SETUNE: return SPCC::FCC_NE;
  case ISD::SETLT:
  case ISD::SETOLT: return SPCC::FCC_L;
  case ISD::SETGT:
  case ISD::SETOGT: return SPCC::FCC_G;
  case ISD::SETLE:
  case ISD::SETOLE: return SPCC::FCC_LE;
  case ISD::SETGE:
  case ISD::SETOGE: return SPCC::FCC_GE;
  case ISD::SETULT: return SPCC::FCC_UL;
  case ISD::SETULE: return SPCC::FCC_ULE;
  case ISD::SETUGT: return SPCC::FCC_UG;
  case ISD::SETUGE: return SPCC::FCC_UGE;
  case ISD::SETUO:  return SPCC::FCC_U;
  case ISD::SETO:   return SPCC::FCC_O;
  case ISD::SETONE: return SPCC::FCC_LG;
  case ISD::SETUEQ: return SPCC::FCC_UE;
  }
}

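// The ordering-agnostic codes (SETEQ, SETNE, SETLT, ...) deliberately share
// entries with their ordered forms above; SETONE maps to FCC_LG ("less or
// greater") and SETUEQ to FCC_UE ("unordered or equal").
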
SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
                                         const SparcSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());

  // Instructions which use registers as conditionals examine all the
  // bits (as does the pseudo SELECT_CC expansion). I don't think it
  // matters much whether it's ZeroOrOneBooleanContent, or
  // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
  // former.
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
  if (!Subtarget->useSoftFloat()) {
    addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
    addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
    addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
  }
  if (Subtarget->is64Bit()) {
    addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
  } else {
    // On 32bit sparc, we define a double-register 32bit register
    // class, as well. This is modeled in LLVM as a 2-vector of i32.
    addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);

    // ...but almost all operations must be expanded, so set that as
    // the default.
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      setOperationAction(Op, MVT::v2i32, Expand);
    }
    // Truncating/extending stores/loads are also not supported.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);

      setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);

      setTruncStoreAction(VT, MVT::v2i32, Expand);
      setTruncStoreAction(MVT::v2i32, VT, Expand);
    }
    // However, load and store *are* legal.
    setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
    setOperationAction(ISD::STORE, MVT::v2i32, Legal);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);

    // And we need to promote i64 loads/stores into vector load/store
    setOperationAction(ISD::LOAD, MVT::i64, Custom);
    setOperationAction(ISD::STORE, MVT::i64, Custom);

    // Sadly, this doesn't work:
    //    AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
    //    AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
  }

  // Turn FP extload into load/fpextend
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
  }

  // Sparc doesn't have i1 sign extending load
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // Custom legalize GlobalAddress nodes into LO/HI parts.
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);

  // Sparc doesn't have sext_inreg, replace them with shl/sra
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);

1537 // Sparc has no REM or DIVREM operations.
1538 setOperationAction(ISD::UREM, MVT::i32, Expand);
1539 setOperationAction(ISD::SREM, MVT::i32, Expand);
1540 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1541 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1543 // ... nor does SparcV9.
1544 if (Subtarget->is64Bit()) {
1545 setOperationAction(ISD::UREM, MVT::i64, Expand);
1546 setOperationAction(ISD::SREM, MVT::i64, Expand);
1547 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1548 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1551 // Custom expand fp<->sint
1552 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1553 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1554 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
1555 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
1557 // Custom Expand fp<->uint
1558 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1559 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1560 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
1561 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
1563 setOperationAction(ISD::BITCAST, MVT::f32, Expand);
1564 setOperationAction(ISD::BITCAST, MVT::i32, Expand);
1566 // Sparc has no select or setcc: expand to SELECT_CC.
1567 setOperationAction(ISD::SELECT, MVT::i32, Expand);
1568 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1569 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1570 setOperationAction(ISD::SELECT, MVT::f128, Expand);
1572 setOperationAction(ISD::SETCC, MVT::i32, Expand);
1573 setOperationAction(ISD::SETCC, MVT::f32, Expand);
1574 setOperationAction(ISD::SETCC, MVT::f64, Expand);
1575 setOperationAction(ISD::SETCC, MVT::f128, Expand);
1577 // Sparc doesn't have BRCOND either, it has BR_CC.
1578 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1579 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1580 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1581 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1582 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1583 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1584 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
1586 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1587 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1588 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1589 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1591 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
1592 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
1594 if (Subtarget->is64Bit()) {
1595 setOperationAction(ISD::ADDC, MVT::i64, Custom);
1596 setOperationAction(ISD::ADDE, MVT::i64, Custom);
1597 setOperationAction(ISD::SUBC, MVT::i64, Custom);
1598 setOperationAction(ISD::SUBE, MVT::i64, Custom);
1599 setOperationAction(ISD::BITCAST, MVT::f64, Expand);
1600 setOperationAction(ISD::BITCAST, MVT::i64, Expand);
1601 setOperationAction(ISD::SELECT, MVT::i64, Expand);
1602 setOperationAction(ISD::SETCC, MVT::i64, Expand);
1603 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
1604 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1606 setOperationAction(ISD::CTPOP, MVT::i64,
1607 Subtarget->usePopc() ? Legal : Expand);
1608 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
1609 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
1610 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
1611 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1612 setOperationAction(ISD::ROTR , MVT::i64, Expand);
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  }

  // Atomics are supported on SparcV9. 32-bit atomics are also
  // supported by some Leon SparcV8 variants. Otherwise, atomics
  // are unsupported.
  if (Subtarget->isV9())
1621 setMaxAtomicSizeInBitsSupported(64);
1622 else if (Subtarget->hasLeonCasa())
    setMaxAtomicSizeInBitsSupported(32);
  else
    setMaxAtomicSizeInBitsSupported(0);
1627 setMinCmpXchgSizeInBits(32);
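  // Illustrative note (example only, not from the original source): under
  // this configuration an IR-level
  //   %old = atomicrmw xchg i32* %p, i32 %v monotonic
  // stays legal and selects to SWAP on every subtarget, while wider or
  // unsupported atomics are rewritten into __atomic_* libcalls by
  // AtomicExpandPass before instruction selection.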
1629 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
1631 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
1633 // Custom Lower Atomic LOAD/STORE
1634 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1635 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1637 if (Subtarget->is64Bit()) {
1638 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
1639 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
1640 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
  }
1644 if (!Subtarget->is64Bit()) {
1645 // These libcalls are not available in 32-bit.
1646 setLibcallName(RTLIB::SHL_I128, nullptr);
1647 setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }
1651 if (!Subtarget->isV9()) {
1652 // SparcV8 does not have FNEGD and FABSD.
1653 setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f64, Custom);
  }
1657 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1658 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1659 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
1660 setOperationAction(ISD::FREM , MVT::f128, Expand);
1661 setOperationAction(ISD::FMA , MVT::f128, Expand);
1662 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1663 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1664 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1665 setOperationAction(ISD::FREM , MVT::f64, Expand);
1666 setOperationAction(ISD::FMA , MVT::f64, Expand);
1667 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1668 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1669 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1670 setOperationAction(ISD::FREM , MVT::f32, Expand);
1671 setOperationAction(ISD::FMA , MVT::f32, Expand);
1672 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
1673 setOperationAction(ISD::CTLZ , MVT::i32, Expand);
1674 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1675 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1676 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1677 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1678 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1679 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1680 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1681 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1682 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1684 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1685 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1686 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1688 // Expands to [SU]MUL_LOHI.
1689 setOperationAction(ISD::MULHU, MVT::i32, Expand);
1690 setOperationAction(ISD::MULHS, MVT::i32, Expand);
1691 setOperationAction(ISD::MUL, MVT::i32, Expand);
1693 if (Subtarget->is64Bit()) {
1694 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
1695 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
1696 setOperationAction(ISD::MULHU, MVT::i64, Expand);
1697 setOperationAction(ISD::MULHS, MVT::i64, Expand);
1699 setOperationAction(ISD::UMULO, MVT::i64, Custom);
1700 setOperationAction(ISD::SMULO, MVT::i64, Custom);
1702 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
1703 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  }
1707 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1708 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1709 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1710 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1712 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1714 // Use the default implementation.
1715 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1716 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1717 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1718 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1719 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
1721 setStackPointerRegisterToSaveRestore(SP::O6);
1723 setOperationAction(ISD::CTPOP, MVT::i32,
1724 Subtarget->usePopc() ? Legal : Expand);
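  // For example (sketch): with the popc feature enabled, (ctpop i32 %x)
  // selects directly to the V9 POPC instruction; without it, generic DAG
  // legalization expands the node into the usual shift-and-mask
  // bit-counting sequence.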
1726 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1727 setOperationAction(ISD::LOAD, MVT::f128, Legal);
    setOperationAction(ISD::STORE, MVT::f128, Legal);
  } else {
    setOperationAction(ISD::LOAD, MVT::f128, Custom);
    setOperationAction(ISD::STORE, MVT::f128, Custom);
  }

  if (Subtarget->hasHardQuad()) {
1735 setOperationAction(ISD::FADD, MVT::f128, Legal);
1736 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1737 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1738 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1739 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1740 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1741 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1742 if (Subtarget->isV9()) {
1743 setOperationAction(ISD::FNEG, MVT::f128, Legal);
      setOperationAction(ISD::FABS, MVT::f128, Legal);
    } else {
      setOperationAction(ISD::FNEG, MVT::f128, Custom);
      setOperationAction(ISD::FABS, MVT::f128, Custom);
    }
1750 if (!Subtarget->is64Bit()) {
1751 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1752 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1753 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
      setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
    }

  } else {
    // Custom legalize f128 operations.
1760 setOperationAction(ISD::FADD, MVT::f128, Custom);
1761 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1762 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1763 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1764 setOperationAction(ISD::FSQRT, MVT::f128, Custom);
1765 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1766 setOperationAction(ISD::FABS, MVT::f128, Custom);
1768 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
1769 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
1770 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
1772 // Setup Runtime library names.
1773 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1774 setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1775 setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1776 setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1777 setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1778 setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1779 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1780 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1781 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1782 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1783 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1784 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1785 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1786 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1787 setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1788 setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1789 setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1790 setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1791 } else if (!Subtarget->useSoftFloat()) {
1792 setLibcallName(RTLIB::ADD_F128, "_Q_add");
1793 setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1794 setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1795 setLibcallName(RTLIB::DIV_F128, "_Q_div");
1796 setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1797 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1798 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1799 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1800 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1801 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1802 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1803 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1804 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1805 setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1806 setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1807 setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
      setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
    }
  }
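  // Illustration (assumed ABI detail, not spelled out here): with the names
  // above, an IR-level
  //   %r = fadd fp128 %a, %b
  // on 32-bit SPARC becomes roughly a call _Q_add(&res, &a, &b) with all
  // f128 values passed indirectly, while 64-bit SPARC calls the equivalent
  // _Qp_add entry point; see LowerF128Op further down.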
1812 if (Subtarget->fixAllFDIVSQRT()) {
    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead, as
    // the former instructions are affected by errata on LEON processors.
    setOperationAction(ISD::FDIV, MVT::f32, Promote);
    setOperationAction(ISD::FSQRT, MVT::f32, Promote);
  }
1819 if (Subtarget->replaceFMULS()) {
    // Promote FMULS to FMULD instructions instead, as the former
    // instructions are affected by errata on LEON processors.
    setOperationAction(ISD::FMUL, MVT::f32, Promote);
  }
1825 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1827 setMinFunctionAlignment(2);
  computeRegisterProperties(Subtarget->getRegisterInfo());
}

bool SparcTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}
1836 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1837 switch ((SPISD::NodeType)Opcode) {
1838 case SPISD::FIRST_NUMBER: break;
1839 case SPISD::CMPICC: return "SPISD::CMPICC";
1840 case SPISD::CMPFCC: return "SPISD::CMPFCC";
1841 case SPISD::BRICC: return "SPISD::BRICC";
1842 case SPISD::BRXCC: return "SPISD::BRXCC";
1843 case SPISD::BRFCC: return "SPISD::BRFCC";
1844 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1845 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1846 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1847 case SPISD::EH_SJLJ_SETJMP: return "SPISD::EH_SJLJ_SETJMP";
1848 case SPISD::EH_SJLJ_LONGJMP: return "SPISD::EH_SJLJ_LONGJMP";
1849 case SPISD::Hi: return "SPISD::Hi";
1850 case SPISD::Lo: return "SPISD::Lo";
1851 case SPISD::FTOI: return "SPISD::FTOI";
1852 case SPISD::ITOF: return "SPISD::ITOF";
1853 case SPISD::FTOX: return "SPISD::FTOX";
1854 case SPISD::XTOF: return "SPISD::XTOF";
1855 case SPISD::CALL: return "SPISD::CALL";
1856 case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1857 case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1858 case SPISD::FLUSHW: return "SPISD::FLUSHW";
1859 case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1860 case SPISD::TLS_LD: return "SPISD::TLS_LD";
  case SPISD::TLS_CALL:        return "SPISD::TLS_CALL";
  }
  return nullptr;
}

EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

/// computeKnownBitsForTargetNode - Determine which bits of Op are known to
/// be zero or one. Op is expected to be a target-specific node. Used by the
/// DAG combiner.
void SparcTargetLowering::computeKnownBitsForTargetNode
                                (const SDValue Op,
                                 APInt &KnownZero,
                                 APInt &KnownOne,
1880 const APInt &DemandedElts,
1881 const SelectionDAG &DAG,
1882 unsigned Depth) const {
1883 APInt KnownZero2, KnownOne2;
1884 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
1888 case SPISD::SELECT_ICC:
1889 case SPISD::SELECT_XCC:
1890 case SPISD::SELECT_FCC:
1891 DAG.computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1892 DAG.computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1894 // Only known if known in both the LHS and RHS.
1895 KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  }
}
// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so,
// set LHS/RHS to the LHS/RHS of the setcc and SPCC to the condition.
1903 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
1904 ISD::CondCode CC, unsigned &SPCC) {
  if (isNullConstant(RHS) &&
      CC == ISD::SETNE &&
1907 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1908 LHS.getOpcode() == SPISD::SELECT_XCC) &&
1909 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1910 (LHS.getOpcode() == SPISD::SELECT_FCC &&
1911 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
1912 isOneConstant(LHS.getOperand(0)) &&
1913 isNullConstant(LHS.getOperand(1))) {
1914 SDValue CMPCC = LHS.getOperand(3);
1915 SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
1916 LHS = CMPCC.getOperand(0);
    RHS = CMPCC.getOperand(1);
  }
}
1921 // Convert to a target node and set target flags.
1922 SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
1923 SelectionDAG &DAG) const {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return DAG.getTargetGlobalAddress(GA->getGlobal(),
                                      SDLoc(GA),
                                      GA->getValueType(0),
                                      GA->getOffset(), TF);

  if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
    return DAG.getTargetConstantPool(CP->getConstVal(),
                                     CP->getValueType(0),
                                     CP->getAlignment(),
                                     CP->getOffset(), TF);

  if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
    return DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                     Op.getValueType(),
                                     0,
                                     TF);

  if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
    return DAG.getTargetExternalSymbol(ES->getSymbol(),
                                       ES->getValueType(0), TF);

  llvm_unreachable("Unhandled address SDNode");
}
1949 // Split Op into high and low parts according to HiTF and LoTF.
1950 // Return an ADD node combining the parts.
1951 SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
1952 unsigned HiTF, unsigned LoTF,
                                          SelectionDAG &DAG) const {
  SDLoc DL(Op);
1955 EVT VT = Op.getValueType();
1956 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
1957 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
  return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
}
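// For example, pairing VK_Sparc_HI with VK_Sparc_LO produces the classic
// 32-bit absolute materialization (sketch):
//   sethi %hi(sym), %tmp
//   or    %tmp, %lo(sym), %addr
// where SPISD::Hi selects to SETHI and SPISD::Lo to the OR-immediate.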
1961 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
1962 // or ExternalSymbol SDNode.
SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = getPointerTy(DAG.getDataLayout());

  // Handle PIC mode first. SPARC needs a GOT load for every variable.
1968 if (isPositionIndependent()) {
1969 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
1970 SDValue HiLo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
1971 SparcMCExpr::VK_Sparc_GOT10, DAG);
1972 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
1973 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo);
    // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
    // function has calls.
1976 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
1977 MFI.setHasCalls(true);
1978 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // This is one of the absolute code models.
  switch(getTargetMachine().getCodeModel()) {
  default:
    llvm_unreachable("Unsupported absolute code model");
  case CodeModel::Small:
    // abs32.
    return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
                        SparcMCExpr::VK_Sparc_LO, DAG);
  case CodeModel::Medium: {
    // abs44.
1992 SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
1993 SparcMCExpr::VK_Sparc_M44, DAG);
1994 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
1995 SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
1996 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
    return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
  }
  case CodeModel::Large: {
    // abs64.
2001 SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
2002 SparcMCExpr::VK_Sparc_HM, DAG);
2003 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2004 SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
2005 SparcMCExpr::VK_Sparc_LO, DAG);
    return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
  }
  }
}
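// Sketch of the Large-model sequence built above, in assembler terms:
//   sethi %hh(sym), %t1
//   or    %t1, %hm(sym), %t1
//   sllx  %t1, 32, %t1
//   sethi %hi(sym), %t2
//   or    %t2, %lo(sym), %t2
//   add   %t1, %t2, %addr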
2011 SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
2012 SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}
2016 SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
2017 SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}
2021 SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
2022 SelectionDAG &DAG) const {
  return makeAddress(Op, DAG);
}
2026 SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
2029 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2030 if (DAG.getTarget().Options.EmulatedTLS)
2031 return LowerToTLSEmulatedModel(GA, DAG);
2034 const GlobalValue *GV = GA->getGlobal();
2035 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2037 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2039 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2040 unsigned HiTF = ((model == TLSModel::GeneralDynamic)
2041 ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
2042 : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
2043 unsigned LoTF = ((model == TLSModel::GeneralDynamic)
2044 ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
2045 : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
2046 unsigned addTF = ((model == TLSModel::GeneralDynamic)
2047 ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
2048 : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
2049 unsigned callTF = ((model == TLSModel::GeneralDynamic)
2050 ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
2051 : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);
2053 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2054 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2055 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2056 withTargetFlags(Op, addTF, DAG));
    SDValue Chain = DAG.getEntryNode();
    SDValue InFlag;
2061 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(1, DL, true), DL);
2062 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
2063 InFlag = Chain.getValue(1);
2064 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2065 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2067 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2068 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2069 DAG.getMachineFunction(), CallingConv::C);
2070 assert(Mask && "Missing call preserved mask for calling convention");
    SDValue Ops[] = {Chain,
                     Callee,
                     Symbol,
                     DAG.getRegister(SP::O0, PtrVT),
                     DAG.getRegisterMask(Mask),
                     InFlag};
    Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2078 InFlag = Chain.getValue(1);
2079 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
2080 DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
2081 InFlag = Chain.getValue(1);
2082 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
    if (model != TLSModel::LocalDynamic)
      return Ret;
2087 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2088 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
2089 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2090 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
2091 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2092 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
                       withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
  }
2096 if (model == TLSModel::InitialExec) {
2097 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2098 : SparcMCExpr::VK_Sparc_TLS_IE_LD);
2100 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
    // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
    // function has calls.
2104 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2105 MFI.setHasCalls(true);
2107 SDValue TGA = makeHiLoPair(Op,
2108 SparcMCExpr::VK_Sparc_TLS_IE_HI22,
2109 SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
2110 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
    SDValue Offset = DAG.getNode(SPISD::TLS_LD,
                                 DL, PtrVT, Ptr,
                                 withTargetFlags(Op, ldTF, DAG));
    return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
                       DAG.getRegister(SP::G7, PtrVT), Offset,
                       withTargetFlags(Op,
                                       SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
  }
2120 assert(model == TLSModel::LocalExec);
2121 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2122 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
2123 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2124 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
2125 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2127 return DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getRegister(SP::G7, PtrVT), Offset);
}
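// The local-exec path above corresponds roughly to:
//   sethi %tle_hix22(sym), %t
//   xor   %t, %tle_lox10(sym), %t
//   add   %g7, %t, %addr      ! %g7 holds the thread pointer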
2131 SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
                                                  ArgListTy &Args, SDValue Arg,
                                                  const SDLoc &DL,
2134 SelectionDAG &DAG) const {
2135 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2136 EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty   = ArgTy;
2143 if (ArgTy->isFP128Ty()) {
2144 // Create a stack object and pass the pointer to the library function.
2145 int FI = MFI.CreateStackObject(16, 8, false);
2146 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2147 Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
                         /* Alignment = */ 8);

    Entry.Node = FIPtr;
    Entry.Ty   = PointerType::getUnqual(ArgTy);
  }
  Args.push_back(Entry);
  return Chain;
}

SDValue
2158 SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
2159 const char *LibFuncName,
                                 unsigned numArgs) const {

  ArgListTy Args;
2164 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2165 auto PtrVT = getPointerTy(DAG.getDataLayout());
2167 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2168 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2169 Type *RetTyABI = RetTy;
  SDValue Chain = DAG.getEntryNode();
  SDValue RetPtr;

  if (RetTy->isFP128Ty()) {
    // Create a stack object to receive the return value of type f128.
    ArgListEntry Entry;
    int RetFI = MFI.CreateStackObject(16, 8, false);
2177 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2178 Entry.Node = RetPtr;
2179 Entry.Ty = PointerType::getUnqual(RetTy);
2180 if (!Subtarget->is64Bit())
2181 Entry.IsSRet = true;
2182 Entry.IsReturned = false;
2183 Args.push_back(Entry);
    RetTyABI = Type::getVoidTy(*DAG.getContext());
  }
2187 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2188 for (unsigned i = 0, e = numArgs; i != e; ++i) {
    Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
  }
2191 TargetLowering::CallLoweringInfo CLI(DAG);
2192 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2193 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2195 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  // The result is in the first result; the chain is in the second.
2198 if (RetTyABI == RetTy)
2199 return CallInfo.first;
2201 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2203 Chain = CallInfo.second;
2205 // Load RetPtr to get the return value.
2206 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
                     MachinePointerInfo(), /* Alignment = */ 8);
}
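// Usage sketch (mirroring the LowerOperation cases below): a binary f128
// node is lowered with, e.g.,
//   LowerF128Op(Op, DAG, getLibcallName(RTLIB::ADD_F128), 2);
// which emits the indirect-argument call built above and, for an f128
// result, loads the value back from the sret stack slot.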
2210 SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
2211 unsigned &SPCC, const SDLoc &DL,
2212 SelectionDAG &DAG) const {
2214 const char *LibCall = nullptr;
  bool is64Bit = Subtarget->is64Bit();
  switch(SPCC) {
2217 default: llvm_unreachable("Unhandled conditional code!");
2218 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2219 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2220 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2221 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2222 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
  case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
  case SPCC::FCC_UL : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  case SPCC::FCC_ULE: LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  case SPCC::FCC_UG : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  case SPCC::FCC_UGE: LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  case SPCC::FCC_U  : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  case SPCC::FCC_O  : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  case SPCC::FCC_LG : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
  }
2234 auto PtrVT = getPointerTy(DAG.getDataLayout());
2235 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2236 Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2238 SDValue Chain = DAG.getEntryNode();
2239 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2240 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2242 TargetLowering::CallLoweringInfo CLI(DAG);
2243 CLI.setDebugLoc(DL).setChain(Chain)
2244 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2246 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  // The result is in the first result; the chain is in the second.
  SDValue Result = CallInfo.first;

  switch(SPCC) {
  default: {
    SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UL : {
    SDValue Mask   = DAG.getTargetConstant(1, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS    = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_ULE: {
    SDValue RHS = DAG.getTargetConstant(2, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UG : {
    SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_G;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UGE: {
    SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_U  : {
    SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_O  : {
    SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_LG : {
    SDValue Mask   = DAG.getTargetConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS    = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_NE;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  case SPCC::FCC_UE : {
    SDValue Mask   = DAG.getTargetConstant(3, DL, Result.getValueType());
    Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
    SDValue RHS    = DAG.getTargetConstant(0, DL, Result.getValueType());
    SPCC = SPCC::ICC_E;
    return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
  }
  }
}
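// Note on the recoding above (assumed libcall convention): _Q_cmp/_Qp_cmp
// return 0 for equal, 1 for less, 2 for greater and 3 for unordered, so
// e.g. FCC_UL tests (result & 1) != 0 and FCC_O tests result != 3.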
static SDValue
LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
2309 const SparcTargetLowering &TLI) {
2311 if (Op.getOperand(0).getValueType() == MVT::f64)
2312 return TLI.LowerF128Op(Op, DAG,
2313 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2315 if (Op.getOperand(0).getValueType() == MVT::f32)
2316 return TLI.LowerF128Op(Op, DAG,
2317 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
  llvm_unreachable("fpextend with non-float operand!");
  return SDValue();
}

static SDValue
2324 LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
2325 const SparcTargetLowering &TLI) {
2326 // FP_ROUND on f64 and f32 are legal.
  if (Op.getOperand(0).getValueType() != MVT::f128)
    return Op;
2330 if (Op.getValueType() == MVT::f64)
2331 return TLI.LowerF128Op(Op, DAG,
2332 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2333 if (Op.getValueType() == MVT::f32)
2334 return TLI.LowerF128Op(Op, DAG,
2335 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
  llvm_unreachable("fpround to non-float!");
  return SDValue();
}

static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
2345 EVT VT = Op.getValueType();
2346 assert(VT == MVT::i32 || VT == MVT::i64);
2348 // Expand f128 operations to fp128 abi calls.
2349 if (Op.getOperand(0).getValueType() == MVT::f128
2350 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2351 const char *libName = TLI.getLibcallName(VT == MVT::i32
2352 ? RTLIB::FPTOSINT_F128_I32
2353 : RTLIB::FPTOSINT_F128_I64);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the resulting type is illegal.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Otherwise, convert the FP value to an integer in an FP register.
  if (VT == MVT::i32)
    Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
  else
    Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));

  return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
2370 static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
  EVT OpVT = Op.getOperand(0).getValueType();
  assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2377 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2379 // Expand f128 operations to fp128 ABI calls.
2380 if (Op.getValueType() == MVT::f128
2381 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2382 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2383 ? RTLIB::SINTTOFP_I32_F128
2384 : RTLIB::SINTTOFP_I64_F128);
    return TLI.LowerF128Op(Op, DAG, libName, 1);
  }

  // Expand if the operand type is illegal.
  if (!TLI.isTypeLegal(OpVT))
    return SDValue();

  // Otherwise, convert the integer value to FP in an FP register.
  SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
  unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
  return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
}
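// E.g. (sketch): an i32 sint_to_fp is emitted as ITOF of a bitcast into an
// f32 register, which selects to the FITO* conversion instructions; the
// i64 path uses XTOF and the 64-bit FXTO* forms instead.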
2398 static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
2402 EVT VT = Op.getValueType();
2404 // Expand if it does not involve f128 or the target has support for
2405 // quad floating point instructions and the resulting type is legal.
2406 if (Op.getOperand(0).getValueType() != MVT::f128 ||
      (hasHardQuad && TLI.isTypeLegal(VT)))
    return SDValue();
2410 assert(VT == MVT::i32 || VT == MVT::i64);
2412 return TLI.LowerF128Op(Op, DAG,
2413 TLI.getLibcallName(VT == MVT::i32
2414 ? RTLIB::FPTOUINT_F128_I32
                                             : RTLIB::FPTOUINT_F128_I64),
                          1);
}
2419 static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               const SparcTargetLowering &TLI,
                               bool hasHardQuad) {
  SDLoc dl(Op);
2423 EVT OpVT = Op.getOperand(0).getValueType();
2424 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2426 // Expand if it does not involve f128 or the target has support for
2427 // quad floating point instructions and the operand type is legal.
  if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
    return SDValue();
2431 return TLI.LowerF128Op(Op, DAG,
2432 TLI.getLibcallName(OpVT == MVT::i32
2433 ? RTLIB::UINTTOFP_I32_F128
                                             : RTLIB::UINTTOFP_I64_F128),
                          1);
}
2438 static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
                          const SparcTargetLowering &TLI,
                          bool hasHardQuad) {
2441 SDValue Chain = Op.getOperand(0);
2442 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2443 SDValue LHS = Op.getOperand(2);
2444 SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);
2447 unsigned Opc, SPCC = ~0U;
2449 // If this is a br_cc of a "setcc", and if the setcc got lowered into
  // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2451 LookThroughSetCC(LHS, RHS, CC, SPCC);
2453 // Get the condition flag.
2454 SDValue CompareFlag;
2455 if (LHS.getValueType().isInteger()) {
2456 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2457 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2458 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
    Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
  } else {
2461 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2462 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::BRICC;
    } else {
      CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
      Opc = SPISD::BRFCC;
    }
  }
2471 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
2475 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
                              const SparcTargetLowering &TLI,
                              bool hasHardQuad) {
2478 SDValue LHS = Op.getOperand(0);
2479 SDValue RHS = Op.getOperand(1);
2480 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2481 SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);
2484 unsigned Opc, SPCC = ~0U;
2486 // If this is a select_cc of a "setcc", and if the setcc got lowered into
  // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2488 LookThroughSetCC(LHS, RHS, CC, SPCC);
2490 SDValue CompareFlag;
2491 if (LHS.getValueType().isInteger()) {
2492 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2493 Opc = LHS.getValueType() == MVT::i32 ?
2494 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
    if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
  } else {
2497 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2498 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2499 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
      Opc = SPISD::SELECT_ICC;
    } else {
2502 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2503 Opc = SPISD::SELECT_FCC;
      if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
    }
  }
2507 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
                     DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
}
2511 SDValue SparcTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG,
                                                 const SparcTargetLowering &TLI) const {
  SDLoc DL(Op);
2514 return DAG.getNode(SPISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
                     Op.getOperand(1));
}
2519 SDValue SparcTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG,
                                                  const SparcTargetLowering &TLI) const {
  SDLoc DL(Op);
  return DAG.getNode(SPISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1));
}
2525 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
2526 const SparcTargetLowering &TLI) {
2527 MachineFunction &MF = DAG.getMachineFunction();
2528 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
2529 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2531 // Need frame address to find the address of VarArgsFrameIndex.
2532 MF.getFrameInfo().setFrameAddressIsTaken(true);
2534 // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc DL(Op);
  SDValue Offset =
2538 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2539 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2540 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2541 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
2545 static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
2546 SDNode *Node = Op.getNode();
2547 EVT VT = Node->getValueType(0);
2548 SDValue InChain = Node->getOperand(0);
2549 SDValue VAListPtr = Node->getOperand(1);
2550 EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc DL(Node);
  SDValue VAList =
2554 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2555 // Increment the pointer, VAList, to the next vaarg.
2556 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits()/8,
                                                      DL));
2559 // Store the incremented VAList to the legalized pointer.
2560 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2561 MachinePointerInfo(SV));
2562 // Load the actual argument out of the pointer VAList.
2563 // We can't count on greater alignment than the word size.
2564 return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
                     std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8);
}
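// Behavioral example (not in the original): va_arg of a double on SPARC32
// therefore loads with only 4-byte alignment, std::min(32, 64) / 8, since
// va_list slots in the caller's argument area are merely word-aligned.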
2568 static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
2569 const SparcSubtarget *Subtarget) {
2570 SDValue Chain = Op.getOperand(0); // Legalize the chain.
2571 SDValue Size = Op.getOperand(1); // Legalize the size.
2572 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2573 unsigned StackAlign = Subtarget->getFrameLowering()->getStackAlignment();
  EVT VT = Size->getValueType(0);
  SDLoc dl(Op);

  // TODO: implement over-aligned alloca. (Note: this also implies
  // supporting over-aligned function frames plus dynamic allocations
  // at all, which currently isn't supported.)
2580 if (Align > StackAlign) {
2581 const MachineFunction &MF = DAG.getMachineFunction();
2582 report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
2583 "over-aligned dynamic alloca not supported.");
2586 // The resultant pointer needs to be above the register spill area
2587 // at the bottom of the stack.
2588 unsigned regSpillArea;
  if (Subtarget->is64Bit()) {
    regSpillArea = 128;
  } else {
2592 // On Sparc32, the size of the spill area is 92. Unfortunately,
2593 // that's only 4-byte aligned, not 8-byte aligned (the stack
2594 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2595 // aligned dynamic allocation, we actually need to add 96 to the
2596 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2598 // That also means adding 4 to the size of the allocation --
    // before applying the 8-byte rounding. Unfortunately, the value we
    // get here has already had rounding applied. So, we need to add 8
    // instead, wasting a bit more memory.
2603 // Further, this only actually needs to be done if the required
2604 // alignment is > 4, but, we've lost that info by this point, too,
2605 // so we always apply it.
2607 // (An alternative approach would be to always reserve 96 bytes
2608 // instead of the required 92, but then we'd waste 4 extra bytes
2609 // in every frame, not just those with dynamic stack allocations)
2611 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2613 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
                       DAG.getConstant(8, dl, VT));
    regSpillArea = 96;
  }
2618 unsigned SPReg = SP::O6;
2619 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2620 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2621 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2623 regSpillArea += Subtarget->getStackPointerBias();
2625 SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2626 DAG.getConstant(regSpillArea, dl, VT));
2627 SDValue Ops[2] = { NewVal, Chain };
  return DAG.getMergeValues(Ops, dl);
}
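// Net effect for a simple alloca on SPARC32 (sketch):
//   sub %sp, size, %sp        ! carve out space
//   add %sp, 96, %result      ! step over the register spill area
// On SPARC64 the offset is 128 plus the 2047-byte stack bias instead.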
static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue Chain = DAG.getNode(SPISD::FLUSHW,
                              dl, MVT::Other, DAG.getEntryNode());
  return Chain;
}
2639 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2640 const SparcSubtarget *Subtarget) {
2641 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2642 MFI.setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned FrameReg = SP::I6;
  unsigned stackBias = Subtarget->getStackPointerBias();

  SDValue FrameAddr;
  if (depth == 0) {
2652 FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
2653 if (Subtarget->is64Bit())
2654 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                              DAG.getIntPtrConstant(stackBias, dl));
    return FrameAddr;
  }

  // Flush first to make sure the windowed registers' values are on the stack.
2660 SDValue Chain = getFLUSHW(Op, DAG);
2661 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
  unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
  while (depth--) {
2666 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2667 DAG.getIntPtrConstant(Offset, dl));
    FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
  }
2670 if (Subtarget->is64Bit())
2671 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
                            DAG.getIntPtrConstant(stackBias, dl));

  return FrameAddr;
}
2677 static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
2678 const SparcSubtarget *Subtarget) {
2680 uint64_t depth = Op.getConstantOperandVal(0);
  return getFRAMEADDR(depth, Op, DAG, Subtarget);
}
2686 static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
2687 const SparcTargetLowering &TLI,
2688 const SparcSubtarget *Subtarget) {
2689 MachineFunction &MF = DAG.getMachineFunction();
2690 MachineFrameInfo &MFI = MF.getFrameInfo();
2691 MFI.setReturnAddressIsTaken(true);
  if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  uint64_t depth = Op.getConstantOperandVal(0);

  SDValue RetAddr;
  if (depth == 0) {
2702 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2703 unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
    RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
    return RetAddr;
  }
2708 // Need frame address to find return address of the caller.
2709 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget);
2711 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
  SDValue Ptr = DAG.getNode(ISD::ADD,
                            dl, VT,
                            FrameAddr,
                            DAG.getIntPtrConstant(Offset, dl));
  RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());

  return RetAddr;
}
static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
                          unsigned opcode) {
  assert(SrcReg64.getValueType() == MVT::f64 &&
         "LowerF64Op called on non-double!");
  assert(opcode == ISD::FNEG || opcode == ISD::FABS);

  // Lower fneg/fabs on f64 to fneg/fabs on f32.
  // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
  // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.

  // Note: in little-endian mode, the two f32 halves of the value are stored
  // in the registers in the opposite order, so the subregister with the sign
  // bit is the highest-numbered (odd) one, rather than the lowest-numbered
  // (even) one.

  SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
                                            SrcReg64);
  SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
                                            SrcReg64);

  if (DAG.getDataLayout().isLittleEndian())
    Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
  else
    Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);

  SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                dl, MVT::f64), 0);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
                                       DstReg64, Hi32);
  DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
                                       DstReg64, Lo32);
  return DstReg64;
}
// Lower a f128 load into two f64 loads.
static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
  assert(LdNode && LdNode->getOffset().isUndef()
         && "Unexpected node type");

  unsigned alignment = LdNode->getAlignment();
  if (alignment > 8)
    alignment = 8;

  SDValue Hi64 =
      DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
                  LdNode->getPointerInfo(), alignment);
  EVT addrVT = LdNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              LdNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
                             LdNode->getPointerInfo(), alignment);

  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                       dl, MVT::f128);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Hi64,
                               SubRegEven);
  InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                               MVT::f128,
                               SDValue(InFP128, 0),
                               Lo64,
                               SubRegOdd);
  SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
                           SDValue(Lo64.getNode(), 1) };
  SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
  return DAG.getMergeValues(Ops, dl);
}
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
{
  LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());

  EVT MemVT = LdNode->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Load(Op, DAG);

  return Op;
}
// Lower a f128 store into two f64 stores.
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
  assert(StNode && StNode->getOffset().isUndef()
         && "Unexpected node type");
  SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
  SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);

  SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegEven);
  SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                    dl,
                                    MVT::f64,
                                    StNode->getValue(),
                                    SubRegOdd);

  unsigned alignment = StNode->getAlignment();
  if (alignment > 8)
    alignment = 8;

  SDValue OutChains[2];
  OutChains[0] =
      DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
                   StNode->getBasePtr(), MachinePointerInfo(), alignment);
  EVT addrVT = StNode->getBasePtr().getValueType();
  SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
                              StNode->getBasePtr(),
                              DAG.getConstant(8, dl, addrVT));
  OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
                              MachinePointerInfo(), alignment);
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
{
  SDLoc dl(Op);
  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());

  EVT MemVT = St->getMemoryVT();
  if (MemVT == MVT::f128)
    return LowerF128Store(Op, DAG);

  if (MemVT == MVT::i64) {
    // Custom handling for i64 stores: turn them into a bitcast and a
    // v2i32 store.
    SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
    SDValue Chain = DAG.getStore(
        St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
        St->getAlignment(), St->getMemOperand()->getFlags(), St->getAAInfo());
    return Chain;
  }

  return SDValue();
}
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
  assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
         && "invalid opcode");

  SDLoc dl(Op);

  if (Op.getValueType() == MVT::f64)
    return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
  if (Op.getValueType() != MVT::f128)
    return Op; // Legal for f32 and f64.

  // Lower fabs/fneg on f128 to fabs/fneg on f64.
  // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64.
  // (As with LowerF64Op, on little-endian machines we need to negate the odd
  // subregister instead.)

  SDValue SrcReg128 = Op.getOperand(0);
  SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
                                            SrcReg128);
  SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
                                            SrcReg128);

  if (DAG.getDataLayout().isLittleEndian()) {
    if (isV9)
      Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
    else
      Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
  } else {
    if (isV9)
      Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
    else
      Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
  }

  SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, MVT::f128), 0);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
                                        DstReg128, Hi64);
  DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
                                        DstReg128, Lo64);
  return DstReg128;
}
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
  if (Op.getValueType() != MVT::i64)
    return Op;

  SDLoc dl(Op);
2917 SDValue Src1 = Op.getOperand(0);
2918 SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
2919 SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
2920 DAG.getConstant(32, dl, MVT::i64));
2921 Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
2923 SDValue Src2 = Op.getOperand(1);
2924 SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
2925 SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
2926 DAG.getConstant(32, dl, MVT::i64));
2927 Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
2930 bool hasChain = false;
2931 unsigned hiOpc = Op.getOpcode();
2932 switch (Op.getOpcode()) {
2933 default: llvm_unreachable("Invalid opcode");
2934 case ISD::ADDC: hiOpc = ISD::ADDE; break;
2935 case ISD::ADDE: hasChain = true; break;
2936 case ISD::SUBC: hiOpc = ISD::SUBE; break;
  case ISD::SUBE: hasChain = true; break;
  }
  SDValue Lo;
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
  if (hasChain) {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
                     Op.getOperand(2));
  } else {
    Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
  }
2947 SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
2948 SDValue Carry = Hi.getValue(1);
2950 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
2951 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
2952 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
2953 DAG.getConstant(32, dl, MVT::i64));
2955 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
2956 SDValue Ops[2] = { Dst, Carry };
  return DAG.getMergeValues(Ops, dl);
}
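// On SPARC32 the split above ultimately selects to the classic carry chain
// (sketch, register choices illustrative):
//   addcc %o0, %o2, %o0       ! low words, sets the carry flag
//   addx  %o1, %o3, %o1       ! high words, consumes the carry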
2960 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
2961 // in LegalizeDAG.cpp except the order of arguments to the library function.
static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
                                const SparcTargetLowering &TLI)
{
  unsigned opcode = Op.getOpcode();
  assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");

  bool isSigned = (opcode == ISD::SMULO);
  EVT VT = MVT::i64;
  EVT WideVT = MVT::i128;
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);

  if (LHS.getValueType() != VT)
    return Op;
2977 SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
2979 SDValue RHS = Op.getOperand(1);
2980 SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
2981 SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
2982 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
2984 SDValue MulResult = TLI.makeLibCall(DAG,
2985 RTLIB::MUL_I128, WideVT,
2986 Args, isSigned, dl).first;
2987 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2988 MulResult, DAG.getIntPtrConstant(0, dl));
2989 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2990 MulResult, DAG.getIntPtrConstant(1, dl));
  if (isSigned) {
    SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
  } else {
    TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
                           ISD::SETNE);
  }
2998 // MulResult is a node with an illegal type. Because such things are not
2999 // generally permitted during this phase of legalization, ensure that
  // nothing is left using the node. The above EXTRACT_ELEMENT nodes should
  // have been folded.
3002 assert(MulResult->use_empty() && "Illegally typed node still in use!");
3004 SDValue Ops[2] = { BottomHalf, TopHalf } ;
  return DAG.getMergeValues(Ops, dl);
}
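// In effect (sketch): the i64 multiply is widened to i128 through a call
// such as __multi3(HiLHS, LHS, HiRHS, RHS), and overflow is flagged when
// the high half is not the sign (or zero) extension of the low half.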
static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
    // Expand with a fence.
    return SDValue();

  // Monotonic load/stores are legal.
  return Op;
}
SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(SP::G7, PtrVT);
  }
  }
}
3030 SDValue SparcTargetLowering::
3031 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3033 bool hasHardQuad = Subtarget->hasHardQuad();
3034 bool isV9 = Subtarget->isV9();
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");

  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG, *this,
                                                       Subtarget);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG,
                                                      Subtarget);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::FP_TO_UINT:         return LowerFP_TO_UINT(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG, *this,
                                                       hasHardQuad);
  case ISD::BR_CC:              return LowerBR_CC(Op, DAG, *this,
                                                  hasHardQuad);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG, *this,
                                                      hasHardQuad);
  case ISD::EH_SJLJ_SETJMP:     return LowerEH_SJLJ_SETJMP(Op, DAG, *this);
  case ISD::EH_SJLJ_LONGJMP:    return LowerEH_SJLJ_LONGJMP(Op, DAG, *this);
  case ISD::VASTART:            return LowerVASTART(Op, DAG, *this);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
                                                               Subtarget);

  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::FADD:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::ADD_F128), 2);
  case ISD::FSUB:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::SUB_F128), 2);
  case ISD::FMUL:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::MUL_F128), 2);
  case ISD::FDIV:               return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::DIV_F128), 2);
  case ISD::FSQRT:              return LowerF128Op(Op, DAG,
                                        getLibcallName(RTLIB::SQRT_F128), 1);
  case ISD::FABS:
  case ISD::FNEG:               return LowerFNEGorFABS(Op, DAG, isV9);
  case ISD::FP_EXTEND:          return LowerF128_FPEXTEND(Op, DAG, *this);
  case ISD::FP_ROUND:           return LowerF128_FPROUND(Op, DAG, *this);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:               return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::UMULO:
  case ISD::SMULO:              return LowerUMULO_SMULO(Op, DAG, *this);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:       return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}

MachineBasicBlock *
3095 SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
3096 MachineBasicBlock *BB) const {
3097 switch (MI.getOpcode()) {
3098 default: llvm_unreachable("Unknown SELECT_CC!");
3099 case SP::SELECT_CC_Int_ICC:
3100 case SP::SELECT_CC_FP_ICC:
3101 case SP::SELECT_CC_DFP_ICC:
3102 case SP::SELECT_CC_QFP_ICC:
3103 return expandSelectCC(MI, BB, SP::BCOND);
3104 case SP::SELECT_CC_Int_FCC:
3105 case SP::SELECT_CC_FP_FCC:
3106 case SP::SELECT_CC_DFP_FCC:
3107 case SP::SELECT_CC_QFP_FCC:
3108 return expandSelectCC(MI, BB, SP::FBCOND);
3109 case SP::EH_SJLJ_SETJMP32ri:
3110 case SP::EH_SJLJ_SETJMP32rr:
3111 return emitEHSjLjSetJmp(MI, BB);
3112 case SP::EH_SJLJ_LONGJMP32rr:
3113 case SP::EH_SJLJ_LONGJMP32ri:
    return emitEHSjLjLongJmp(MI, BB);
  }
}

MachineBasicBlock *
3120 SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
3121 unsigned BROpcode) const {
3122 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3123 DebugLoc dl = MI.getDebugLoc();
3124 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3126 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
3127 // control-flow pattern. The incoming instruction knows the destination vreg
3128 // to set, the condition code register to branch on, the true/false values to
3129 // select between, and a branch opcode to use.
3130 const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  // thisMBB:
  //  ...
  //   TrueVal = ...
  //   [f]bCC sinkMBB
  //   fallthrough --> copy0MBB
3138 MachineBasicBlock *thisMBB = BB;
3139 MachineFunction *F = BB->getParent();
3140 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
3141 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3142 F->insert(It, copy0MBB);
3143 F->insert(It, sinkMBB);
3145 // Transfer the remainder of BB and its successor edges to sinkMBB.
3146 sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
3149 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
3151 // Add the true and fallthrough blocks as its successors.
3152 BB->addSuccessor(copy0MBB);
3153 BB->addSuccessor(sinkMBB);
  BuildMI(BB, dl, TII.get(BROpcode)).addMBB(sinkMBB).addImm(CC);

  // copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(2).getReg())
      .addMBB(copy0MBB)
      .addReg(MI.getOperand(1).getReg())
      .addMBB(thisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

MachineBasicBlock *
3180 SparcTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
3181 MachineBasicBlock *MBB) const {
3182 DebugLoc DL = MI.getDebugLoc();
3183 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
3185 MachineFunction *MF = MBB->getParent();
3186 MachineRegisterInfo &MRI = MF->getRegInfo();
3187 MachineInstrBuilder MIB;
3189 MVT PVT = getPointerTy(MF->getDataLayout());
3190 unsigned RegSize = PVT.getStoreSize();
3191 assert(PVT == MVT::i32 && "Invalid Pointer Size!");
3193 unsigned Buf = MI.getOperand(0).getReg();
3194 unsigned JmpLoc = MRI.createVirtualRegister(&SP::IntRegsRegClass);
  // TODO: If we do 64-bit handling, this should perhaps be FLUSHW, not TA 3.
3197 MIB = BuildMI(*MBB, MI, DL, TII->get(SP::TRAPri), SP::G0).addImm(3).addImm(SPCC::ICC_A);
3199 // Instruction to restore FP
3200 const unsigned FP = SP::I6;
  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
            .addReg(FP, RegState::Define)
            .addReg(Buf)
            .addImm(0);

  // Instruction to load jmp location
  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
            .addReg(JmpLoc, RegState::Define)
            .addReg(Buf)
            .addImm(RegSize);

  // Instruction to restore SP
  const unsigned SP = SP::O6;
  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
            .addReg(SP, RegState::Define)
            .addReg(Buf)
            .addImm(2 * RegSize);

  // Instruction to restore I7
  MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
            .addReg(SP::I7, RegState::Define)
            .addReg(Buf, RegState::Kill)
            .addImm(3 * RegSize);

  // Jump to JmpLoc
  BuildMI(*MBB, MI, DL, TII->get(SP::JMPLrr))
      .addReg(SP::G0)
      .addReg(JmpLoc, RegState::Kill)
      .addReg(SP::G0);

  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *

MachineBasicBlock *
SparcTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB;

  MVT PVT = getPointerTy(MF->getDataLayout());
  unsigned RegSize = PVT.getStoreSize();
  assert(PVT == MVT::i32 && "Invalid Pointer Size!");

  unsigned DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(RC->hasType(MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[0] = FP
  //  buf[RegSize] = restoreMBB <-- takes address of restoreMBB
  //  buf[RegSize * 2] = O6
  //  buf[RegSize * 3] = I7
  //  Ensure restoreMBB remains in the relocations list (done using a bn instruction)
  //  b mainMBB
  //
  // mainMBB:
  //  v_main = 0
  //  b sinkMBB
  //
  // restoreMBB:
  //  v_restore = 1
  //  --fall through--
  //
  // sinkMBB:
  //  v = phi(main, restore)

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator It = ++MBB->getIterator();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);

  MF->insert(It, mainMBB);
  MF->insert(It, restoreMBB);
  MF->insert(It, sinkMBB);
  restoreMBB->setHasAddressTaken();
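  // restoreMBB's address is stored into the jump buffer below; marking it
  // address-taken keeps later CFG optimizations from deleting or merging it.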

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)),
                  MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  unsigned LabelReg = MRI.createVirtualRegister(&SP::IntRegsRegClass);
  unsigned LabelReg2 = MRI.createVirtualRegister(&SP::IntRegsRegClass);
  unsigned BufReg = MI.getOperand(1).getReg();

  // Instruction to store FP
  const unsigned FP = SP::I6;
  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
            .addReg(BufReg)
            .addImm(0)
            .addReg(FP);

  // Instructions to store jmp location
  MIB = BuildMI(thisMBB, DL, TII->get(SP::SETHIi))
            .addReg(LabelReg, RegState::Define)
            .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_HI);

  MIB = BuildMI(thisMBB, DL, TII->get(SP::ORri))
            .addReg(LabelReg2, RegState::Define)
            .addReg(LabelReg, RegState::Kill)
            .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_LO);
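
  // The SETHI %hi / OR %lo pair above is the usual SPARC idiom for
  // materializing a full 32-bit address in a register; store the resulting
  // address of restoreMBB into the buffer as the resume location.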
  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
            .addReg(BufReg)
            .addImm(RegSize)
            .addReg(LabelReg2, RegState::Kill);

  // Instruction to store SP
  const unsigned SP = SP::O6;
  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
            .addReg(BufReg)
            .addImm(2 * RegSize)
            .addReg(SP);

  // Instruction to store I7
  MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
            .addReg(BufReg)
            .addImm(3 * RegSize)
            .addReg(SP::I7);

  // FIXME: This next instruction ensures that the restoreMBB block address
  // remains valid through optimization passes and serves no other purpose.
  // The ICC_N condition ensures that the branch is never taken. The
  // commented-out code below was an alternative attempt to achieve this
  // which brought myriad problems.
  //MIB = BuildMI(thisMBB, DL, TII->get(SP::EH_SjLj_Setup)).addMBB(restoreMBB, SparcMCExpr::VK_Sparc_None);
  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
            .addMBB(restoreMBB)
            .addImm(SPCC::ICC_N);

  MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
            .addMBB(mainMBB)
            .addImm(SPCC::ICC_A);

  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(restoreMBB);

  // mainMBB:
  MIB = BuildMI(mainMBB, DL, TII->get(SP::ORrr))
            .addReg(mainDstReg, RegState::Define)
            .addReg(SP::G0)
            .addReg(SP::G0);
  MIB = BuildMI(mainMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
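  // 'or %g0, %g0, mainDstReg' zeroes mainDstReg: this is the value setjmp
  // yields on the direct call, while restoreMBB below produces 1 for returns
  // via longjmp.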

  mainMBB->addSuccessor(sinkMBB);

  // restoreMBB:
  MIB = BuildMI(restoreMBB, DL, TII->get(SP::ORri))
            .addReg(restoreDstReg, RegState::Define)
            .addReg(SP::G0)
            .addImm(1);
  //MIB = BuildMI(restoreMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
  restoreMBB->addSuccessor(sinkMBB);
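  // No terminator is emitted for restoreMBB; it was inserted directly before
  // sinkMBB above and simply falls through into it.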

  // sinkMBB:
  MIB = BuildMI(*sinkMBB, sinkMBB->begin(), DL,
                TII->get(SP::PHI), DstReg)
            .addReg(mainDstReg).addMBB(mainMBB)
            .addReg(restoreDstReg).addMBB(restoreMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

//===----------------------------------------------------------------------===//
//                         Sparc Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
SparcTargetLowering::ConstraintType
SparcTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'r': return C_RegisterClass;
    case 'I': // SIMM13
      return C_Other;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SparcTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'I': // SIMM13
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (isInt<13>(C->getSExtValue()))
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void SparcTargetLowering::
LowerAsmOperandForConstraint(SDValue Op,
                             std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  SDValue Result(nullptr, 0);

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I': // SIMM13
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<13>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
      return;
    }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass *>
SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT == MVT::v2i32)
        return std::make_pair(0U, &SP::IntPairRegClass);
      else
        return std::make_pair(0U, &SP::IntRegsRegClass);
    }
  } else if (!Constraint.empty() && Constraint.size() <= 5
              && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
    // constraint = '{r<d>}'
    // Remove the braces from around the name.
    StringRef name(Constraint.data()+1, Constraint.size()-2);
    // Handle register aliases:
    //       r0-r7   -> g0-g7
    //       r8-r15  -> o0-o7
    //       r16-r23 -> l0-l7
    //       r24-r31 -> i0-i7
    uint64_t intVal = 0;
    if (name.substr(0, 1).equals("r")
        && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
      const char regTypes[] = { 'g', 'o', 'l', 'i' };
      char regType = regTypes[intVal/8];
      char regIdx = '0' + (intVal % 8);
      char tmp[] = { '{', regType, regIdx, '}', 0 };
      std::string newConstraint = std::string(tmp);
      return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
                                                          VT);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool
SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The Sparc target isn't yet aware of offsets.
  return false;
}

void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>& Results,
                                             SelectionDAG &DAG) const {

  SDLoc dl(N);

  RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    // Custom lower only if it involves f128 or i64.
    if (N->getOperand(0).getValueType() != MVT::f128
        || N->getValueType(0) != MVT::i64)
      return;
    libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
               ? RTLIB::FPTOSINT_F128_I64
               : RTLIB::FPTOUINT_F128_I64);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;

  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // Custom lower only if it involves f128 or i64.
    if (N->getValueType(0) != MVT::f128
        || N->getOperand(0).getValueType() != MVT::i64)
      return;

    libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
               ? RTLIB::SINTTOFP_I64_F128
               : RTLIB::UINTTOFP_I64_F128);

    Results.push_back(LowerF128Op(SDValue(N, 0),
                                  DAG,
                                  getLibcallName(libCall),
                                  1));
    return;
  case ISD::LOAD: {
    LoadSDNode *Ld = cast<LoadSDNode>(N);
    // Custom handling only for i64: turn i64 load into a v2i32 load,
    // and a bitcast.
    if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
      return;

    SDLoc dl(N);
    SDValue LoadRes = DAG.getExtLoad(
        Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
        Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getAlignment(),
        Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
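
    // Bitcast the v2i32 load result back to i64 and report both the value and
    // the chain, so every result of the original node gets replaced.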
    SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
    Results.push_back(Res);
    Results.push_back(LoadRes.getValue(1));
    return;
  }
  }
}

// Override to enable LOAD_STACK_GUARD lowering on Linux.
bool SparcTargetLowering::useLoadStackGuardNode() const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::useLoadStackGuardNode();
  return true;
}

// Override to disable global variable loading on Linux.
void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->isTargetLinux())
    return TargetLowering::insertSSPDeclarations(M);
}