//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//
#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsXCore.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "xcore-lower"
const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch ((XCoreISD::NodeType)Opcode)
  {
    case XCoreISD::FIRST_NUMBER : break;
    case XCoreISD::BL : return "XCoreISD::BL";
    case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
    case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
    case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
    case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
    case XCoreISD::STWSP : return "XCoreISD::STWSP";
    case XCoreISD::RETSP : return "XCoreISD::RETSP";
    case XCoreISD::LADD : return "XCoreISD::LADD";
    case XCoreISD::LSUB : return "XCoreISD::LSUB";
    case XCoreISD::LMUL : return "XCoreISD::LMUL";
    case XCoreISD::MACCU : return "XCoreISD::MACCU";
    case XCoreISD::MACCS : return "XCoreISD::MACCS";
    case XCoreISD::CRC8 : return "XCoreISD::CRC8";
    case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
    case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
    case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
    case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
    case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER";
  }
  return nullptr;
}
XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
                                         const XCoreSubtarget &Subtarget)
    : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);

  // 64bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL , MVT::i32, Expand);
  setOperationAction(ISD::ROTR , MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32 , Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
  }

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Exception handling
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  // Atomic operations
  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
  // As we are always Sequential Consistent, an ATOMIC_FENCE becomes a no-op.
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  setMinFunctionAlignment(Align(2));
  setPrefFunctionAlignment(Align(4));
}
bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    // 8-bit loads zero-extend for free on XCore.
    return true;
  }

  return false;
}
SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_LOAD:        return LowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>&Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}
//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
                                                     const GlobalValue *GV,
                                                     SelectionDAG &DAG) const {
  // FIXME there is no actual debug info here
  SDLoc dl(GA);

  if (GV->getValueType()->isFunctionTy())
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);

  const auto *GVar = dyn_cast<GlobalVariable>(GV);
  if ((GV->hasSection() && GV->getSection().startswith(".cp.")) ||
      (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);

  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}
static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
    return true;

  Type *ObjType = GV->getValueType();
  if (!ObjType->isSized())
    return false;

  auto &DL = GV->getParent()->getDataLayout();
  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
  return ObjSize < CodeModelLargeSize && ObjSize != 0;
}
SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc DL(GN);
  int64_t Offset = GN->getOffset();
  if (IsSmallObject(GV, *this)) {
    // We can only fold positive offsets that are a multiple of the word size.
    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
    GA = getGlobalAddressWrapper(GA, GV, DAG);
    // Handle the rest of the offset.
    if (Offset != FoldedOffset) {
      SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
    }
    return GA;
  } else {
    // Ideally we would not fold in offset with an index <= 11.
    Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
    Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
    Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *Idx = ConstantInt::get(Ty, Offset);
    Constant *GAI = ConstantExpr::getGetElementPtr(
        Type::getInt8Ty(*DAG.getContext()), GA, Idx);
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       DAG.getEntryNode(), CP, MachinePointerInfo());
  }
}
SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
}
SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}
unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}
SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, dl, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}
SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
    const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
    SelectionDAG &DAG) const {
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
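  // Worked example (illustration only, not part of the original comment):
  // for Offset == 6 we get HighOffset == 8 and LowOffset == 4, so both
  // LowShift and HighShift below are 16 bits and the result is
  //   (Low >> 16) | (High << 16),
  // i.e. the misaligned word reassembled from two adjacent aligned words.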
  int32_t HighOffset = alignTo(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   GASD->getOffset() + LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    GASD->getOffset() + HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, DL, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, DL, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);

  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}
static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  KnownBits Known = DAG.computeKnownBits(Value);
  return Known.countMinTrailingZeros() >= 2;
}
SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LLVMContext &Context = *DAG.getContext();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");

  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
                                     LD->getMemoryVT(), *LD->getMemOperand()))
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        MinAlign(GV->getAlignment(), 4) == 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low =
        DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
                       LD->getPointerInfo(), MVT::i16,
                       /* Alignment = */ 2, LD->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, DL, MVT::i32));
    SDValue High =
        DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
                       LD->getPointerInfo().getWithOffset(2), MVT::i16,
                       /* Alignment = */ 2, LD->getMemOperand()->getFlags());
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, DL, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, DL);
  }
  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
      CallingConv::C, IntPtrTy,
      DAG.getExternalSymbol("__misaligned_load",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, DL);
}
SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  LLVMContext &Context = *DAG.getContext();
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");

  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
                                     ST->getMemoryVT(), *ST->getMemOperand()))
    return SDValue();

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, dl, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(
        Chain, dl, Low, BasePtr, ST->getPointerInfo(), MVT::i16,
        /* Alignment = */ 2, ST->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, dl, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(
        Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
        MVT::i16, /* Alignment = */ 2, ST->getMemOperand()->getFlags());
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }
  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      CallingConv::C, Type::getVoidTy(Context),
      DAG.getExternalSymbol("__misaligned_store",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}
SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
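  // Note (explanatory, not in the original): MACCS computes the 64-bit
  // signed product plus a 64-bit addend, producing a {Hi, Lo} pair. With
  // both addend words forced to zero below, this is exactly smul_lohi.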
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}
SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
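  // Note (explanatory, not in the original): LMUL is the unsigned long
  // multiply-accumulate LHS * RHS + C + D; with the two addend operands
  // zeroed below, the {Hi, Lo} pair it returns is exactly umul_lohi.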
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}
/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}
SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, dl, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, dl, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
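  // General case (explanatory note, not in the original): with
  // x = LH:LL and y = RH:RL, the schoolbook expansion is
  //   x * y = LL*RL + ((LL*RH + LH*RL) << 32)  (mod 2^64),
  // so MACCU below forms LL*RL plus the 64-bit addend, and the two cross
  // products are folded into the high word only.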
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD)
    if (SDValue Result = TryExpandADDWithMul(N, DAG))
      return Result;

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(1, dl, MVT::i32));
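  // Sketch of the expansion (explanatory, not in the original): LADD/LSUB
  // return {Result, Carry}; the low-word carry (or borrow) is fed into the
  // high-word op, giving the usual add-with-carry chain for i64.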
  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);

  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // Whilst llvm does not support aggregate varargs we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList =
      DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vararg
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
                                                      dl));
  // Store the incremented VAList to the legalized pointer
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
}
SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo());
}
SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // This node represents llvm.frameaddress on the DAG.
  // It takes one operand, the index of the frame address to return.
  // An index of zero corresponds to the current function's frame address.
  // An index of one to the parent's frame address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                            RegInfo->getFrameRegister(MF), MVT::i32);
}
SDValue XCoreTargetLowering::
LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // This node represents llvm.returnaddress on the DAG.
  // It takes one operand, the index of the return address to return.
  // An index of zero corresponds to the current function's return address.
  // An index of one to the parent's return address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  int FI = XFI->createLRSpillSlot(MF);
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                     DAG.getEntryNode(), FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
}
SDValue XCoreTargetLowering::
LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
  // This node represents the offset from the frame pointer to the first
  // on-stack argument, which is needed for correct stack adjustment during
  // unwind. However, we don't know the offset until after the frame has been
  // finalised; this is done during the XCoreFTAOElim pass.
  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
}
SDValue XCoreTargetLowering::
LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
  // This node represents the 'eh_return' gcc dwarf builtin, which is used to
  // return from an exception. The general meaning is: adjust the stack by
  // OFFSET and pass execution to HANDLER.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Absolute SP = (FP + FrameToArgs) + Offset
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     RegInfo->getFrameRegister(MF), MVT::i32);
  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
                                    MVT::i32);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);

  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
  // which leaves 2 caller saved registers, R2 & R3 for us to use.
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

  SDValue OutChains[] = {
    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
  };

  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);

  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StackReg, MVT::i32),
                     DAG.getRegister(HandlerReg, MVT::i32));
}
SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}
SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
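  // Layout note (explanatory, not in the original): the five stores below
  // fill a five-word trampoline: three words of raw instruction encodings,
  // then the nest parameter at offset 12 and the function pointer at
  // offset 16 — the nest/fptr words referenced by the code sequence.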
  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, dl, MVT::i32));
  OutChains[1] =
      DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 4));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, dl, MVT::i32));
  OutChains[2] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 8));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, dl, MVT::i32));
  OutChains[3] =
      DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, dl, MVT::i32));
  OutChains[4] =
      DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    SDValue Data =
        DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                    Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    SDValue Crc(Data.getNode(), 1);
    SDValue Results[] = { Crc, Data };
    return DAG.getMergeValues(Results, DL);
  }
  return SDValue();
}
SDValue XCoreTargetLowering::
LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}
SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
  assert((N->getOrdering() == AtomicOrdering::Unordered ||
          N->getOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic load must be aligned");
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                       N->getChain(), N->getBasePtr(), N->getPointerInfo(),
                       N->getAlignment(), N->getMemOperand()->getFlags(),
                       N->getAAInfo(), N->getRanges());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic load must be aligned");
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  return SDValue();
}
SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
  assert((N->getOrdering() == AtomicOrdering::Unordered ||
          N->getOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic store must be aligned");
    return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
                        N->getPointerInfo(), N->getAlignment(),
                        N->getMemOperand()->getFlags(), N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic store must be aligned");
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                             N->getAlignment(), N->getMemOperand()->getFlags(),
                             N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                             N->getAlignment(), N->getMemOperand()->getFlags(),
                             N->getAAInfo());
  return SDValue();
}
MachineMemOperand::Flags
XCoreTargetLowering::getMMOFlags(const Instruction &I) const {
  // Because of how we convert atomic_load and atomic_store to normal loads and
  // stores in the DAG, we need to ensure that the MMOs are marked volatile
  // since DAGCombine hasn't been updated to account for atomic, but non
  // volatile loads. (See D57601)
  if (auto *SI = dyn_cast<StoreInst>(&I))
    if (SI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *LI = dyn_cast<LoadInst>(&I))
    if (LI->isAtomic())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
    if (AI->isVolatile())
      return MachineMemOperand::MOVolatile;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    if (AI->isVolatile())
      return MachineMemOperand::MOVolatile;
  return MachineMemOperand::MONone;
}
//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//
/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C implemented
  switch (CallConv)
  {
    default:
      report_fatal_error("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of physical registers / memory locations.
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               const SDLoc &dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
                                 InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    } else {
      assert(VA.isMemLoc());
      ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
                                             InVals.size()));
      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int offset = ResultMemLocs[i].first;
    unsigned index = ResultMemLocs[i].second;
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
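    // LDWSP addresses the stack in words rather than bytes, hence the byte
    // offset is divided by 4 below (clarifying comment, not in the original).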
    SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
    SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
    InVals[index] = load;
    MemOpChains.push_back(load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all load nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}
/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue XCoreTargetLowering::LowerCCCCallTo(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed in registers must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();
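      // STWSP likewise takes a word offset from sp, so the byte offset is
      // divided by 4 (clarifying comment, not in the original).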
      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, dl,
                                                        MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
                             DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
}
//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}
/// XCore formal arguments implementation
SDValue XCoreTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv)
  {
    default:
      report_fatal_error("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}
/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
SDValue XCoreTargetLowering::LowerCCCArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  if (!isVarArg)
    XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getEVTString() << "\n";
#endif
          llvm_unreachable(nullptr);
        }
      case MVT::i32:
        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI.CreateFixedObject(ObjSize,
                                     LRSaveSize + VA.getLocMemOffset(),
                                     true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }
  // 1b. CopyFromReg vararg registers.
  if (isVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI.CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack
        SDValue Store =
            DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
        MemOps.push_back(Store);
      }
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
          MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                                true));
    }
  }
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // argument.
  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
                                                    ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI.CreateStackObject(Size, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
                                     DAG.getConstant(Size, dl, MVT::i32),
                                     Align, false, false, false,
                                     MachinePointerInfo(),
                                     MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}
//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
    return false;
  return true;
}
SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &dl, SelectionDAG &DAG) const {

  XCoreFunctionInfo *XFI =
      DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  if (!isVarArg)
    CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);

  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0"
  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));

  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (isVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
    // Create the frame index object for the memory location.
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together,
    // avoiding something bad.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
}
//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  assert((MI.getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
      .addReg(MI.getOperand(1).getReg())
      .addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copy0MBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
1586 //===----------------------------------------------------------------------===//
1587 // Target Optimization Hooks
1588 //===----------------------------------------------------------------------===//
SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::INTRINSIC_VOID:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::xcore_outt:
    case Intrinsic::xcore_outct:
    case Intrinsic::xcore_chkct: {
      SDValue OutVal = N->getOperand(3);
      // These instructions ignore the high bits.
      if (OutVal.hasOneUse()) {
        unsigned BitWidth = OutVal.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
        KnownBits Known;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
            TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
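    // For illustration: given llvm.xcore.outt(res, and(x, 0x1ff)), only the
    // low 8 bits of the value operand are demanded, so the combine above can
    // shrink the mask constant to 0xff or drop the AND entirely.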
    case Intrinsic::xcore_setpt: {
      SDValue Time = N->getOperand(3);
      // This instruction ignores the high bits.
      if (Time.hasOneUse()) {
        unsigned BitWidth = Time.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
        KnownBits Known;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
            TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    }
    break;
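  // For illustration: setpt reads only the low 16 bits of the time operand,
  // so e.g. llvm.xcore.setpt(res, and(t, 0x1ffff)) can have its mask shrunk
  // to 0xffff, or removed, by the same demanded-bits reasoning as above.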
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // Canonicalize constant to RHS.
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, dl, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, dl, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
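  // Worked example (illustrative): ladd consumes only the low bit of its
  // carry-in operand, so ladd(0, 0, x) is the pair (x & 1, 0) with no carry
  // out; and when the carry result is unused and y is known to be 0 or 1,
  // ladd(x, 0, y) is just the ordinary 32-bit add(x, y).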
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, dl, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
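  // Worked example (illustrative): with x known to be 0 or 1,
  // lsub(0, 0, x) yields result 0 - x with borrow x (borrowing exactly when
  // x is 1), and lsub(x, 0, y) with the borrow unused is the plain sub(x, y).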
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, dl);
      }
      // Otherwise fold to ladd(a, b, 0)
      SDValue Result =
          DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, dl);
    }
  }
  break;
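  // Worked example (illustrative): lmul(x, y, a, b) computes the 64-bit value
  // x*y + a + b split across (hi, lo). With y == 0 that is a + b, which fits
  // in 33 bits, so the high word is either unused (plain add) or exactly the
  // carry produced by ladd(a, b, 0).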
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
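  // Worked example (illustrative): for i32, add(add(mul(x,y),a),b) collapses
  // to a single lmul(x, y, a, b) whose low word is the desired result; for
  // i64 the same fold applies once every operand is known zero-extended from
  // 32 bits, with BUILD_PAIR reassembling the result from lmul's two words.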
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                       ST->getMemoryVT(),
                                       *ST->getMemOperand()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    assert((StoreBits % 8) == 0 &&
           "Store size in bits must be a multiple of 8");
    unsigned Alignment = ST->getAlignment();

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        bool isTail = isInTailCallPosition(DAG, ST, Chain);
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits / 8, dl, MVT::i32),
                              Alignment, false, isTail, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}
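// Note on the store combine above (illustrative): an unaligned store fed
// directly by an unaligned load of the same width, with no intervening side
// effects on the chain, is rewritten as a small memmove; this avoids
// expanding both the load and the store into sequences of byte operations.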
void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        KnownBits &Known,
                                                        const APInt &DemandedElts,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                         Known.getBitWidth() - 1);
    }
    break;
  case ISD::INTRINSIC_W_CHAIN:
    {
      unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      switch (IntNo) {
      case Intrinsic::xcore_getts:
        // High bits are known to be zero.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 16);
        break;
      case Intrinsic::xcore_int:
      case Intrinsic::xcore_inct:
        // High bits are known to be zero.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 8);
        break;
      case Intrinsic::xcore_testct:
        // Result is either 0 or 1.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 1);
        break;
      case Intrinsic::xcore_testwct:
        // Result is in the range 0 - 4.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 3);
        break;
      }
    }
    break;
  }
}
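// For illustration: testwct reports a count in the range 0 - 4, which fits
// in 3 bits, so every bit above bit 2 is known zero; the carry and borrow
// results of LADD/LSUB are likewise a single bit.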
//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//
static inline bool isImmUs(int64_t val) {
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val) {
  return (val % 2 == 0 && isImmUs(val / 2));
}

static inline bool isImmUs4(int64_t val) {
  return (val % 4 == 0 && isImmUs(val / 4));
}
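// For illustration: isImmUs accepts the 0 - 11 immediate range of the short
// scaled-offset loads/stores, so isImmUs2(22) and isImmUs4(44) hold
// (22 = 2 * 11, 44 = 4 * 11), while isImmUs2(3) fails the even-scaling check.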
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  unsigned Size = DL.getTypeAllocSize(Ty);
  if (AM.BaseGV)
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs % 4 == 0;

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0)
      return isImmUs(AM.BaseOffs);
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
    // reg + imm
    if (AM.Scale == 0)
      return isImmUs2(AM.BaseOffs);
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0)
      return isImmUs4(AM.BaseOffs);
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}
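// For illustration: a word-sized access at base + 8 is legal here since
// isImmUs4(8) holds (8 = 4 * 2), as is base + index with Scale == 4
// (reg + reg<<2); a byte access instead allows offsets 0 - 11 or a plain
// reg + reg form with Scale == 1.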
//===----------------------------------------------------------------------===//
//  XCore Inline Assembly Support
//===----------------------------------------------------------------------===//
std::pair<unsigned, const TargetRegisterClass *>
XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
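// For illustration: inline assembly such as
//   int v; asm("ldw %0, sp[1]" : "=r"(v));
// reaches this hook with the single-letter constraint 'r' and is given a
// register from GRRegsRegClass; any other constraint falls through to the
// generic TargetLowering handling.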