//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that BPF uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "BPFISelLowering.h"
#include "BPF.h"
#include "BPFSubtarget.h"
#include "BPFTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "bpf-lower"
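
// The flag below is consumed when the backend is driven through llc,
// e.g. (illustrative invocation only):
//   llc -march=bpfel -bpf-expand-memcpy-in-order foo.ll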
static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
  cl::Hidden, cl::init(false),
  cl::desc("Expand memcpy into load/store pairs in order"));

static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg,
                 SDValue Val) {
  MachineFunction &MF = DAG.getMachineFunction();
  std::string Str;
  raw_string_ostream OS(Str);
  OS << Msg;
  Val->print(OS);
  OS.flush();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Str, DL.getDebugLoc()));
}

BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
                                     const BPFSubtarget &STI)
    : TargetLowering(TM) {

  // Set up the register classes.
  addRegisterClass(MVT::i64, &BPF::GPRRegClass);
  if (STI.getHasAlu32())
    addRegisterClass(MVT::i32, &BPF::GPR32RegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());
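
  // The BPF ISA itself only has registers R0-R10 (R10 being the read-only
  // frame pointer); R11 is a pseudo register the backend uses internally as a
  // stack pointer.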
  setStackPointerRegisterToSaveRestore(BPF::R11);

  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i32 && !STI.getHasAlu32())
      continue;

    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::SHL_PARTS, VT, Expand);
    setOperationAction(ISD::SRL_PARTS, VT, Expand);
    setOperationAction(ISD::SRA_PARTS, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);

    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Custom);
  }

  if (STI.getHasAlu32()) {
    setOperationAction(ISD::BSWAP, MVT::i32, Promote);
    setOperationAction(ISD::BR_CC, MVT::i32,
                       STI.getHasJmp32() ? Custom : Promote);
  }

  setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);

  // Extended load operations for i1 types must be promoted.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments
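  // (Every BPF instruction is a fixed 8-byte encoding, hence the 8-byte
  // minimum and preferred alignment below.)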
  setMinFunctionAlignment(Align(8));
  setPrefFunctionAlignment(Align(8));

  if (BPFExpandMemcpyInOrder) {
    // LLVM generic code will try to expand memcpy into load/store pairs at
    // this stage, which is before quite a few IR optimization passes. The
    // loads and stores could therefore be moved apart from each other, which
    // would confuse the memcpy pattern matchers inside the kernel eBPF JIT
    // compilers.
    //
    // When -bpf-expand-memcpy-in-order is specified, we want to defer the
    // expansion of memcpy to a later stage in the IR optimization pipeline so
    // those load/store pairs won't be touched and can be kept in order.
    // Hence, we set MaxStoresPerMem* to zero to disable the generic
    // getMemcpyLoadsAndStores code path, and ask LLVM to use the target
    // expander EmitTargetCodeForMemcpy.
    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 0;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 0;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 0;
  } else {
    // Inline memcpy() so the kernel can see the explicit copy.
    unsigned CommonMaxStores =
        STI.getSelectionDAGInfo()->getCommonMaxStoresPerMemFunc();

    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = CommonMaxStores;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = CommonMaxStores;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = CommonMaxStores;
  }

  // CPU/Feature control
  HasAlu32 = STI.getHasAlu32();
  HasJmp32 = STI.getHasJmp32();
  HasJmpExt = STI.getHasJmpExt();
}

bool BPFTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  return false;
}

bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}

bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}
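
// With the alu32 feature, a write to a 32-bit subregister implicitly
// zero-extends into the full 64-bit register, so an i32 -> i64 zero extension
// costs nothing.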
bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

std::pair<unsigned, const TargetRegisterClass *>
BPFTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1)
    // GCC Constraint Letters
    switch (Constraint[0]) {
    case 'r': // GENERAL_REGS
      return std::make_pair(0U, &BPF::GPRRegClass);
    default:
      break;
    }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    report_fatal_error("Unsupported dynamic stack allocation");
  default:
    llvm_unreachable("unimplemented operand");
  }
}

// Calling Convention Implementation
#include "BPFGenCallingConv.inc"

SDValue BPFTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  for (auto &VA : ArgLocs) {
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
      switch (SimpleTy) {
      default: {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << RegVT.getEVTString() << '\n';
        llvm_unreachable(nullptr);
      }
      case MVT::i32:
      case MVT::i64:
        Register VReg = RegInfo.createVirtualRegister(
            SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);

        // If this is a value that has been promoted to a wider type, insert an
        // assert[sz]ext to capture this, then truncate to the right size.
        if (VA.getLocInfo() == CCValAssign::SExt)
          ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));
        else if (VA.getLocInfo() == CCValAssign::ZExt)
          ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));

        if (VA.getLocInfo() != CCValAssign::Full)
          ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);

        InVals.push_back(ArgValue);
        break;
      }
    } else {
      fail(DL, DAG, "defined with too many args");
      InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
    }
  }

  if (IsVarArg || MF.getFunction().hasStructRetAttr()) {
    fail(DL, DAG, "functions with VarArgs or StructRet are not supported");
  }

  return Chain;
}
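
// The BPF calling convention passes at most five arguments, in registers
// R1-R5; there is no argument passing on the stack.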
const unsigned BPFTargetLowering::MaxArgs = 5;

SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  auto &Outs = CLI.Outs;
  auto &OutVals = CLI.OutVals;
  auto &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // The BPF target does not support tail call optimization.
  IsTailCall = false;

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    break;
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (Outs.size() > MaxArgs)
    fail(CLI.DL, DAG, "too many args to ", Callee);

  for (auto &Arg : Outs) {
    ISD::ArgFlagsTy Flags = Arg.Flags;
    if (!Flags.isByVal())
      continue;

    fail(CLI.DL, DAG, "pass by value not supported ", Callee);
  }

  auto PtrVT = getPointerTy(MF.getDataLayout());
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;

  // Walk the argument assignments.
  for (unsigned i = 0,
                e = std::min(static_cast<unsigned>(ArgLocs.size()), MaxArgs);
       i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    }

    // Push arguments into the RegsToPass vector.
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    else
      llvm_unreachable("call arg pass bug");
  }

  SDValue InFlag;

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers. The InFlag
  // is necessary since all emitted instructions must be stuck together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call
  // is), turn it into a TargetGlobalAddress node so that legalize doesn't
  // hack it. Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
                                        G->getOffset(), 0);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
    fail(CLI.DL, DAG, Twine("A call to built-in function '"
                            + StringRef(E->getSymbol())
                            + "' is not supported."));
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(
      Chain, DAG.getConstant(NumBytes, CLI.DL, PtrVT, true),
      DAG.getConstant(0, CLI.DL, PtrVT, true), InFlag, CLI.DL);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, CLI.DL, DAG,
                         InVals);
}

SDValue
BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &DL, SelectionDAG &DAG) const {
  unsigned Opc = BPFISD::RET_FLAG;

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;
  MachineFunction &MF = DAG.getMachineFunction();

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (MF.getFunction().getReturnType()->isAggregateType()) {
    fail(DL, DAG, "only integer returns supported");
    return DAG.getNode(Opc, DL, MVT::Other, Chain);
  }

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together,
    // avoiding something bad.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

SDValue BPFTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (Ins.size() >= 2) {
    fail(DL, DAG, "only small returns supported");
    for (unsigned i = 0, e = Ins.size(); i != e; ++i)
      InVals.push_back(DAG.getConstant(0, DL, Ins[i].VT));
    return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InFlag).getValue(1);
  }

  CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  // Copy all of the result registers out of their specified physregs.
  for (auto &Val : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
                               Val.getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

// eBPF (without the jump-extension feature) has no signed/unsigned '<' or
// '<=' conditional jumps, so canonicalize such comparisons by swapping the
// operands and the condition code.
static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETLT:
  case ISD::SETLE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
                     DAG.getConstant(CC, DL, LHS.getValueType()), Dest);
}

SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};

  return DAG.getNode(BPFISD::SELECT_CC, DL, VTs, Ops);
}

const char *BPFTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((BPFISD::NodeType)Opcode) {
  case BPFISD::FIRST_NUMBER:
    break;
  case BPFISD::RET_FLAG:
    return "BPFISD::RET_FLAG";
  case BPFISD::CALL:
    return "BPFISD::CALL";
  case BPFISD::SELECT_CC:
    return "BPFISD::SELECT_CC";
  case BPFISD::BR_CC:
    return "BPFISD::BR_CC";
  case BPFISD::Wrapper:
    return "BPFISD::Wrapper";
  case BPFISD::MEMCPY:
    return "BPFISD::MEMCPY";
  }
  return nullptr;
}
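
// Global addresses are always lowered with a zero offset (isOffsetFoldingLegal
// returns false above, so an offset is never folded into the address); the
// Wrapper node simply marks the materialized 64-bit address.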
SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto N = cast<GlobalAddressSDNode>(Op);
  assert(N->getOffset() == 0 && "Invalid offset for global address");

  SDLoc DL(Op);
  const GlobalValue *GV = N->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i64);

  return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
}

unsigned
BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Reg, bool isSigned) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
  int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
  MachineFunction *F = BB->getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineRegisterInfo &RegInfo = F->getRegInfo();

  if (!isSigned) {
    // A 32-to-64-bit subregister move already zero-extends.
    Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
    return PromotedReg0;
  }

  // Sign extension: copy into a 64-bit register, shift left by 32, then
  // arithmetic-shift right by 32.
  Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg1 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg2 = RegInfo.createVirtualRegister(RC);
  BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
  BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
      .addReg(PromotedReg0).addImm(32);
  BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
      .addReg(PromotedReg1).addImm(32);

  return PromotedReg2;
}
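
// (For reference, the sign-extension sequence emitted above corresponds
// roughly to the following BPF assembly; illustrative only:
//    r2 = w1        // MOV_32_64
//    r2 <<= 32      // SLL_ri
//    r2 s>>= 32     // SRA_ri
// )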

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
                                                     MachineBasicBlock *BB)
                                                     const {
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);
  unsigned ScratchReg;

  // This function does custom insertion during lowering BPFISD::MEMCPY, which
  // only has two register operands from memcpy semantics: the copy source
  // address and the copy destination address.
  //
  // Because we will expand BPFISD::MEMCPY into load/store pairs, we will need
  // a third scratch register to serve as the destination register of the load
  // and the source register of the store.
  //
  // The scratch register is added with the Define | Dead | EarlyClobber flags.
  // The EarlyClobber flag has the semantic property that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction. The Define flag is
  // needed to convince the machine verifier that an Undef value isn't a
  // problem, as we are loading memory into it anyway. The Dead flag is needed
  // because the value in the scratch register isn't used by any other
  // instruction.
  ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
  MIB.addReg(ScratchReg,
             RegState::Define | RegState::Dead | RegState::EarlyClobber);

  return BB;
}

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Opc = MI.getOpcode();
  bool isSelectRROp = (Opc == BPF::Select ||
                       Opc == BPF::Select_64_32 ||
                       Opc == BPF::Select_32 ||
                       Opc == BPF::Select_32_64);

  bool isMemcpyOp = Opc == BPF::MEMCPY;

#ifndef NDEBUG
  bool isSelectRIOp = (Opc == BPF::Select_Ri ||
                       Opc == BPF::Select_Ri_64_32 ||
                       Opc == BPF::Select_Ri_32 ||
                       Opc == BPF::Select_Ri_32_64);

  assert((isSelectRROp || isSelectRIOp || isMemcpyOp) &&
         "Unexpected instr type to insert");
#endif

  if (isMemcpyOp)
    return EmitInstrWithCustomInserterMemcpy(MI, BB);

  bool is32BitCmp = (Opc == BPF::Select_32 ||
                     Opc == BPF::Select_32_64 ||
                     Opc == BPF::Select_Ri_32 ||
                     Opc == BPF::Select_Ri_32_64);

  // To "insert" a SELECT instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  // ThisMBB:
  // ...
  //  TrueVal = ...
  //  jmp_XX r1, r2 goto Copy1MBB
  //  fallthrough --> Copy0MBB
  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, Copy0MBB);
  F->insert(I, Copy1MBB);
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  Copy1MBB->splice(Copy1MBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
  Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(Copy0MBB);
  BB->addSuccessor(Copy1MBB);

  // Insert Branch if Flag
  int CC = MI.getOperand(3).getImm();
  int NewCC;
  switch (CC) {
#define SET_NEWCC(X, Y) \
  case ISD::X: \
    if (is32BitCmp && HasJmp32) \
      NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32; \
    else \
      NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri; \
    break
  SET_NEWCC(SETGT, JSGT);
  SET_NEWCC(SETUGT, JUGT);
  SET_NEWCC(SETGE, JSGE);
  SET_NEWCC(SETUGE, JUGE);
  SET_NEWCC(SETEQ, JEQ);
  SET_NEWCC(SETNE, JNE);
  SET_NEWCC(SETLT, JSLT);
  SET_NEWCC(SETULT, JULT);
  SET_NEWCC(SETLE, JSLE);
  SET_NEWCC(SETULE, JULE);
  default:
    report_fatal_error("unimplemented select CondCode " + Twine(CC));
  }

  Register LHS = MI.getOperand(1).getReg();
  bool isSignedCmp = (CC == ISD::SETGT ||
                      CC == ISD::SETGE ||
                      CC == ISD::SETLT ||
                      CC == ISD::SETLE);

  // eBPF at the moment only has 64-bit comparisons. Any 32-bit comparison
  // needs to be promoted; however, if the 32-bit comparison operands are
  // destination registers then they are implicitly zero-extended already,
  // so no explicit zero-extend sequence is needed for them.
  //
  // We simply do the extension for all situations in this method, and rely
  // on the BPFMIPeephole pass to remove the unnecessary ones.
  if (is32BitCmp && !HasJmp32)
    LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);

  if (isSelectRROp) {
    Register RHS = MI.getOperand(2).getReg();

    if (is32BitCmp && !HasJmp32)
      RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);

    BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
  } else {
    int64_t imm32 = MI.getOperand(2).getImm();
    // Sanity check before we build a J*_ri instruction.
    assert(isInt<32>(imm32));
    BuildMI(BB, DL, TII.get(NewCC))
        .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
  }

  // Copy0MBB:
  //  %FalseValue = ...
  //  # fallthrough to Copy1MBB
  BB = Copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(Copy1MBB);

  // Copy1MBB:
  //  %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
  //  ...
  BB = Copy1MBB;
  BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(5).getReg())
      .addMBB(Copy0MBB)
      .addReg(MI.getOperand(4).getReg())
      .addMBB(ThisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

EVT BPFTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                          EVT VT) const {
  return getHasAlu32() ? MVT::i32 : MVT::i64;
}

MVT BPFTargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
                                              EVT VT) const {
  return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
}