//===-- BPFInstrInfo.td - Target Description for BPF Target ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the BPF instructions in TableGen format.
//
//===----------------------------------------------------------------------===//

include "BPFInstrFormats.td"

// Instruction Operands and Patterns

// These are target-independent nodes, but have target-specific formats.
def SDT_BPFCallSeqStart : SDCallSeqStart<[SDTCisVT<0, iPTR>,
                                          SDTCisVT<1, iPTR>]>;
def SDT_BPFCallSeqEnd   : SDCallSeqEnd<[SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
def SDT_BPFCall         : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
def SDT_BPFSetFlag      : SDTypeProfile<0, 3, [SDTCisSameAs<0, 1>]>;
def SDT_BPFSelectCC     : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>,
                                               SDTCisSameAs<0, 4>,
                                               SDTCisSameAs<4, 5>]>;
def SDT_BPFBrCC         : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>,
                                               SDTCisVT<3, OtherVT>]>;
def SDT_BPFWrapper      : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                               SDTCisPtrTy<0>]>;
def SDT_BPFMEMCPY       : SDTypeProfile<0, 4, [SDTCisVT<0, i64>,
                                               SDTCisVT<1, i64>,
                                               SDTCisVT<2, i64>,
                                               SDTCisVT<3, i64>]>;

def BPFcall         : SDNode<"BPFISD::CALL", SDT_BPFCall,
                             [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                              SDNPVariadic]>;
def BPFretflag      : SDNode<"BPFISD::RET_FLAG", SDTNone,
                             [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def BPFcallseq_start: SDNode<"ISD::CALLSEQ_START", SDT_BPFCallSeqStart,
                             [SDNPHasChain, SDNPOutGlue]>;
def BPFcallseq_end  : SDNode<"ISD::CALLSEQ_END",   SDT_BPFCallSeqEnd,
                             [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def BPFbrcc         : SDNode<"BPFISD::BR_CC", SDT_BPFBrCC,
                             [SDNPHasChain, SDNPOutGlue, SDNPInGlue]>;

def BPFselectcc     : SDNode<"BPFISD::SELECT_CC", SDT_BPFSelectCC, [SDNPInGlue]>;
def BPFWrapper      : SDNode<"BPFISD::Wrapper", SDT_BPFWrapper>;
def BPFmemcpy       : SDNode<"BPFISD::MEMCPY", SDT_BPFMEMCPY,
                             [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
                              SDNPMayStore, SDNPMayLoad]>;
def BPFIsLittleEndian : Predicate<"CurDAG->getDataLayout().isLittleEndian()">;
def BPFIsBigEndian    : Predicate<"!CurDAG->getDataLayout().isLittleEndian()">;
def BPFHasALU32 : Predicate<"Subtarget->getHasAlu32()">;
def BPFNoALU32 : Predicate<"!Subtarget->getHasAlu32()">;

def brtarget : Operand<OtherVT> {
  let PrintMethod = "printBrTargetOperand";
}

def calltarget : Operand<i64>;

def u64imm   : Operand<i64> {
  let PrintMethod = "printImm64Operand";
}

def i64immSExt32 : PatLeaf<(i64 imm),
                [{return isInt<32>(N->getSExtValue()); }]>;
def i32immSExt32 : PatLeaf<(i32 imm),
                [{return isInt<32>(N->getSExtValue()); }]>;
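// Note: BPF instructions carry at most a 32-bit immediate field, which the
// 64-bit ALU and jump instructions sign-extend, so only constants that fit a
// signed 32-bit value can use the immediate (_ri) instruction forms matched
// through the two PatLeafs above.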
// Addressing modes.
def ADDRri : ComplexPattern<i64, 2, "SelectAddr", [], []>;
def FIri : ComplexPattern<i64, 2, "SelectFIAddr", [add, or], []>;

// Address operands
def MEMri : Operand<i64> {
  let PrintMethod = "printMemOperand";
  let EncoderMethod = "getMemoryOpValue";
  let DecoderMethod = "decodeMemoryOpValue";
  let MIOperandInfo = (ops GPR, i16imm);
}

// Conditional code predicates - used for pattern matching for jump instructions
def BPF_CC_EQ  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETEQ);}]>;
def BPF_CC_NE  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETNE);}]>;
def BPF_CC_GE  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETGE);}]>;
def BPF_CC_GT  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETGT);}]>;
def BPF_CC_GTU : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETUGT);}]>;
def BPF_CC_GEU : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETUGE);}]>;
def BPF_CC_LE  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETLE);}]>;
def BPF_CC_LT  : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETLT);}]>;
def BPF_CC_LTU : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETULT);}]>;
def BPF_CC_LEU : PatLeaf<(i64 imm),
                         [{return (N->getZExtValue() == ISD::SETULE);}]>;
def BPF_CC_EQ_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETEQ);}]>;
def BPF_CC_NE_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETNE);}]>;
def BPF_CC_GE_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETGE);}]>;
def BPF_CC_GT_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETGT);}]>;
def BPF_CC_GTU_32 : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETUGT);}]>;
def BPF_CC_GEU_32 : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETUGE);}]>;
def BPF_CC_LE_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETLE);}]>;
def BPF_CC_LT_32  : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETLT);}]>;
def BPF_CC_LTU_32 : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETULT);}]>;
def BPF_CC_LEU_32 : PatLeaf<(i32 imm),
                            [{return (N->getZExtValue() == ISD::SETULE);}]>;

// For arithmetic and jump instructions the 8-bit 'code'
// field is divided into three parts:
//
//   +----------------+--------+--------------------+
//   |   4 bits       |  1 bit |   3 bits           |
//   | operation code | source | instruction class  |
//   +----------------+--------+--------------------+
//   (MSB)                                      (LSB)
class TYPE_ALU_JMP<bits<4> op, bits<1> srctype,
                   dag outs, dag ins, string asmstr, list<dag> pattern>
    : InstBPF<outs, ins, asmstr, pattern> {

  let Inst{63-60} = op;
  let Inst{59} = srctype;
}

// For load and store instructions the 8-bit 'code' field is divided as:
//
//   +--------+--------+-------------------+
//   | 3 bits | 2 bits |   3 bits          |
//   |  mode  |  size  | instruction class |
//   +--------+--------+-------------------+
//   (MSB)                             (LSB)
class TYPE_LD_ST<bits<3> mode, bits<2> size,
                 dag outs, dag ins, string asmstr, list<dag> pattern>
    : InstBPF<outs, ins, asmstr, pattern> {

  let Inst{63-61} = mode;
  let Inst{60-59} = size;
}

// jump instructions
class JMP_RR<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_X.Value,
                   (outs),
                   (ins GPR:$dst, GPR:$src, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $src goto $BrDst",
                   [(BPFbrcc i64:$dst, i64:$src, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<4> src;
  bits<16> BrDst;

  let Inst{55-52} = src;
  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let BPFClass = BPF_JMP;
}

class JMP_RI<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value,
                   (outs),
                   (ins GPR:$dst, i64imm:$imm, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $imm goto $BrDst",
                   [(BPFbrcc i64:$dst, i64immSExt32:$imm, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<16> BrDst;
  bits<32> imm;

  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let Inst{31-0} = imm;
  let BPFClass = BPF_JMP;
}

class JMP_RR_32<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_X.Value,
                   (outs),
                   (ins GPR32:$dst, GPR32:$src, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $src goto $BrDst",
                   [(BPFbrcc i32:$dst, i32:$src, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<4> src;
  bits<16> BrDst;

  let Inst{55-52} = src;
  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let BPFClass = BPF_JMP32;
}

class JMP_RI_32<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value,
                   (outs),
                   (ins GPR32:$dst, i32imm:$imm, brtarget:$BrDst),
                   "if $dst "#OpcodeStr#" $imm goto $BrDst",
                   [(BPFbrcc i32:$dst, i32immSExt32:$imm, Cond, bb:$BrDst)]> {
  bits<4> dst;
  bits<16> BrDst;
  bits<32> imm;

  let Inst{51-48} = dst;
  let Inst{47-32} = BrDst;
  let Inst{31-0} = imm;
  let BPFClass = BPF_JMP32;
}
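// Illustrative note (not consumed by TableGen): with the field layout above,
// the 64-bit register/register "if $dst > $src goto ..." jump encodes its
// opcode byte as BPF_JGT (op, bits 7-4) | BPF_X (source, bit 3) |
// BPF_JMP (class, bits 2-0), i.e. 0x2d. The J multiclass below instantiates
// each comparison in four flavors: register/register and register/immediate,
// for both the 64-bit (BPF_JMP) and 32-bit (BPF_JMP32) instruction classes.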
multiclass J<BPFJumpOp Opc, string OpcodeStr, PatLeaf Cond, PatLeaf Cond32> {
  def _rr : JMP_RR<Opc, OpcodeStr, Cond>;
  def _ri : JMP_RI<Opc, OpcodeStr, Cond>;
  def _rr_32 : JMP_RR_32<Opc, OpcodeStr, Cond32>;
  def _ri_32 : JMP_RI_32<Opc, OpcodeStr, Cond32>;
}

let isBranch = 1, isTerminator = 1, hasDelaySlot=0 in {
// cmp+goto instructions
defm JEQ  : J<BPF_JEQ, "==", BPF_CC_EQ, BPF_CC_EQ_32>;
defm JUGT : J<BPF_JGT, ">", BPF_CC_GTU, BPF_CC_GTU_32>;
defm JUGE : J<BPF_JGE, ">=", BPF_CC_GEU, BPF_CC_GEU_32>;
defm JNE  : J<BPF_JNE, "!=", BPF_CC_NE, BPF_CC_NE_32>;
defm JSGT : J<BPF_JSGT, "s>", BPF_CC_GT, BPF_CC_GT_32>;
defm JSGE : J<BPF_JSGE, "s>=", BPF_CC_GE, BPF_CC_GE_32>;
defm JULT : J<BPF_JLT, "<", BPF_CC_LTU, BPF_CC_LTU_32>;
defm JULE : J<BPF_JLE, "<=", BPF_CC_LEU, BPF_CC_LEU_32>;
defm JSLT : J<BPF_JSLT, "s<", BPF_CC_LT, BPF_CC_LT_32>;
defm JSLE : J<BPF_JSLE, "s<=", BPF_CC_LE, BPF_CC_LE_32>;
}

// ALU instructions
class ALU_RI<BPFOpClass Class, BPFArithOp Opc,
             dag outs, dag ins, string asmstr, list<dag> pattern>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value, outs, ins, asmstr, pattern> {
  bits<4> dst;
  bits<32> imm;

  let Inst{51-48} = dst;
  let Inst{31-0} = imm;
  let BPFClass = Class;
}

class ALU_RR<BPFOpClass Class, BPFArithOp Opc,
             dag outs, dag ins, string asmstr, list<dag> pattern>
    : TYPE_ALU_JMP<Opc.Value, BPF_X.Value, outs, ins, asmstr, pattern> {
  bits<4> dst;
  bits<4> src;

  let Inst{55-52} = src;
  let Inst{51-48} = dst;
  let BPFClass = Class;
}

multiclass ALU<BPFArithOp Opc, string OpcodeStr, SDNode OpNode> {
  def _rr : ALU_RR<BPF_ALU64, Opc,
                   (outs GPR:$dst),
                   (ins GPR:$src2, GPR:$src),
                   "$dst "#OpcodeStr#" $src",
                   [(set GPR:$dst, (OpNode i64:$src2, i64:$src))]>;
  def _ri : ALU_RI<BPF_ALU64, Opc,
                   (outs GPR:$dst),
                   (ins GPR:$src2, i64imm:$imm),
                   "$dst "#OpcodeStr#" $imm",
                   [(set GPR:$dst, (OpNode GPR:$src2, i64immSExt32:$imm))]>;
  def _rr_32 : ALU_RR<BPF_ALU, Opc,
                      (outs GPR32:$dst),
                      (ins GPR32:$src2, GPR32:$src),
                      "$dst "#OpcodeStr#" $src",
                      [(set GPR32:$dst, (OpNode i32:$src2, i32:$src))]>;
  def _ri_32 : ALU_RI<BPF_ALU, Opc,
                      (outs GPR32:$dst),
                      (ins GPR32:$src2, i32imm:$imm),
                      "$dst "#OpcodeStr#" $imm",
                      [(set GPR32:$dst, (OpNode GPR32:$src2, i32immSExt32:$imm))]>;
}

let Constraints = "$dst = $src2" in {
let isAsCheapAsAMove = 1 in {
  defm ADD : ALU<BPF_ADD, "+=", add>;
  defm SUB : ALU<BPF_SUB, "-=", sub>;
  defm OR  : ALU<BPF_OR, "|=", or>;
  defm AND : ALU<BPF_AND, "&=", and>;
  defm SLL : ALU<BPF_LSH, "<<=", shl>;
  defm SRL : ALU<BPF_RSH, ">>=", srl>;
  defm XOR : ALU<BPF_XOR, "^=", xor>;
  defm SRA : ALU<BPF_ARSH, "s>>=", sra>;
}
  defm MUL : ALU<BPF_MUL, "*=", mul>;
  defm DIV : ALU<BPF_DIV, "/=", udiv>;
}

class NEG_RR<BPFOpClass Class, BPFArithOp Opc,
             dag outs, dag ins, string asmstr, list<dag> pattern>
    : TYPE_ALU_JMP<Opc.Value, 0, outs, ins, asmstr, pattern> {
  bits<4> dst;

  let Inst{51-48} = dst;
  let BPFClass = Class;
}

let Constraints = "$dst = $src", isAsCheapAsAMove = 1 in {
  def NEG_64: NEG_RR<BPF_ALU64, BPF_NEG, (outs GPR:$dst), (ins GPR:$src),
                     "$dst = -$src",
                     [(set GPR:$dst, (ineg i64:$src))]>;
  def NEG_32: NEG_RR<BPF_ALU, BPF_NEG, (outs GPR32:$dst), (ins GPR32:$src),
                     "$dst = -$src",
                     [(set GPR32:$dst, (ineg i32:$src))]>;
}

class LD_IMM64<bits<4> Pseudo, string OpcodeStr>
    : TYPE_LD_ST<BPF_IMM.Value, BPF_DW.Value,
                 (outs GPR:$dst),
                 (ins u64imm:$imm),
                 "$dst "#OpcodeStr#" ${imm} ll",
                 [(set GPR:$dst, (i64 imm:$imm))]> {

  bits<4> dst;
  bits<64> imm;

  let Inst{51-48} = dst;
  let Inst{55-52} = Pseudo;
  let Inst{47-32} = 0;
  let Inst{31-0} = imm{31-0};
  let BPFClass = BPF_LD;
}

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def LD_imm64 : LD_IMM64<0, "=">;
def MOV_rr : ALU_RR<BPF_ALU64, BPF_MOV,
                    (outs GPR:$dst),
                    (ins GPR:$src),
                    "$dst = $src",
                    []>;
def MOV_ri : ALU_RI<BPF_ALU64, BPF_MOV,
                    (outs GPR:$dst),
                    (ins i64imm:$imm),
                    "$dst = $imm",
                    [(set GPR:$dst, (i64 i64immSExt32:$imm))]>;
def MOV_rr_32 : ALU_RR<BPF_ALU, BPF_MOV,
                       (outs GPR32:$dst),
                       (ins GPR32:$src),
                       "$dst = $src",
                       []>;
def MOV_ri_32 : ALU_RI<BPF_ALU, BPF_MOV,
                       (outs GPR32:$dst),
                       (ins i32imm:$imm),
                       "$dst = $imm",
                       [(set GPR32:$dst, (i32 i32immSExt32:$imm))]>;
}

def FI_ri
    : TYPE_LD_ST<BPF_IMM.Value, BPF_DW.Value,
                 (outs GPR:$dst),
                 (ins MEMri:$addr),
                 "lea\t$dst, $addr",
                 [(set i64:$dst, FIri:$addr)]> {
  // This is a tentative instruction, and will be replaced
  // with MOV_rr and ADD_ri in PEI phase
  let Inst{51-48} = 0;
  let Inst{55-52} = 2;
  let Inst{47-32} = 0;
  let Inst{31-0} = 0;
  let BPFClass = BPF_LD;
}

def LD_pseudo
    : TYPE_LD_ST<BPF_IMM.Value, BPF_DW.Value,
                 (outs GPR:$dst),
                 (ins i64imm:$pseudo, u64imm:$imm),
                 "ld_pseudo\t$dst, $pseudo, $imm",
                 [(set GPR:$dst, (int_bpf_pseudo imm:$pseudo, imm:$imm))]> {

  bits<4> dst;
  bits<64> imm;
  bits<4> pseudo;

  let Inst{51-48} = dst;
  let Inst{55-52} = pseudo;
  let Inst{47-32} = 0;
  let Inst{31-0} = imm{31-0};
  let BPFClass = BPF_LD;
}

// STORE instructions
class STORE<BPFWidthModifer SizeOp, string OpcodeStr, list<dag> Pattern>
    : TYPE_LD_ST<BPF_MEM.Value, SizeOp.Value,
                 (outs),
                 (ins GPR:$src, MEMri:$addr),
                 "*("#OpcodeStr#" *)($addr) = $src",
                 Pattern> {
  bits<4> src;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = src;
  let Inst{47-32} = addr{15-0}; // offset
  let BPFClass = BPF_STX;
}

class STOREi64<BPFWidthModifer Opc, string OpcodeStr, PatFrag OpNode>
    : STORE<Opc, OpcodeStr, [(OpNode i64:$src, ADDRri:$addr)]>;

let Predicates = [BPFNoALU32] in {
  def STW : STOREi64<BPF_W, "u32", truncstorei32>;
  def STH : STOREi64<BPF_H, "u16", truncstorei16>;
  def STB : STOREi64<BPF_B, "u8", truncstorei8>;
}
def STD : STOREi64<BPF_DW, "u64", store>;

// LOAD instructions
class LOAD<BPFWidthModifer SizeOp, string OpcodeStr, list<dag> Pattern>
    : TYPE_LD_ST<BPF_MEM.Value, SizeOp.Value,
                 (outs GPR:$dst),
                 (ins MEMri:$addr),
                 "$dst = *("#OpcodeStr#" *)($addr)",
                 Pattern> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = dst;
  let Inst{55-52} = addr{19-16};
  let Inst{47-32} = addr{15-0};
  let BPFClass = BPF_LDX;
}

class LOADi64<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : LOAD<SizeOp, OpcodeStr, [(set i64:$dst, (OpNode ADDRri:$addr))]>;

let isCodeGenOnly = 1 in {
  def CORE_MEM : TYPE_LD_ST<BPF_MEM.Value, BPF_W.Value,
                            (outs GPR:$dst),
                            (ins u64imm:$opcode, GPR:$src, u64imm:$offset),
                            "$dst = core_mem($opcode, $src, $offset)",
                            []>;
  def CORE_ALU32_MEM : TYPE_LD_ST<BPF_MEM.Value, BPF_W.Value,
                                  (outs GPR32:$dst),
                                  (ins u64imm:$opcode, GPR32:$src, u64imm:$offset),
                                  "$dst = core_alu32_mem($opcode, $src, $offset)",
                                  []>;
  let Constraints = "$dst = $src" in {
    def CORE_SHIFT : ALU_RR<BPF_ALU64, BPF_LSH,
                            (outs GPR:$dst),
                            (ins u64imm:$opcode, GPR:$src, u64imm:$offset),
                            "$dst = core_shift($opcode, $src, $offset)",
                            []>;
  }
}

let Predicates = [BPFNoALU32] in {
  def LDW : LOADi64<BPF_W, "u32", zextloadi32>;
  def LDH : LOADi64<BPF_H, "u16", zextloadi16>;
  def LDB : LOADi64<BPF_B, "u8", zextloadi8>;
}
def LDD : LOADi64<BPF_DW, "u64", load>;

class BRANCH<BPFJumpOp Opc, string OpcodeStr, list<dag> Pattern>
    : TYPE_ALU_JMP<Opc.Value, BPF_K.Value,
                   (outs),
                   (ins brtarget:$BrDst),
                   !strconcat(OpcodeStr, " $BrDst"),
                   Pattern> {
  bits<16> BrDst;

  let Inst{47-32} = BrDst;
  let BPFClass = BPF_JMP;
}

class CALL<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_CALL.Value, BPF_K.Value,
                   (outs),
                   (ins calltarget:$BrDst),
                   !strconcat(OpcodeStr, " $BrDst"),
                   []> {
  bits<32> BrDst;

  let Inst{31-0} = BrDst;
  let BPFClass = BPF_JMP;
}

class CALLX<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_CALL.Value, BPF_X.Value,
                   (outs),
                   (ins GPR:$BrDst),
                   !strconcat(OpcodeStr, " $BrDst"),
                   []> {
  bits<32> BrDst;

  let Inst{31-0} = BrDst;
  let BPFClass = BPF_JMP;
}

// Jump always
let isBranch = 1, isTerminator = 1, hasDelaySlot=0, isBarrier = 1 in {
  def JMP : BRANCH<BPF_JA, "goto", [(br bb:$BrDst)]>;
}

// Jump and link
let isCall=1, hasDelaySlot=0, Uses = [R11],
    // Potentially clobbered registers
    Defs = [R0, R1, R2, R3, R4, R5] in {
  def JAL  : CALL<"call">;
  def JALX : CALLX<"callx">;
}

class NOP_I<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_MOV.Value, BPF_X.Value,
                   (outs),
                   (ins),
                   OpcodeStr,
                   []> {
  // mov r0, r0 == nop
  let Inst{55-52} = 0;
  let Inst{51-48} = 0;
  let BPFClass = BPF_ALU64;
}

let hasSideEffects = 0 in
  def NOP : NOP_I<"nop">;

class RET<string OpcodeStr>
    : TYPE_ALU_JMP<BPF_EXIT.Value, BPF_K.Value,
                   (outs),
                   (ins),
                   !strconcat(OpcodeStr, ""),
                   [(BPFretflag)]> {
  let Inst{31-0} = 0;
  let BPFClass = BPF_JMP;
}

let isReturn = 1, isTerminator = 1, hasDelaySlot=0, isBarrier = 1,
    isNotDuplicable = 1 in {
  def RET : RET<"exit">;
}

// ADJCALLSTACKDOWN/UP pseudo insns
let Defs = [R11], Uses = [R11], isCodeGenOnly = 1 in {
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
                              "#ADJCALLSTACKDOWN $amt1 $amt2",
                              [(BPFcallseq_start timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
                            "#ADJCALLSTACKUP $amt1 $amt2",
                            [(BPFcallseq_end timm:$amt1, timm:$amt2)]>;
}

let usesCustomInserter = 1, isCodeGenOnly = 1 in {
  def Select : Pseudo<(outs GPR:$dst),
                      (ins GPR:$lhs, GPR:$rhs, i64imm:$imm, GPR:$src, GPR:$src2),
                      "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                      [(set i64:$dst,
                       (BPFselectcc i64:$lhs, i64:$rhs, (i64 imm:$imm), i64:$src, i64:$src2))]>;
  def Select_Ri : Pseudo<(outs GPR:$dst),
                         (ins GPR:$lhs, i64imm:$rhs, i64imm:$imm, GPR:$src, GPR:$src2),
                         "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                         [(set i64:$dst,
                          (BPFselectcc i64:$lhs, (i64immSExt32:$rhs), (i64 imm:$imm), i64:$src, i64:$src2))]>;
  def Select_64_32 : Pseudo<(outs GPR32:$dst),
                            (ins GPR:$lhs, GPR:$rhs, i64imm:$imm, GPR32:$src, GPR32:$src2),
                            "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                            [(set i32:$dst,
                             (BPFselectcc i64:$lhs, i64:$rhs, (i64 imm:$imm), i32:$src, i32:$src2))]>;
  def Select_Ri_64_32 : Pseudo<(outs GPR32:$dst),
                               (ins GPR:$lhs, i64imm:$rhs, i64imm:$imm, GPR32:$src, GPR32:$src2),
                               "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                               [(set i32:$dst,
                                (BPFselectcc i64:$lhs, (i64immSExt32:$rhs), (i64 imm:$imm), i32:$src, i32:$src2))]>;
  def Select_32 : Pseudo<(outs GPR32:$dst),
                         (ins GPR32:$lhs, GPR32:$rhs, i32imm:$imm, GPR32:$src, GPR32:$src2),
                         "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                         [(set i32:$dst,
                          (BPFselectcc i32:$lhs, i32:$rhs, (i32 imm:$imm), i32:$src, i32:$src2))]>;
  def Select_Ri_32 : Pseudo<(outs GPR32:$dst),
                            (ins GPR32:$lhs, i32imm:$rhs, i32imm:$imm, GPR32:$src, GPR32:$src2),
                            "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                            [(set i32:$dst,
                             (BPFselectcc i32:$lhs, (i32immSExt32:$rhs), (i32 imm:$imm), i32:$src, i32:$src2))]>;
  def Select_32_64 : Pseudo<(outs GPR:$dst),
                            (ins GPR32:$lhs, GPR32:$rhs, i32imm:$imm, GPR:$src, GPR:$src2),
                            "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                            [(set i64:$dst,
                             (BPFselectcc i32:$lhs, i32:$rhs, (i32 imm:$imm), i64:$src, i64:$src2))]>;
  def Select_Ri_32_64 : Pseudo<(outs GPR:$dst),
                               (ins GPR32:$lhs, i32imm:$rhs, i32imm:$imm, GPR:$src, GPR:$src2),
                               "# Select PSEUDO $dst = $lhs $imm $rhs ? $src : $src2",
                               [(set i64:$dst,
                                (BPFselectcc i32:$lhs, (i32immSExt32:$rhs), (i32 imm:$imm), i64:$src, i64:$src2))]>;
}
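// Note: the Select* pseudos above are marked usesCustomInserter; they are
// expanded after instruction selection (via the target's
// EmitInstrWithCustomInserter hook) into a conditional branch plus register
// moves, and are never encoded directly.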
$src : $src2", [(set i64:$dst, (BPFselectcc i32:$lhs, (i32immSExt32:$rhs), (i32 imm:$imm), i64:$src, i64:$src2))]>; } // load 64-bit global addr into register def : Pat<(BPFWrapper tglobaladdr:$in), (LD_imm64 tglobaladdr:$in)>; // 0xffffFFFF doesn't fit into simm32, optimize common case def : Pat<(i64 (and (i64 GPR:$src), 0xffffFFFF)), (SRL_ri (SLL_ri (i64 GPR:$src), 32), 32)>; // Calls def : Pat<(BPFcall tglobaladdr:$dst), (JAL tglobaladdr:$dst)>; def : Pat<(BPFcall texternalsym:$dst), (JAL texternalsym:$dst)>; def : Pat<(BPFcall imm:$dst), (JAL imm:$dst)>; def : Pat<(BPFcall GPR:$dst), (JALX GPR:$dst)>; // Loads let Predicates = [BPFNoALU32] in { def : Pat<(i64 (extloadi8 ADDRri:$src)), (i64 (LDB ADDRri:$src))>; def : Pat<(i64 (extloadi16 ADDRri:$src)), (i64 (LDH ADDRri:$src))>; def : Pat<(i64 (extloadi32 ADDRri:$src)), (i64 (LDW ADDRri:$src))>; } // Atomics class XADD : TYPE_LD_ST { bits<4> dst; bits<20> addr; let Inst{51-48} = addr{19-16}; // base reg let Inst{55-52} = dst; let Inst{47-32} = addr{15-0}; // offset let BPFClass = BPF_STX; } class XADD32 : TYPE_LD_ST { bits<4> dst; bits<20> addr; let Inst{51-48} = addr{19-16}; // base reg let Inst{55-52} = dst; let Inst{47-32} = addr{15-0}; // offset let BPFClass = BPF_STX; } let Constraints = "$dst = $val" in { let Predicates = [BPFNoALU32] in { def XADDW : XADD; } let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in { def XADDW32 : XADD32; } def XADDD : XADD; } // bswap16, bswap32, bswap64 class BSWAP SizeOp, string OpcodeStr, BPFSrcType SrcType, list Pattern> : TYPE_ALU_JMP { bits<4> dst; let Inst{51-48} = dst; let Inst{31-0} = SizeOp; let BPFClass = BPF_ALU; } let Constraints = "$dst = $src" in { let Predicates = [BPFIsLittleEndian] in { def BE16 : BSWAP<16, "be16", BPF_TO_BE, [(set GPR:$dst, (srl (bswap GPR:$src), (i64 48)))]>; def BE32 : BSWAP<32, "be32", BPF_TO_BE, [(set GPR:$dst, (srl (bswap GPR:$src), (i64 32)))]>; def BE64 : BSWAP<64, "be64", BPF_TO_BE, [(set GPR:$dst, (bswap GPR:$src))]>; } let Predicates = [BPFIsBigEndian] in { def LE16 : BSWAP<16, "le16", BPF_TO_LE, [(set GPR:$dst, (srl (bswap GPR:$src), (i64 48)))]>; def LE32 : BSWAP<32, "le32", BPF_TO_LE, [(set GPR:$dst, (srl (bswap GPR:$src), (i64 32)))]>; def LE64 : BSWAP<64, "le64", BPF_TO_LE, [(set GPR:$dst, (bswap GPR:$src))]>; } } let Defs = [R0, R1, R2, R3, R4, R5], Uses = [R6], hasSideEffects = 1, hasExtraDefRegAllocReq = 1, hasExtraSrcRegAllocReq = 1, mayLoad = 1 in { class LOAD_ABS : TYPE_LD_ST { bits<32> imm; let Inst{31-0} = imm; let BPFClass = BPF_LD; } class LOAD_IND : TYPE_LD_ST { bits<4> val; let Inst{55-52} = val; let BPFClass = BPF_LD; } } def LD_ABS_B : LOAD_ABS; def LD_ABS_H : LOAD_ABS; def LD_ABS_W : LOAD_ABS; def LD_IND_B : LOAD_IND; def LD_IND_H : LOAD_IND; def LD_IND_W : LOAD_IND; let isCodeGenOnly = 1 in { def MOV_32_64 : ALU_RR; } def : Pat<(i64 (sext GPR32:$src)), (SRA_ri (SLL_ri (MOV_32_64 GPR32:$src), 32), 32)>; def : Pat<(i64 (zext GPR32:$src)), (SRL_ri (SLL_ri (MOV_32_64 GPR32:$src), 32), 32)>; // For i64 -> i32 truncation, use the 32-bit subregister directly. def : Pat<(i32 (trunc GPR:$src)), (i32 (EXTRACT_SUBREG GPR:$src, sub_32))>; // For i32 -> i64 anyext, we don't care about the high bits. 
def : Pat<(i64 (anyext GPR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;

class STORE32<BPFWidthModifer SizeOp, string OpcodeStr, list<dag> Pattern>
    : TYPE_LD_ST<BPF_MEM.Value, SizeOp.Value,
                 (outs),
                 (ins GPR32:$src, MEMri:$addr),
                 "*("#OpcodeStr#" *)($addr) = $src",
                 Pattern> {
  bits<4> src;
  bits<20> addr;

  let Inst{51-48} = addr{19-16}; // base reg
  let Inst{55-52} = src;
  let Inst{47-32} = addr{15-0}; // offset
  let BPFClass = BPF_STX;
}

class STOREi32<BPFWidthModifer Opc, string OpcodeStr, PatFrag OpNode>
    : STORE32<Opc, OpcodeStr, [(OpNode i32:$src, ADDRri:$addr)]>;

let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
  def STW32 : STOREi32<BPF_W, "u32", store>;
  def STH32 : STOREi32<BPF_H, "u16", truncstorei16>;
  def STB32 : STOREi32<BPF_B, "u8", truncstorei8>;
}

class LOAD32<BPFWidthModifer SizeOp, string OpcodeStr, list<dag> Pattern>
    : TYPE_LD_ST<BPF_MEM.Value, SizeOp.Value,
                 (outs GPR32:$dst),
                 (ins MEMri:$addr),
                 "$dst = *("#OpcodeStr#" *)($addr)",
                 Pattern> {
  bits<4> dst;
  bits<20> addr;

  let Inst{51-48} = dst;
  let Inst{55-52} = addr{19-16};
  let Inst{47-32} = addr{15-0};
  let BPFClass = BPF_LDX;
}

class LOADi32<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
    : LOAD32<SizeOp, OpcodeStr, [(set i32:$dst, (OpNode ADDRri:$addr))]>;

let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
  def LDW32 : LOADi32<BPF_W, "u32", load>;
  def LDH32 : LOADi32<BPF_H, "u16", zextloadi16>;
  def LDB32 : LOADi32<BPF_B, "u8", zextloadi8>;
}

let Predicates = [BPFHasALU32] in {
  def : Pat<(truncstorei8 GPR:$src, ADDRri:$dst),
            (STB32 (EXTRACT_SUBREG GPR:$src, sub_32), ADDRri:$dst)>;
  def : Pat<(truncstorei16 GPR:$src, ADDRri:$dst),
            (STH32 (EXTRACT_SUBREG GPR:$src, sub_32), ADDRri:$dst)>;
  def : Pat<(truncstorei32 GPR:$src, ADDRri:$dst),
            (STW32 (EXTRACT_SUBREG GPR:$src, sub_32), ADDRri:$dst)>;
  def : Pat<(i32 (extloadi8 ADDRri:$src)), (i32 (LDB32 ADDRri:$src))>;
  def : Pat<(i32 (extloadi16 ADDRri:$src)), (i32 (LDH32 ADDRri:$src))>;
  def : Pat<(i64 (zextloadi8 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDB32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (zextloadi16 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDH32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (zextloadi32 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDW32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (extloadi8 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDB32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (extloadi16 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDH32 ADDRri:$src), sub_32)>;
  def : Pat<(i64 (extloadi32 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDW32 ADDRri:$src), sub_32)>;
}

let usesCustomInserter = 1, isCodeGenOnly = 1 in {
  def MEMCPY : Pseudo<
    (outs),
    (ins GPR:$dst, GPR:$src, i64imm:$len, i64imm:$align, variable_ops),
    "#memcpy dst: $dst, src: $src, len: $len, align: $align",
    [(BPFmemcpy GPR:$dst, GPR:$src, imm:$len, imm:$align)]>;
}