1 //===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
/// This file implements the MachineIRBuilder class.
11 //===----------------------------------------------------------------------===//
12 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
13 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
15 #include "llvm/CodeGen/MachineFunction.h"
16 #include "llvm/CodeGen/MachineInstr.h"
17 #include "llvm/CodeGen/MachineInstrBuilder.h"
18 #include "llvm/CodeGen/MachineRegisterInfo.h"
19 #include "llvm/CodeGen/TargetInstrInfo.h"
20 #include "llvm/CodeGen/TargetOpcodes.h"
21 #include "llvm/CodeGen/TargetSubtargetInfo.h"
22 #include "llvm/IR/DebugInfo.h"
26 void MachineIRBuilder::setMF(MachineFunction &MF) {
29 State.MRI = &MF.getRegInfo();
30 State.TII = MF.getSubtarget().getInstrInfo();
31 State.DL = DebugLoc();
32 State.II = MachineBasicBlock::iterator();
33 State.Observer = nullptr;
36 void MachineIRBuilder::setMBB(MachineBasicBlock &MBB) {
39 assert(&getMF() == MBB.getParent() &&
40 "Basic block is in a different function");
43 void MachineIRBuilder::setInstr(MachineInstr &MI) {
44 assert(MI.getParent() && "Instruction is not part of a basic block");
45 setMBB(*MI.getParent());
46 State.II = MI.getIterator();
49 void MachineIRBuilder::setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; }
51 void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB,
52 MachineBasicBlock::iterator II) {
53 assert(MBB.getParent() == &getMF() &&
54 "Basic block is in a different function");
59 void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const {
61 State.Observer->createdInstr(*InsertedInstr);
64 void MachineIRBuilder::setChangeObserver(GISelChangeObserver &Observer) {
65 State.Observer = &Observer;
68 void MachineIRBuilder::stopObservingChanges() { State.Observer = nullptr; }
70 //------------------------------------------------------------------------------
71 // Build instruction variants.
72 //------------------------------------------------------------------------------
74 MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) {
75 return insertInstr(buildInstrNoInsert(Opcode));
78 MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
79 MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
83 MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
84 getMBB().insert(getInsertPt(), MIB);
90 MachineIRBuilder::buildDirectDbgValue(unsigned Reg, const MDNode *Variable,
92 assert(isa<DILocalVariable>(Variable) && "not a variable");
93 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
95 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
96 "Expected inlined-at fields to agree");
97 return insertInstr(BuildMI(getMF(), getDL(),
98 getTII().get(TargetOpcode::DBG_VALUE),
99 /*IsIndirect*/ false, Reg, Variable, Expr));
103 MachineIRBuilder::buildIndirectDbgValue(unsigned Reg, const MDNode *Variable,
104 const MDNode *Expr) {
105 assert(isa<DILocalVariable>(Variable) && "not a variable");
106 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
108 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
109 "Expected inlined-at fields to agree");
110 return insertInstr(BuildMI(getMF(), getDL(),
111 getTII().get(TargetOpcode::DBG_VALUE),
112 /*IsIndirect*/ true, Reg, Variable, Expr));
115 MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
116 const MDNode *Variable,
117 const MDNode *Expr) {
118 assert(isa<DILocalVariable>(Variable) && "not a variable");
119 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
121 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
122 "Expected inlined-at fields to agree");
123 return buildInstr(TargetOpcode::DBG_VALUE)
126 .addMetadata(Variable)
130 MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
131 const MDNode *Variable,
132 const MDNode *Expr) {
133 assert(isa<DILocalVariable>(Variable) && "not a variable");
134 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
136 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
137 "Expected inlined-at fields to agree");
138 auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
139 if (auto *CI = dyn_cast<ConstantInt>(&C)) {
140 if (CI->getBitWidth() > 64)
143 MIB.addImm(CI->getZExtValue());
144 } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
147 // Insert %noreg if we didn't find a usable constant and had to drop it.
151 return MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
154 MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
155 assert(isa<DILabel>(Label) && "not a label");
156 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
157 "Expected inlined-at fields to agree");
158 auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
160 return MIB.addMetadata(Label);
163 MachineInstrBuilder MachineIRBuilder::buildFrameIndex(unsigned Res, int Idx) {
164 assert(getMRI()->getType(Res).isPointer() && "invalid operand type");
165 return buildInstr(TargetOpcode::G_FRAME_INDEX)
170 MachineInstrBuilder MachineIRBuilder::buildGlobalValue(unsigned Res,
171 const GlobalValue *GV) {
172 assert(getMRI()->getType(Res).isPointer() && "invalid operand type");
173 assert(getMRI()->getType(Res).getAddressSpace() ==
174 GV->getType()->getAddressSpace() &&
175 "address space mismatch");
177 return buildInstr(TargetOpcode::G_GLOBAL_VALUE)
179 .addGlobalAddress(GV);
182 void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
184 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
185 assert((Res == Op0 && Res == Op1) && "type mismatch");
188 MachineInstrBuilder MachineIRBuilder::buildGEP(unsigned Res, unsigned Op0,
190 assert(getMRI()->getType(Res).isPointer() &&
191 getMRI()->getType(Res) == getMRI()->getType(Op0) && "type mismatch");
192 assert(getMRI()->getType(Op1).isScalar() && "invalid offset type");
194 return buildInstr(TargetOpcode::G_GEP)
200 Optional<MachineInstrBuilder>
201 MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0,
202 const LLT &ValueTy, uint64_t Value) {
203 assert(Res == 0 && "Res is a result argument");
204 assert(ValueTy.isScalar() && "invalid offset type");
211 Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
212 unsigned TmpReg = getMRI()->createGenericVirtualRegister(ValueTy);
214 buildConstant(TmpReg, Value);
215 return buildGEP(Res, Op0, TmpReg);
218 MachineInstrBuilder MachineIRBuilder::buildPtrMask(unsigned Res, unsigned Op0,
220 assert(getMRI()->getType(Res).isPointer() &&
221 getMRI()->getType(Res) == getMRI()->getType(Op0) && "type mismatch");
223 return buildInstr(TargetOpcode::G_PTR_MASK)
229 MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
230 return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
233 MachineInstrBuilder MachineIRBuilder::buildBrIndirect(unsigned Tgt) {
234 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
235 return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
238 MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
240 return buildInstr(TargetOpcode::COPY, Res, Op);
243 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
244 const ConstantInt &Val) {
245 LLT Ty = Res.getLLTTy(*getMRI());
247 assert((Ty.isScalar() || Ty.isPointer()) && "invalid operand type");
249 const ConstantInt *NewVal = &Val;
250 if (Ty.getSizeInBits() != Val.getBitWidth())
251 NewVal = ConstantInt::get(getMF().getFunction().getContext(),
252 Val.getValue().sextOrTrunc(Ty.getSizeInBits()));
254 auto MIB = buildInstr(TargetOpcode::G_CONSTANT);
255 Res.addDefToMIB(*getMRI(), MIB);
260 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
262 auto IntN = IntegerType::get(getMF().getFunction().getContext(),
263 Res.getLLTTy(*getMRI()).getSizeInBits());
264 ConstantInt *CI = ConstantInt::get(IntN, Val, true);
265 return buildConstant(Res, *CI);
268 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
269 const ConstantFP &Val) {
270 assert(Res.getLLTTy(*getMRI()).isScalar() && "invalid operand type");
272 auto MIB = buildInstr(TargetOpcode::G_FCONSTANT);
273 Res.addDefToMIB(*getMRI(), MIB);
278 MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
280 LLT DstTy = Res.getLLTTy(*getMRI());
281 auto &Ctx = getMF().getFunction().getContext();
283 ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getSizeInBits()));
284 return buildFConstant(Res, *CFP);
287 MachineInstrBuilder MachineIRBuilder::buildBrCond(unsigned Tst,
288 MachineBasicBlock &Dest) {
289 assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");
291 return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
294 MachineInstrBuilder MachineIRBuilder::buildLoad(unsigned Res, unsigned Addr,
295 MachineMemOperand &MMO) {
296 return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
299 MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
302 MachineMemOperand &MMO) {
303 assert(getMRI()->getType(Res).isValid() && "invalid operand type");
304 assert(getMRI()->getType(Addr).isPointer() && "invalid operand type");
306 return buildInstr(Opcode)
309 .addMemOperand(&MMO);
312 MachineInstrBuilder MachineIRBuilder::buildStore(unsigned Val, unsigned Addr,
313 MachineMemOperand &MMO) {
314 assert(getMRI()->getType(Val).isValid() && "invalid operand type");
315 assert(getMRI()->getType(Addr).isPointer() && "invalid operand type");
317 return buildInstr(TargetOpcode::G_STORE)
320 .addMemOperand(&MMO);
323 MachineInstrBuilder MachineIRBuilder::buildUAdde(const DstOp &Res,
324 const DstOp &CarryOut,
327 const SrcOp &CarryIn) {
328 return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
329 {Op0, Op1, CarryIn});
332 MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
334 return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
337 MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
339 return buildInstr(TargetOpcode::G_SEXT, Res, Op);
342 MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
344 return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
347 MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
350 assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
351 TargetOpcode::G_SEXT == ExtOpc) &&
352 "Expecting Extending Opc");
353 assert(Res.getLLTTy(*getMRI()).isScalar() ||
354 Res.getLLTTy(*getMRI()).isVector());
355 assert(Res.getLLTTy(*getMRI()).isScalar() ==
356 Op.getLLTTy(*getMRI()).isScalar());
358 unsigned Opcode = TargetOpcode::COPY;
359 if (Res.getLLTTy(*getMRI()).getSizeInBits() >
360 Op.getLLTTy(*getMRI()).getSizeInBits())
362 else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
363 Op.getLLTTy(*getMRI()).getSizeInBits())
364 Opcode = TargetOpcode::G_TRUNC;
366 assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
368 return buildInstr(Opcode, Res, Op);
371 MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
373 return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
376 MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
378 return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
381 MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
383 return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
386 MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
388 LLT SrcTy = Src.getLLTTy(*getMRI());
389 LLT DstTy = Dst.getLLTTy(*getMRI());
391 return buildCopy(Dst, Src);
394 if (SrcTy.isPointer() && DstTy.isScalar())
395 Opcode = TargetOpcode::G_PTRTOINT;
396 else if (DstTy.isPointer() && SrcTy.isScalar())
397 Opcode = TargetOpcode::G_INTTOPTR;
399 assert(!SrcTy.isPointer() && !DstTy.isPointer() && "n G_ADDRCAST yet");
400 Opcode = TargetOpcode::G_BITCAST;
403 return buildInstr(Opcode, Dst, Src);
406 MachineInstrBuilder MachineIRBuilder::buildExtract(unsigned Res, unsigned Src,
409 assert(getMRI()->getType(Src).isValid() && "invalid operand type");
410 assert(getMRI()->getType(Res).isValid() && "invalid operand type");
411 assert(Index + getMRI()->getType(Res).getSizeInBits() <=
412 getMRI()->getType(Src).getSizeInBits() &&
413 "extracting off end of register");
416 if (getMRI()->getType(Res).getSizeInBits() ==
417 getMRI()->getType(Src).getSizeInBits()) {
418 assert(Index == 0 && "insertion past the end of a register");
419 return buildCast(Res, Src);
422 return buildInstr(TargetOpcode::G_EXTRACT)
428 void MachineIRBuilder::buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
429 ArrayRef<uint64_t> Indices) {
431 assert(Ops.size() == Indices.size() && "incompatible args");
432 assert(!Ops.empty() && "invalid trivial sequence");
433 assert(std::is_sorted(Indices.begin(), Indices.end()) &&
434 "sequence offsets must be in ascending order");
436 assert(getMRI()->getType(Res).isValid() && "invalid operand type");
438 assert(getMRI()->getType(Op).isValid() && "invalid operand type");
441 LLT ResTy = getMRI()->getType(Res);
442 LLT OpTy = getMRI()->getType(Ops[0]);
443 unsigned OpSize = OpTy.getSizeInBits();
444 bool MaybeMerge = true;
445 for (unsigned i = 0; i < Ops.size(); ++i) {
446 if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
452 if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
453 buildMerge(Res, Ops);
457 unsigned ResIn = getMRI()->createGenericVirtualRegister(ResTy);
460 for (unsigned i = 0; i < Ops.size(); ++i) {
461 unsigned ResOut = i + 1 == Ops.size()
463 : getMRI()->createGenericVirtualRegister(ResTy);
464 buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
469 MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
470 return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
473 MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
474 ArrayRef<unsigned> Ops) {
475 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
476 // we need some temporary storage for the DstOp objects. Here we use a
477 // sufficiently large SmallVector to not go through the heap.
478 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
479 return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
482 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
484 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
485 // we need some temporary storage for the DstOp objects. Here we use a
486 // sufficiently large SmallVector to not go through the heap.
487 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
488 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
491 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<unsigned> Res,
493 // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<DstOp>,
494 // we need some temporary storage for the DstOp objects. Here we use a
495 // sufficiently large SmallVector to not go through the heap.
496 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
497 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
500 MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
501 ArrayRef<unsigned> Ops) {
502 // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
503 // we need some temporary storage for the DstOp objects. Here we use a
504 // sufficiently large SmallVector to not go through the heap.
505 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
506 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
510 MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
511 ArrayRef<unsigned> Ops) {
512 // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
513 // we need some temporary storage for the DstOp objects. Here we use a
514 // sufficiently large SmallVector to not go through the heap.
515 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
516 return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
520 MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<unsigned> Ops) {
521 // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
522 // we need some temporary storage for the DstOp objects. Here we use a
523 // sufficiently large SmallVector to not go through the heap.
524 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
525 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
528 MachineInstrBuilder MachineIRBuilder::buildInsert(unsigned Res, unsigned Src,
529 unsigned Op, unsigned Index) {
530 assert(Index + getMRI()->getType(Op).getSizeInBits() <=
531 getMRI()->getType(Res).getSizeInBits() &&
532 "insertion past the end of a register");
534 if (getMRI()->getType(Res).getSizeInBits() ==
535 getMRI()->getType(Op).getSizeInBits()) {
536 return buildCast(Res, Op);
539 return buildInstr(TargetOpcode::G_INSERT)
546 MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
548 bool HasSideEffects) {
550 buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
551 : TargetOpcode::G_INTRINSIC);
554 MIB.addIntrinsicID(ID);
558 MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
560 return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
563 MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
565 return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op);
568 MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
572 return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
575 MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
580 return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1});
583 MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
588 return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1});
592 MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
593 const SrcOp &Elt, const SrcOp &Idx) {
594 return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
598 MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
600 return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
603 MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
604 unsigned OldValRes, unsigned SuccessRes, unsigned Addr, unsigned CmpVal,
605 unsigned NewVal, MachineMemOperand &MMO) {
607 LLT OldValResTy = getMRI()->getType(OldValRes);
608 LLT SuccessResTy = getMRI()->getType(SuccessRes);
609 LLT AddrTy = getMRI()->getType(Addr);
610 LLT CmpValTy = getMRI()->getType(CmpVal);
611 LLT NewValTy = getMRI()->getType(NewVal);
612 assert(OldValResTy.isScalar() && "invalid operand type");
613 assert(SuccessResTy.isScalar() && "invalid operand type");
614 assert(AddrTy.isPointer() && "invalid operand type");
615 assert(CmpValTy.isValid() && "invalid operand type");
616 assert(NewValTy.isValid() && "invalid operand type");
617 assert(OldValResTy == CmpValTy && "type mismatch");
618 assert(OldValResTy == NewValTy && "type mismatch");
621 return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
627 .addMemOperand(&MMO);
631 MachineIRBuilder::buildAtomicCmpXchg(unsigned OldValRes, unsigned Addr,
632 unsigned CmpVal, unsigned NewVal,
633 MachineMemOperand &MMO) {
635 LLT OldValResTy = getMRI()->getType(OldValRes);
636 LLT AddrTy = getMRI()->getType(Addr);
637 LLT CmpValTy = getMRI()->getType(CmpVal);
638 LLT NewValTy = getMRI()->getType(NewVal);
639 assert(OldValResTy.isScalar() && "invalid operand type");
640 assert(AddrTy.isPointer() && "invalid operand type");
641 assert(CmpValTy.isValid() && "invalid operand type");
642 assert(NewValTy.isValid() && "invalid operand type");
643 assert(OldValResTy == CmpValTy && "type mismatch");
644 assert(OldValResTy == NewValTy && "type mismatch");
647 return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
652 .addMemOperand(&MMO);
655 MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(unsigned Opcode,
659 MachineMemOperand &MMO) {
661 LLT OldValResTy = getMRI()->getType(OldValRes);
662 LLT AddrTy = getMRI()->getType(Addr);
663 LLT ValTy = getMRI()->getType(Val);
664 assert(OldValResTy.isScalar() && "invalid operand type");
665 assert(AddrTy.isPointer() && "invalid operand type");
666 assert(ValTy.isValid() && "invalid operand type");
667 assert(OldValResTy == ValTy && "type mismatch");
670 return buildInstr(Opcode)
674 .addMemOperand(&MMO);
678 MachineIRBuilder::buildAtomicRMWXchg(unsigned OldValRes, unsigned Addr,
679 unsigned Val, MachineMemOperand &MMO) {
680 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
684 MachineIRBuilder::buildAtomicRMWAdd(unsigned OldValRes, unsigned Addr,
685 unsigned Val, MachineMemOperand &MMO) {
686 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
690 MachineIRBuilder::buildAtomicRMWSub(unsigned OldValRes, unsigned Addr,
691 unsigned Val, MachineMemOperand &MMO) {
692 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
696 MachineIRBuilder::buildAtomicRMWAnd(unsigned OldValRes, unsigned Addr,
697 unsigned Val, MachineMemOperand &MMO) {
698 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
702 MachineIRBuilder::buildAtomicRMWNand(unsigned OldValRes, unsigned Addr,
703 unsigned Val, MachineMemOperand &MMO) {
704 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
707 MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(unsigned OldValRes,
710 MachineMemOperand &MMO) {
711 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
715 MachineIRBuilder::buildAtomicRMWXor(unsigned OldValRes, unsigned Addr,
716 unsigned Val, MachineMemOperand &MMO) {
717 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
721 MachineIRBuilder::buildAtomicRMWMax(unsigned OldValRes, unsigned Addr,
722 unsigned Val, MachineMemOperand &MMO) {
723 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
727 MachineIRBuilder::buildAtomicRMWMin(unsigned OldValRes, unsigned Addr,
728 unsigned Val, MachineMemOperand &MMO) {
729 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
733 MachineIRBuilder::buildAtomicRMWUmax(unsigned OldValRes, unsigned Addr,
734 unsigned Val, MachineMemOperand &MMO) {
735 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
739 MachineIRBuilder::buildAtomicRMWUmin(unsigned OldValRes, unsigned Addr,
740 unsigned Val, MachineMemOperand &MMO) {
741 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
746 MachineIRBuilder::buildBlockAddress(unsigned Res, const BlockAddress *BA) {
748 assert(getMRI()->getType(Res).isPointer() && "invalid res type");
751 return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
754 void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
757 if (DstTy.isVector()) {
758 assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
759 assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
760 "different number of elements in a trunc/ext");
762 assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
765 assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
766 "invalid narrowing extend");
768 assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
769 "invalid widening trunc");
773 void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy,
774 const LLT &Op0Ty, const LLT &Op1Ty) {
776 assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
777 "invalid operand type");
778 assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
779 if (ResTy.isScalar() || ResTy.isPointer())
780 assert(TstTy.isScalar() && "type mismatch");
782 assert((TstTy.isScalar() ||
784 TstTy.getNumElements() == Op0Ty.getNumElements())) &&
789 MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
790 ArrayRef<DstOp> DstOps,
791 ArrayRef<SrcOp> SrcOps,
792 Optional<unsigned> Flags) {
796 case TargetOpcode::G_SELECT: {
797 assert(DstOps.size() == 1 && "Invalid select");
798 assert(SrcOps.size() == 3 && "Invalid select");
800 DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
801 SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
804 case TargetOpcode::G_ADD:
805 case TargetOpcode::G_AND:
806 case TargetOpcode::G_ASHR:
807 case TargetOpcode::G_LSHR:
808 case TargetOpcode::G_MUL:
809 case TargetOpcode::G_OR:
810 case TargetOpcode::G_SHL:
811 case TargetOpcode::G_SUB:
812 case TargetOpcode::G_XOR:
813 case TargetOpcode::G_UDIV:
814 case TargetOpcode::G_SDIV:
815 case TargetOpcode::G_UREM:
816 case TargetOpcode::G_SREM: {
817 // All these are binary ops.
818 assert(DstOps.size() == 1 && "Invalid Dst");
819 assert(SrcOps.size() == 2 && "Invalid Srcs");
820 validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
821 SrcOps[0].getLLTTy(*getMRI()),
822 SrcOps[1].getLLTTy(*getMRI()));
824 case TargetOpcode::G_SEXT:
825 case TargetOpcode::G_ZEXT:
826 case TargetOpcode::G_ANYEXT:
827 assert(DstOps.size() == 1 && "Invalid Dst");
828 assert(SrcOps.size() == 1 && "Invalid Srcs");
829 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
830 SrcOps[0].getLLTTy(*getMRI()), true);
832 case TargetOpcode::G_TRUNC:
833 case TargetOpcode::G_FPTRUNC:
834 assert(DstOps.size() == 1 && "Invalid Dst");
835 assert(SrcOps.size() == 1 && "Invalid Srcs");
836 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
837 SrcOps[0].getLLTTy(*getMRI()), false);
840 case TargetOpcode::COPY:
841 assert(DstOps.size() == 1 && "Invalid Dst");
842 assert(SrcOps.size() == 1 && "Invalid Srcs");
843 assert(DstOps[0].getLLTTy(*getMRI()) == LLT() ||
844 SrcOps[0].getLLTTy(*getMRI()) == LLT() ||
845 DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI()));
847 case TargetOpcode::G_FCMP:
848 case TargetOpcode::G_ICMP: {
849 assert(DstOps.size() == 1 && "Invalid Dst Operands");
850 assert(SrcOps.size() == 3 && "Invalid Src Operands");
851 // For F/ICMP, the first src operand is the predicate, followed by
852 // the two comparands.
853 assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
854 "Expecting predicate");
855 assert([&]() -> bool {
856 CmpInst::Predicate Pred = SrcOps[0].getPredicate();
857 return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
858 : CmpInst::isFPPredicate(Pred);
859 }() && "Invalid predicate");
860 assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
862 assert([&]() -> bool {
863 LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
864 LLT DstTy = DstOps[0].getLLTTy(*getMRI());
865 if (Op0Ty.isScalar() || Op0Ty.isPointer())
866 return DstTy.isScalar();
868 return DstTy.isVector() &&
869 DstTy.getNumElements() == Op0Ty.getNumElements();
870 }() && "Type Mismatch");
873 case TargetOpcode::G_UNMERGE_VALUES: {
874 assert(!DstOps.empty() && "Invalid trivial sequence");
875 assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
876 assert(std::all_of(DstOps.begin(), DstOps.end(),
877 [&, this](const DstOp &Op) {
878 return Op.getLLTTy(*getMRI()) ==
879 DstOps[0].getLLTTy(*getMRI());
881 "type mismatch in output list");
882 assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
883 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
884 "input operands do not cover output register");
887 case TargetOpcode::G_MERGE_VALUES: {
888 assert(!SrcOps.empty() && "invalid trivial sequence");
889 assert(DstOps.size() == 1 && "Invalid Dst");
890 assert(std::all_of(SrcOps.begin(), SrcOps.end(),
891 [&, this](const SrcOp &Op) {
892 return Op.getLLTTy(*getMRI()) ==
893 SrcOps[0].getLLTTy(*getMRI());
895 "type mismatch in input list");
896 assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
897 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
898 "input operands do not cover output register");
899 if (SrcOps.size() == 1)
900 return buildCast(DstOps[0], SrcOps[0]);
901 if (DstOps[0].getLLTTy(*getMRI()).isVector())
902 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
905 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
906 assert(DstOps.size() == 1 && "Invalid Dst size");
907 assert(SrcOps.size() == 2 && "Invalid Src size");
908 assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
909 assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
910 DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
911 "Invalid operand type");
912 assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
913 assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
914 DstOps[0].getLLTTy(*getMRI()) &&
918 case TargetOpcode::G_INSERT_VECTOR_ELT: {
919 assert(DstOps.size() == 1 && "Invalid dst size");
920 assert(SrcOps.size() == 3 && "Invalid src size");
921 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
922 SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
923 assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
924 SrcOps[1].getLLTTy(*getMRI()) &&
926 assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
927 assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
928 SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
932 case TargetOpcode::G_BUILD_VECTOR: {
933 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
934 "Must have at least 2 operands");
935 assert(DstOps.size() == 1 && "Invalid DstOps");
936 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
937 "Res type must be a vector");
938 assert(std::all_of(SrcOps.begin(), SrcOps.end(),
939 [&, this](const SrcOp &Op) {
940 return Op.getLLTTy(*getMRI()) ==
941 SrcOps[0].getLLTTy(*getMRI());
943 "type mismatch in input list");
944 assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
945 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
946 "input scalars do not exactly cover the outpur vector register");
949 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
950 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
951 "Must have at least 2 operands");
952 assert(DstOps.size() == 1 && "Invalid DstOps");
953 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
954 "Res type must be a vector");
955 assert(std::all_of(SrcOps.begin(), SrcOps.end(),
956 [&, this](const SrcOp &Op) {
957 return Op.getLLTTy(*getMRI()) ==
958 SrcOps[0].getLLTTy(*getMRI());
960 "type mismatch in input list");
961 if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
962 DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
963 return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
966 case TargetOpcode::G_CONCAT_VECTORS: {
967 assert(DstOps.size() == 1 && "Invalid DstOps");
968 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
969 "Must have at least 2 operands");
970 assert(std::all_of(SrcOps.begin(), SrcOps.end(),
971 [&, this](const SrcOp &Op) {
972 return (Op.getLLTTy(*getMRI()).isVector() &&
973 Op.getLLTTy(*getMRI()) ==
974 SrcOps[0].getLLTTy(*getMRI()));
976 "type mismatch in input list");
977 assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
978 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
979 "input vectors do not exactly cover the outpur vector register");
982 case TargetOpcode::G_UADDE: {
983 assert(DstOps.size() == 2 && "Invalid no of dst operands");
984 assert(SrcOps.size() == 3 && "Invalid no of src operands");
985 assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
986 assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
987 (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
989 assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
990 assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
996 auto MIB = buildInstr(Opc);
997 for (const DstOp &Op : DstOps)
998 Op.addDefToMIB(*getMRI(), MIB);
999 for (const SrcOp &Op : SrcOps)
1000 Op.addSrcToMIB(MIB);
1002 MIB->setFlags(*Flags);