//===-- llvm/CodeGen/GlobalISel/LegalizerHelper.cpp -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This file implements the LegalizerHelper class to legalize
/// individual instructions and the LegalizeMachineIR wrapper pass for the
/// primary legalization.
//
//===----------------------------------------------------------------------===//
16 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
17 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
18 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/TargetLowering.h"
21 #include "llvm/CodeGen/TargetSubtargetInfo.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Support/raw_ostream.h"
26 #define DEBUG_TYPE "legalizer"
29 using namespace LegalizeActions;
31 LegalizerHelper::LegalizerHelper(MachineFunction &MF)
32 : MRI(MF.getRegInfo()), LI(*MF.getSubtarget().getLegalizerInfo()) {
36 LegalizerHelper::LegalizeResult
37 LegalizerHelper::legalizeInstrStep(MachineInstr &MI) {
38 LLVM_DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs()));
40 auto Step = LI.getAction(MI, MRI);
41 switch (Step.Action) {
43 LLVM_DEBUG(dbgs() << ".. Already legal\n");
46 LLVM_DEBUG(dbgs() << ".. Convert to libcall\n");
49 LLVM_DEBUG(dbgs() << ".. Narrow scalar\n");
50 return narrowScalar(MI, Step.TypeIdx, Step.NewType);
52 LLVM_DEBUG(dbgs() << ".. Widen scalar\n");
53 return widenScalar(MI, Step.TypeIdx, Step.NewType);
55 LLVM_DEBUG(dbgs() << ".. Lower\n");
56 return lower(MI, Step.TypeIdx, Step.NewType);
58 LLVM_DEBUG(dbgs() << ".. Reduce number of elements\n");
59 return fewerElementsVector(MI, Step.TypeIdx, Step.NewType);
61 LLVM_DEBUG(dbgs() << ".. Custom legalization\n");
62 return LI.legalizeCustom(MI, MRI, MIRBuilder) ? Legalized
65 LLVM_DEBUG(dbgs() << ".. Unable to legalize\n");
66 return UnableToLegalize;
70 void LegalizerHelper::extractParts(unsigned Reg, LLT Ty, int NumParts,
71 SmallVectorImpl<unsigned> &VRegs) {
72 for (int i = 0; i < NumParts; ++i)
73 VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
74 MIRBuilder.buildUnmerge(VRegs, Reg);
77 static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
79 case TargetOpcode::G_SDIV:
80 assert(Size == 32 && "Unsupported size");
81 return RTLIB::SDIV_I32;
82 case TargetOpcode::G_UDIV:
83 assert(Size == 32 && "Unsupported size");
84 return RTLIB::UDIV_I32;
85 case TargetOpcode::G_SREM:
86 assert(Size == 32 && "Unsupported size");
87 return RTLIB::SREM_I32;
88 case TargetOpcode::G_UREM:
89 assert(Size == 32 && "Unsupported size");
90 return RTLIB::UREM_I32;
91 case TargetOpcode::G_FADD:
92 assert((Size == 32 || Size == 64) && "Unsupported size");
93 return Size == 64 ? RTLIB::ADD_F64 : RTLIB::ADD_F32;
94 case TargetOpcode::G_FSUB:
95 assert((Size == 32 || Size == 64) && "Unsupported size");
96 return Size == 64 ? RTLIB::SUB_F64 : RTLIB::SUB_F32;
97 case TargetOpcode::G_FMUL:
98 assert((Size == 32 || Size == 64) && "Unsupported size");
99 return Size == 64 ? RTLIB::MUL_F64 : RTLIB::MUL_F32;
100 case TargetOpcode::G_FDIV:
101 assert((Size == 32 || Size == 64) && "Unsupported size");
102 return Size == 64 ? RTLIB::DIV_F64 : RTLIB::DIV_F32;
103 case TargetOpcode::G_FREM:
104 return Size == 64 ? RTLIB::REM_F64 : RTLIB::REM_F32;
105 case TargetOpcode::G_FPOW:
106 return Size == 64 ? RTLIB::POW_F64 : RTLIB::POW_F32;
107 case TargetOpcode::G_FMA:
108 assert((Size == 32 || Size == 64) && "Unsupported size");
109 return Size == 64 ? RTLIB::FMA_F64 : RTLIB::FMA_F32;
111 llvm_unreachable("Unknown libcall function");
114 LegalizerHelper::LegalizeResult
115 llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
116 const CallLowering::ArgInfo &Result,
117 ArrayRef<CallLowering::ArgInfo> Args) {
118 auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
119 auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
120 const char *Name = TLI.getLibcallName(Libcall);
122 MIRBuilder.getMF().getFrameInfo().setHasCalls(true);
123 if (!CLI.lowerCall(MIRBuilder, TLI.getLibcallCallingConv(Libcall),
124 MachineOperand::CreateES(Name), Result, Args))
125 return LegalizerHelper::UnableToLegalize;
127 return LegalizerHelper::Legalized;
130 // Useful for libcalls where all operands have the same type.
131 static LegalizerHelper::LegalizeResult
132 simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size,
134 auto Libcall = getRTLibDesc(MI.getOpcode(), Size);
136 SmallVector<CallLowering::ArgInfo, 3> Args;
137 for (unsigned i = 1; i < MI.getNumOperands(); i++)
138 Args.push_back({MI.getOperand(i).getReg(), OpType});
139 return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), OpType},
143 static RTLIB::Libcall getConvRTLibDesc(unsigned Opcode, Type *ToType,
145 auto ToMVT = MVT::getVT(ToType);
146 auto FromMVT = MVT::getVT(FromType);
149 case TargetOpcode::G_FPEXT:
150 return RTLIB::getFPEXT(FromMVT, ToMVT);
151 case TargetOpcode::G_FPTRUNC:
152 return RTLIB::getFPROUND(FromMVT, ToMVT);
153 case TargetOpcode::G_FPTOSI:
154 return RTLIB::getFPTOSINT(FromMVT, ToMVT);
155 case TargetOpcode::G_FPTOUI:
156 return RTLIB::getFPTOUINT(FromMVT, ToMVT);
157 case TargetOpcode::G_SITOFP:
158 return RTLIB::getSINTTOFP(FromMVT, ToMVT);
159 case TargetOpcode::G_UITOFP:
160 return RTLIB::getUINTTOFP(FromMVT, ToMVT);
162 llvm_unreachable("Unsupported libcall function");
165 static LegalizerHelper::LegalizeResult
166 conversionLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, Type *ToType,
168 RTLIB::Libcall Libcall = getConvRTLibDesc(MI.getOpcode(), ToType, FromType);
169 return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), ToType},
170 {{MI.getOperand(1).getReg(), FromType}});
173 LegalizerHelper::LegalizeResult
174 LegalizerHelper::libcall(MachineInstr &MI) {
175 LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
176 unsigned Size = LLTy.getSizeInBits();
177 auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
179 MIRBuilder.setInstr(MI);
181 switch (MI.getOpcode()) {
183 return UnableToLegalize;
184 case TargetOpcode::G_SDIV:
185 case TargetOpcode::G_UDIV:
186 case TargetOpcode::G_SREM:
187 case TargetOpcode::G_UREM: {
188 Type *HLTy = Type::getInt32Ty(Ctx);
189 auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
190 if (Status != Legalized)
194 case TargetOpcode::G_FADD:
195 case TargetOpcode::G_FSUB:
196 case TargetOpcode::G_FMUL:
197 case TargetOpcode::G_FDIV:
198 case TargetOpcode::G_FMA:
199 case TargetOpcode::G_FPOW:
200 case TargetOpcode::G_FREM: {
201 Type *HLTy = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
202 auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy);
203 if (Status != Legalized)
207 case TargetOpcode::G_FPEXT: {
208 // FIXME: Support other floating point types (half, fp128 etc)
209 unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
210 unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
211 if (ToSize != 64 || FromSize != 32)
212 return UnableToLegalize;
213 LegalizeResult Status = conversionLibcall(
214 MI, MIRBuilder, Type::getDoubleTy(Ctx), Type::getFloatTy(Ctx));
215 if (Status != Legalized)
219 case TargetOpcode::G_FPTRUNC: {
220 // FIXME: Support other floating point types (half, fp128 etc)
221 unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
222 unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
223 if (ToSize != 32 || FromSize != 64)
224 return UnableToLegalize;
225 LegalizeResult Status = conversionLibcall(
226 MI, MIRBuilder, Type::getFloatTy(Ctx), Type::getDoubleTy(Ctx));
227 if (Status != Legalized)
231 case TargetOpcode::G_FPTOSI:
232 case TargetOpcode::G_FPTOUI: {
233 // FIXME: Support other types
234 unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
235 unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
236 if (ToSize != 32 || (FromSize != 32 && FromSize != 64))
237 return UnableToLegalize;
238 LegalizeResult Status = conversionLibcall(
239 MI, MIRBuilder, Type::getInt32Ty(Ctx),
240 FromSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx));
241 if (Status != Legalized)
245 case TargetOpcode::G_SITOFP:
246 case TargetOpcode::G_UITOFP: {
247 // FIXME: Support other types
248 unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
249 unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
250 if (FromSize != 32 || (ToSize != 32 && ToSize != 64))
251 return UnableToLegalize;
252 LegalizeResult Status = conversionLibcall(
254 ToSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx),
255 Type::getInt32Ty(Ctx));
256 if (Status != Legalized)
262 MI.eraseFromParent();
266 LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
269 // FIXME: Don't know how to handle secondary types yet.
270 if (TypeIdx != 0 && MI.getOpcode() != TargetOpcode::G_EXTRACT)
271 return UnableToLegalize;
273 MIRBuilder.setInstr(MI);
275 uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
276 uint64_t NarrowSize = NarrowTy.getSizeInBits();
278 switch (MI.getOpcode()) {
280 return UnableToLegalize;
281 case TargetOpcode::G_IMPLICIT_DEF: {
282 // FIXME: add support for when SizeOp0 isn't an exact multiple of
284 if (SizeOp0 % NarrowSize != 0)
285 return UnableToLegalize;
286 int NumParts = SizeOp0 / NarrowSize;
288 SmallVector<unsigned, 2> DstRegs;
289 for (int i = 0; i < NumParts; ++i)
291 MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
292 MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
293 MI.eraseFromParent();
296 case TargetOpcode::G_ADD: {
297 // FIXME: add support for when SizeOp0 isn't an exact multiple of
299 if (SizeOp0 % NarrowSize != 0)
300 return UnableToLegalize;
301 // Expand in terms of carry-setting/consuming G_ADDE instructions.
302 int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
304 SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
305 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
306 extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
308 unsigned CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1));
309 MIRBuilder.buildConstant(CarryIn, 0);
311 for (int i = 0; i < NumParts; ++i) {
312 unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
313 unsigned CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
315 MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i],
316 Src2Regs[i], CarryIn);
318 DstRegs.push_back(DstReg);
321 unsigned DstReg = MI.getOperand(0).getReg();
322 MIRBuilder.buildMerge(DstReg, DstRegs);
323 MI.eraseFromParent();
326 case TargetOpcode::G_EXTRACT: {
328 return UnableToLegalize;
330 int64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
331 // FIXME: add support for when SizeOp1 isn't an exact multiple of
333 if (SizeOp1 % NarrowSize != 0)
334 return UnableToLegalize;
335 int NumParts = SizeOp1 / NarrowSize;
337 SmallVector<unsigned, 2> SrcRegs, DstRegs;
338 SmallVector<uint64_t, 2> Indexes;
339 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
341 unsigned OpReg = MI.getOperand(0).getReg();
342 uint64_t OpStart = MI.getOperand(2).getImm();
343 uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
344 for (int i = 0; i < NumParts; ++i) {
345 unsigned SrcStart = i * NarrowSize;
347 if (SrcStart + NarrowSize <= OpStart || SrcStart >= OpStart + OpSize) {
348 // No part of the extract uses this subregister, ignore it.
350 } else if (SrcStart == OpStart && NarrowTy == MRI.getType(OpReg)) {
351 // The entire subregister is extracted, forward the value.
352 DstRegs.push_back(SrcRegs[i]);
356 // OpSegStart is where this destination segment would start in OpReg if it
357 // extended infinitely in both directions.
358 int64_t ExtractOffset;
360 if (OpStart < SrcStart) {
362 SegSize = std::min(NarrowSize, OpStart + OpSize - SrcStart);
364 ExtractOffset = OpStart - SrcStart;
365 SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize);
368 unsigned SegReg = SrcRegs[i];
369 if (ExtractOffset != 0 || SegSize != NarrowSize) {
370 // A genuine extract is needed.
371 SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
372 MIRBuilder.buildExtract(SegReg, SrcRegs[i], ExtractOffset);
375 DstRegs.push_back(SegReg);
378 MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
379 MI.eraseFromParent();
382 case TargetOpcode::G_INSERT: {
383 // FIXME: add support for when SizeOp0 isn't an exact multiple of
385 if (SizeOp0 % NarrowSize != 0)
386 return UnableToLegalize;
388 int NumParts = SizeOp0 / NarrowSize;
390 SmallVector<unsigned, 2> SrcRegs, DstRegs;
391 SmallVector<uint64_t, 2> Indexes;
392 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
394 unsigned OpReg = MI.getOperand(2).getReg();
395 uint64_t OpStart = MI.getOperand(3).getImm();
396 uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
397 for (int i = 0; i < NumParts; ++i) {
398 unsigned DstStart = i * NarrowSize;
400 if (DstStart + NarrowSize <= OpStart || DstStart >= OpStart + OpSize) {
401 // No part of the insert affects this subregister, forward the original.
402 DstRegs.push_back(SrcRegs[i]);
404 } else if (DstStart == OpStart && NarrowTy == MRI.getType(OpReg)) {
405 // The entire subregister is defined by this insert, forward the new
407 DstRegs.push_back(OpReg);
411 // OpSegStart is where this destination segment would start in OpReg if it
412 // extended infinitely in both directions.
413 int64_t ExtractOffset, InsertOffset;
415 if (OpStart < DstStart) {
417 ExtractOffset = DstStart - OpStart;
418 SegSize = std::min(NarrowSize, OpStart + OpSize - DstStart);
420 InsertOffset = OpStart - DstStart;
423 std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart);
426 unsigned SegReg = OpReg;
427 if (ExtractOffset != 0 || SegSize != OpSize) {
428 // A genuine extract is needed.
429 SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
430 MIRBuilder.buildExtract(SegReg, OpReg, ExtractOffset);
433 unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
434 MIRBuilder.buildInsert(DstReg, SrcRegs[i], SegReg, InsertOffset);
435 DstRegs.push_back(DstReg);
438 assert(DstRegs.size() == (unsigned)NumParts && "not all parts covered");
439 MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs);
440 MI.eraseFromParent();
443 case TargetOpcode::G_LOAD: {
444 // FIXME: add support for when SizeOp0 isn't an exact multiple of
446 if (SizeOp0 % NarrowSize != 0)
447 return UnableToLegalize;
449 const auto &MMO = **MI.memoperands_begin();
450 // This implementation doesn't work for atomics. Give up instead of doing
451 // something invalid.
452 if (MMO.getOrdering() != AtomicOrdering::NotAtomic ||
453 MMO.getFailureOrdering() != AtomicOrdering::NotAtomic)
454 return UnableToLegalize;
456 int NumParts = SizeOp0 / NarrowSize;
457 LLT OffsetTy = LLT::scalar(
458 MRI.getType(MI.getOperand(1).getReg()).getScalarSizeInBits());
460 SmallVector<unsigned, 2> DstRegs;
461 for (int i = 0; i < NumParts; ++i) {
462 unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
464 unsigned Adjustment = i * NarrowSize / 8;
466 MachineMemOperand *SplitMMO = MIRBuilder.getMF().getMachineMemOperand(
467 MMO.getPointerInfo().getWithOffset(Adjustment), MMO.getFlags(),
468 NarrowSize / 8, i == 0 ? MMO.getAlignment() : NarrowSize / 8,
469 MMO.getAAInfo(), MMO.getRanges(), MMO.getSyncScopeID(),
470 MMO.getOrdering(), MMO.getFailureOrdering());
472 MIRBuilder.materializeGEP(SrcReg, MI.getOperand(1).getReg(), OffsetTy,
475 MIRBuilder.buildLoad(DstReg, SrcReg, *SplitMMO);
477 DstRegs.push_back(DstReg);
479 unsigned DstReg = MI.getOperand(0).getReg();
480 MIRBuilder.buildMerge(DstReg, DstRegs);
481 MI.eraseFromParent();
484 case TargetOpcode::G_STORE: {
485 // FIXME: add support for when SizeOp0 isn't an exact multiple of
487 if (SizeOp0 % NarrowSize != 0)
488 return UnableToLegalize;
490 const auto &MMO = **MI.memoperands_begin();
491 // This implementation doesn't work for atomics. Give up instead of doing
492 // something invalid.
493 if (MMO.getOrdering() != AtomicOrdering::NotAtomic ||
494 MMO.getFailureOrdering() != AtomicOrdering::NotAtomic)
495 return UnableToLegalize;
497 int NumParts = SizeOp0 / NarrowSize;
498 LLT OffsetTy = LLT::scalar(
499 MRI.getType(MI.getOperand(1).getReg()).getScalarSizeInBits());
501 SmallVector<unsigned, 2> SrcRegs;
502 extractParts(MI.getOperand(0).getReg(), NarrowTy, NumParts, SrcRegs);
504 for (int i = 0; i < NumParts; ++i) {
506 unsigned Adjustment = i * NarrowSize / 8;
508 MachineMemOperand *SplitMMO = MIRBuilder.getMF().getMachineMemOperand(
509 MMO.getPointerInfo().getWithOffset(Adjustment), MMO.getFlags(),
510 NarrowSize / 8, i == 0 ? MMO.getAlignment() : NarrowSize / 8,
511 MMO.getAAInfo(), MMO.getRanges(), MMO.getSyncScopeID(),
512 MMO.getOrdering(), MMO.getFailureOrdering());
514 MIRBuilder.materializeGEP(DstReg, MI.getOperand(1).getReg(), OffsetTy,
517 MIRBuilder.buildStore(SrcRegs[i], DstReg, *SplitMMO);
519 MI.eraseFromParent();
522 case TargetOpcode::G_CONSTANT: {
523 // FIXME: add support for when SizeOp0 isn't an exact multiple of
525 if (SizeOp0 % NarrowSize != 0)
526 return UnableToLegalize;
527 int NumParts = SizeOp0 / NarrowSize;
528 const APInt &Cst = MI.getOperand(1).getCImm()->getValue();
529 LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
531 SmallVector<unsigned, 2> DstRegs;
532 for (int i = 0; i < NumParts; ++i) {
533 unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
535 ConstantInt::get(Ctx, Cst.lshr(NarrowSize * i).trunc(NarrowSize));
536 MIRBuilder.buildConstant(DstReg, *CI);
537 DstRegs.push_back(DstReg);
539 unsigned DstReg = MI.getOperand(0).getReg();
540 MIRBuilder.buildMerge(DstReg, DstRegs);
541 MI.eraseFromParent();
544 case TargetOpcode::G_OR: {
545 // Legalize bitwise operation:
546 // A = BinOp<Ty> B, C
548 // B1, ..., BN = G_UNMERGE_VALUES B
549 // C1, ..., CN = G_UNMERGE_VALUES C
550 // A1 = BinOp<Ty/N> B1, C2
552 // AN = BinOp<Ty/N> BN, CN
553 // A = G_MERGE_VALUES A1, ..., AN
555 // FIXME: add support for when SizeOp0 isn't an exact multiple of
557 if (SizeOp0 % NarrowSize != 0)
558 return UnableToLegalize;
559 int NumParts = SizeOp0 / NarrowSize;
561 // List the registers where the destination will be scattered.
562 SmallVector<unsigned, 2> DstRegs;
563 // List the registers where the first argument will be split.
564 SmallVector<unsigned, 2> SrcsReg1;
565 // List the registers where the second argument will be split.
566 SmallVector<unsigned, 2> SrcsReg2;
567 // Create all the temporary registers.
568 for (int i = 0; i < NumParts; ++i) {
569 unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
570 unsigned SrcReg1 = MRI.createGenericVirtualRegister(NarrowTy);
571 unsigned SrcReg2 = MRI.createGenericVirtualRegister(NarrowTy);
573 DstRegs.push_back(DstReg);
574 SrcsReg1.push_back(SrcReg1);
575 SrcsReg2.push_back(SrcReg2);
577 // Explode the big arguments into smaller chunks.
578 MIRBuilder.buildUnmerge(SrcsReg1, MI.getOperand(1).getReg());
579 MIRBuilder.buildUnmerge(SrcsReg2, MI.getOperand(2).getReg());
581 // Do the operation on each small part.
582 for (int i = 0; i < NumParts; ++i)
583 MIRBuilder.buildOr(DstRegs[i], SrcsReg1[i], SrcsReg2[i]);
585 // Gather the destination registers into the final destination.
586 unsigned DstReg = MI.getOperand(0).getReg();
587 MIRBuilder.buildMerge(DstReg, DstRegs);
588 MI.eraseFromParent();
594 void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy,
595 unsigned OpIdx, unsigned ExtOpcode) {
596 MachineOperand &MO = MI.getOperand(OpIdx);
597 auto ExtB = MIRBuilder.buildInstr(ExtOpcode, WideTy, MO.getReg());
598 MO.setReg(ExtB->getOperand(0).getReg());
601 void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
602 unsigned OpIdx, unsigned TruncOpcode) {
603 MachineOperand &MO = MI.getOperand(OpIdx);
604 unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
605 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
606 MIRBuilder.buildInstr(TruncOpcode, MO.getReg(), DstExt);
610 LegalizerHelper::LegalizeResult
611 LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
612 MIRBuilder.setInstr(MI);
614 switch (MI.getOpcode()) {
616 return UnableToLegalize;
618 case TargetOpcode::G_ADD:
619 case TargetOpcode::G_AND:
620 case TargetOpcode::G_MUL:
621 case TargetOpcode::G_OR:
622 case TargetOpcode::G_XOR:
623 case TargetOpcode::G_SUB:
624 // Perform operation at larger width (any extension is fine here, high bits
625 // don't affect the result) and then truncate the result back to the
627 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
628 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
629 widenScalarDst(MI, WideTy);
630 MIRBuilder.recordInsertion(&MI);
633 case TargetOpcode::G_SHL:
634 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
635 // The "number of bits to shift" operand must preserve its value as an
637 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
638 widenScalarDst(MI, WideTy);
639 MIRBuilder.recordInsertion(&MI);
642 case TargetOpcode::G_SDIV:
643 case TargetOpcode::G_SREM:
644 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
645 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
646 widenScalarDst(MI, WideTy);
647 MIRBuilder.recordInsertion(&MI);
650 case TargetOpcode::G_ASHR:
651 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
652 // The "number of bits to shift" operand must preserve its value as an
654 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
655 widenScalarDst(MI, WideTy);
656 MIRBuilder.recordInsertion(&MI);
659 case TargetOpcode::G_UDIV:
660 case TargetOpcode::G_UREM:
661 case TargetOpcode::G_LSHR:
662 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
663 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
664 widenScalarDst(MI, WideTy);
665 MIRBuilder.recordInsertion(&MI);
668 case TargetOpcode::G_SELECT:
670 return UnableToLegalize;
671 // Perform operation at larger width (any extension is fine here, high bits
672 // don't affect the result) and then truncate the result back to the
674 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
675 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT);
676 widenScalarDst(MI, WideTy);
677 MIRBuilder.recordInsertion(&MI);
680 case TargetOpcode::G_FPTOSI:
681 case TargetOpcode::G_FPTOUI:
683 return UnableToLegalize;
684 widenScalarDst(MI, WideTy);
685 MIRBuilder.recordInsertion(&MI);
688 case TargetOpcode::G_SITOFP:
690 return UnableToLegalize;
691 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
692 MIRBuilder.recordInsertion(&MI);
695 case TargetOpcode::G_UITOFP:
697 return UnableToLegalize;
698 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
699 MIRBuilder.recordInsertion(&MI);
702 case TargetOpcode::G_INSERT:
704 return UnableToLegalize;
705 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
706 widenScalarDst(MI, WideTy);
707 MIRBuilder.recordInsertion(&MI);
710 case TargetOpcode::G_LOAD:
711 // For some types like i24, we might try to widen to i32. To properly handle
712 // this we should be using a dedicated extending load, until then avoid
713 // trying to legalize.
714 if (alignTo(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(), 8) !=
715 WideTy.getSizeInBits())
716 return UnableToLegalize;
718 case TargetOpcode::G_SEXTLOAD:
719 case TargetOpcode::G_ZEXTLOAD:
720 widenScalarDst(MI, WideTy);
721 MIRBuilder.recordInsertion(&MI);
724 case TargetOpcode::G_STORE: {
725 if (MRI.getType(MI.getOperand(0).getReg()) != LLT::scalar(1) ||
726 WideTy != LLT::scalar(8))
727 return UnableToLegalize;
729 widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ZEXT);
730 MIRBuilder.recordInsertion(&MI);
733 case TargetOpcode::G_CONSTANT: {
734 MachineOperand &SrcMO = MI.getOperand(1);
735 LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
736 const APInt &Val = SrcMO.getCImm()->getValue().sext(WideTy.getSizeInBits());
737 SrcMO.setCImm(ConstantInt::get(Ctx, Val));
739 widenScalarDst(MI, WideTy);
740 MIRBuilder.recordInsertion(&MI);
743 case TargetOpcode::G_FCONSTANT: {
744 MachineOperand &SrcMO = MI.getOperand(1);
745 LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
746 APFloat Val = SrcMO.getFPImm()->getValueAPF();
748 switch (WideTy.getSizeInBits()) {
750 Val.convert(APFloat::IEEEsingle(), APFloat::rmTowardZero, &LosesInfo);
753 Val.convert(APFloat::IEEEdouble(), APFloat::rmTowardZero, &LosesInfo);
756 llvm_unreachable("Unhandled fp widen type");
758 SrcMO.setFPImm(ConstantFP::get(Ctx, Val));
760 widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
761 MIRBuilder.recordInsertion(&MI);
764 case TargetOpcode::G_BRCOND:
765 widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ANYEXT);
766 MIRBuilder.recordInsertion(&MI);
769 case TargetOpcode::G_FCMP:
771 widenScalarDst(MI, WideTy);
773 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT);
774 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_FPEXT);
776 MIRBuilder.recordInsertion(&MI);
779 case TargetOpcode::G_ICMP:
781 widenScalarDst(MI, WideTy);
783 unsigned ExtOpcode = CmpInst::isSigned(static_cast<CmpInst::Predicate>(
784 MI.getOperand(1).getPredicate()))
785 ? TargetOpcode::G_SEXT
786 : TargetOpcode::G_ZEXT;
787 widenScalarSrc(MI, WideTy, 2, ExtOpcode);
788 widenScalarSrc(MI, WideTy, 3, ExtOpcode);
790 MIRBuilder.recordInsertion(&MI);
793 case TargetOpcode::G_GEP:
794 assert(TypeIdx == 1 && "unable to legalize pointer of GEP");
795 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
796 MIRBuilder.recordInsertion(&MI);
799 case TargetOpcode::G_PHI: {
800 assert(TypeIdx == 0 && "Expecting only Idx 0");
802 for (unsigned I = 1; I < MI.getNumOperands(); I += 2) {
803 MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
804 MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
805 widenScalarSrc(MI, WideTy, I, TargetOpcode::G_ANYEXT);
808 MachineBasicBlock &MBB = *MI.getParent();
809 MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI());
810 widenScalarDst(MI, WideTy);
811 MIRBuilder.recordInsertion(&MI);
817 LegalizerHelper::LegalizeResult
818 LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
819 using namespace TargetOpcode;
820 MIRBuilder.setInstr(MI);
822 switch(MI.getOpcode()) {
824 return UnableToLegalize;
825 case TargetOpcode::G_SREM:
826 case TargetOpcode::G_UREM: {
827 unsigned QuotReg = MRI.createGenericVirtualRegister(Ty);
828 MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV)
830 .addUse(MI.getOperand(1).getReg())
831 .addUse(MI.getOperand(2).getReg());
833 unsigned ProdReg = MRI.createGenericVirtualRegister(Ty);
834 MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg());
835 MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
837 MI.eraseFromParent();
840 case TargetOpcode::G_SMULO:
841 case TargetOpcode::G_UMULO: {
842 // Generate G_UMULH/G_SMULH to check for overflow and a normal G_MUL for the
844 unsigned Res = MI.getOperand(0).getReg();
845 unsigned Overflow = MI.getOperand(1).getReg();
846 unsigned LHS = MI.getOperand(2).getReg();
847 unsigned RHS = MI.getOperand(3).getReg();
849 MIRBuilder.buildMul(Res, LHS, RHS);
851 unsigned Opcode = MI.getOpcode() == TargetOpcode::G_SMULO
852 ? TargetOpcode::G_SMULH
853 : TargetOpcode::G_UMULH;
855 unsigned HiPart = MRI.createGenericVirtualRegister(Ty);
856 MIRBuilder.buildInstr(Opcode)
861 unsigned Zero = MRI.createGenericVirtualRegister(Ty);
862 MIRBuilder.buildConstant(Zero, 0);
864 // For *signed* multiply, overflow is detected by checking:
865 // (hi != (lo >> bitwidth-1))
866 if (Opcode == TargetOpcode::G_SMULH) {
867 unsigned Shifted = MRI.createGenericVirtualRegister(Ty);
868 unsigned ShiftAmt = MRI.createGenericVirtualRegister(Ty);
869 MIRBuilder.buildConstant(ShiftAmt, Ty.getSizeInBits() - 1);
870 MIRBuilder.buildInstr(TargetOpcode::G_ASHR)
874 MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted);
876 MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero);
878 MI.eraseFromParent();
881 case TargetOpcode::G_FNEG: {
882 // TODO: Handle vector types once we are able to
885 return UnableToLegalize;
886 unsigned Res = MI.getOperand(0).getReg();
888 LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
889 switch (Ty.getSizeInBits()) {
891 ZeroTy = Type::getHalfTy(Ctx);
894 ZeroTy = Type::getFloatTy(Ctx);
897 ZeroTy = Type::getDoubleTy(Ctx);
900 ZeroTy = Type::getFP128Ty(Ctx);
903 llvm_unreachable("unexpected floating-point type");
905 ConstantFP &ZeroForNegation =
906 *cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy));
907 auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation);
908 MIRBuilder.buildInstr(TargetOpcode::G_FSUB)
910 .addUse(Zero->getOperand(0).getReg())
911 .addUse(MI.getOperand(1).getReg());
912 MI.eraseFromParent();
915 case TargetOpcode::G_FSUB: {
916 // Lower (G_FSUB LHS, RHS) to (G_FADD LHS, (G_FNEG RHS)).
917 // First, check if G_FNEG is marked as Lower. If so, we may
918 // end up with an infinite loop as G_FSUB is used to legalize G_FNEG.
919 if (LI.getAction({G_FNEG, {Ty}}).Action == Lower)
920 return UnableToLegalize;
921 unsigned Res = MI.getOperand(0).getReg();
922 unsigned LHS = MI.getOperand(1).getReg();
923 unsigned RHS = MI.getOperand(2).getReg();
924 unsigned Neg = MRI.createGenericVirtualRegister(Ty);
925 MIRBuilder.buildInstr(TargetOpcode::G_FNEG).addDef(Neg).addUse(RHS);
926 MIRBuilder.buildInstr(TargetOpcode::G_FADD)
930 MI.eraseFromParent();
933 case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
934 unsigned OldValRes = MI.getOperand(0).getReg();
935 unsigned SuccessRes = MI.getOperand(1).getReg();
936 unsigned Addr = MI.getOperand(2).getReg();
937 unsigned CmpVal = MI.getOperand(3).getReg();
938 unsigned NewVal = MI.getOperand(4).getReg();
939 MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal,
940 **MI.memoperands_begin());
941 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal);
942 MI.eraseFromParent();
945 case TargetOpcode::G_LOAD:
946 case TargetOpcode::G_SEXTLOAD:
947 case TargetOpcode::G_ZEXTLOAD: {
948 // Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT
949 unsigned DstReg = MI.getOperand(0).getReg();
950 unsigned PtrReg = MI.getOperand(1).getReg();
951 LLT DstTy = MRI.getType(DstReg);
952 auto &MMO = **MI.memoperands_begin();
954 if (DstTy.getSizeInBits() == MMO.getSize() /* in bytes */ * 8) {
955 // In the case of G_LOAD, this was a non-extending load already and we're
956 // about to lower to the same instruction.
957 if (MI.getOpcode() == TargetOpcode::G_LOAD)
958 return UnableToLegalize;
959 MIRBuilder.buildLoad(DstReg, PtrReg, MMO);
960 MI.eraseFromParent();
964 if (DstTy.isScalar()) {
965 unsigned TmpReg = MRI.createGenericVirtualRegister(
966 LLT::scalar(MMO.getSize() /* in bytes */ * 8));
967 MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
968 switch (MI.getOpcode()) {
970 llvm_unreachable("Unexpected opcode");
971 case TargetOpcode::G_LOAD:
972 MIRBuilder.buildAnyExt(DstReg, TmpReg);
974 case TargetOpcode::G_SEXTLOAD:
975 MIRBuilder.buildSExt(DstReg, TmpReg);
977 case TargetOpcode::G_ZEXTLOAD:
978 MIRBuilder.buildZExt(DstReg, TmpReg);
981 MI.eraseFromParent();
985 return UnableToLegalize;
990 LegalizerHelper::LegalizeResult
991 LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
993 // FIXME: Don't know how to handle secondary types yet.
995 return UnableToLegalize;
996 switch (MI.getOpcode()) {
998 return UnableToLegalize;
999 case TargetOpcode::G_ADD: {
1000 unsigned NarrowSize = NarrowTy.getSizeInBits();
1001 unsigned DstReg = MI.getOperand(0).getReg();
1002 unsigned Size = MRI.getType(DstReg).getSizeInBits();
1003 int NumParts = Size / NarrowSize;
1004 // FIXME: Don't know how to handle the situation where the small vectors
1005 // aren't all the same size yet.
1006 if (Size % NarrowSize != 0)
1007 return UnableToLegalize;
1009 MIRBuilder.setInstr(MI);
1011 SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
1012 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
1013 extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
1015 for (int i = 0; i < NumParts; ++i) {
1016 unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
1017 MIRBuilder.buildAdd(DstReg, Src1Regs[i], Src2Regs[i]);
1018 DstRegs.push_back(DstReg);
1021 MIRBuilder.buildMerge(DstReg, DstRegs);
1022 MI.eraseFromParent();