//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600FrameLowering.h"
#include "R600RegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"

R600InstrInfo::R600InstrInfo(const R600Subtarget &ST)
  : AMDGPUInstrInfo(ST), RI(), ST(ST) {}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, bool KillSrc) const {
  unsigned VectorComponents = 0;
  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
      AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 4;
  } else if((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||
             AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&
            (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||
             AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
        .setIsKill(KillSrc);
  }
}

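// For example, a 128-bit copy such as T1 <- T5 is expanded above into four
// channel-wise MOVs (T1.X <- T5.X ... T1.W <- T5.W), each also carrying an
// implicit def of the whole destination super-register.
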
/// \returns true if \p MBBI can be moved into a new basic block.
bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) const {
  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
                                        E = MBBI->operands_end(); I != E; ++I) {
    if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
        I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
      return false;
  }
  return true;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default:
    return false;
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}

bool R600InstrInfo::canBeConsideredALU(const MachineInstr &MI) const {
  if (isALUInstr(MI.getOpcode()))
    return true;
  if (isVector(MI) || isCubeOp(MI.getOpcode()))
    return true;
  switch (MI.getOpcode()) {
  case AMDGPU::PRED_X:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::COPY:
  case AMDGPU::DOT_4:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr &MI) const {
  return isTransOnly(MI.getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr &MI) const {
  return isVectorOnly(MI.getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  return !AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
         usesVertexCache(MI.getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  return (AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
          usesVertexCache(MI.getOpcode())) ||
          usesTextureCache(MI.getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const {
  return MI.findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const {
  return MI.findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
  if (!isALUInstr(MI.getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
                                        E = MI.operands_end();
       I != E; ++I) {
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
  };

  for (const auto &Row : SrcSelTable) {
    if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, Row[1]);
    }
  }
  return -1;
}

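// For example, passing the operand index of src1_Y on a DOT_4 returns the
// operand index of src1_sel_Y; a source with no matching _sel operand
// yields -1.
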
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr &MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI.getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        MachineOperand &Sel =
            MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
        Result.push_back(std::make_pair(&MO, Sel.getImm()));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI.getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI.getOperand(SrcIdx);
    unsigned Reg = MO.getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      MachineOperand &Sel =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
      Result.push_back(std::make_pair(&MO, Sel.getImm()));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      MachineOperand &Operand =
          MI.getOperand(getOperandIdx(MI.getOpcode(), AMDGPU::OpName::literal));
      if (Operand.isImm()) {
        Result.push_back(std::make_pair(&MO, Operand.getImm()));
        continue;
      }
      assert(Operand.isGlobal());
    }
    Result.push_back(std::make_pair(&MO, 0));
  }
  return Result;
}

std::vector<std::pair<int, unsigned>>
R600InstrInfo::ExtractSrcs(MachineInstr &MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned>> Result;
  unsigned i = 0;
  for (const auto &Src : getSrcs(MI)) {
    ++i;
    unsigned Reg = Src.first->getReg();
    int Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::make_pair(Index, 0U));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to tell it's a PS/PV reg
      Result.push_back(std::make_pair(255, 0U));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::make_pair(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

static std::vector<std::pair<int, unsigned>>
Swizzle(std::vector<std::pair<int, unsigned>> Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

static unsigned getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
  }
}

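// For example, getTransSwizzle(ALU_VEC_021_SCL_122, 0) == 1: under that
// swizzle the trans slot reads its first operand during read cycle 1.
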
/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned>> &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned>> &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
            // The value from output queue A (denoted by register OQAP) can
            // only be fetched during the first cycle.
            return false;
        }
        // OQAP does not count towards the normal read port restrictions
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check Trans Alu
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

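// Vector[chan][cycle] above records, for each of the four register channels
// (X, Y, Z, W), which register index is read through each of the three read
// ports (one per cycle). For example, if two instructions' swizzled src0
// operands both read channel X of different registers in cycle 0, they
// conflict and isLegalUpTo reports that only the first instruction fits.
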
/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic term) swizzle sequence assuming that all swizzles after
/// Idx can be skipped
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx --;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

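// The candidate vector is stepped like an odometer over the six BankSwizzle
// values, with ALU_VEC_012_SCL_210 the smallest and ALU_VEC_210 the largest:
// entries past Idx are reset, trailing ALU_VEC_210 entries roll over, and
// the next position up is incremented. Assuming the BankSwizzle declaration
// order matches getTransSwizzle above, {ALU_VEC_021_SCL_122, ALU_VEC_210}
// with Idx == 1 steps to {ALU_VEC_120_SCL_212, ALU_VEC_012_SCL_210}.
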
/// Enumerate all possible Swizzle sequence to find one that can meet all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned>> &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// Instructions in Trans slot can't read gpr at cycle 0 if they also read
/// a const, and can't read a gpr at cycle 1 if they read 2 const.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned>> &TransOps,
                  unsigned ConstCount) {
  // TransALU can't read 3 constants
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}

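// For example, with ConstCount == 2 both constants occupy read cycles 0 and
// 1, so a GPR operand is only acceptable if the swizzle maps it to cycle 2
// (e.g. getTransSwizzle(ALU_VEC_021_SCL_122, 1) == 2).
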
bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  // TODO: support shared src0 - src1 operand

  std::vector<std::vector<std::pair<int, unsigned>>> IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(*IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
        AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back((R600InstrInfo::BankSwizzle)
        IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned>> TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = std::move(IGSrcs.back());
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
        TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert (Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

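// Each Consts entry packs (Index << 2) | Chan; masking with ~3 recovers the
// constant line and bit 1 selects its 64-bit half, so a group may read at
// most two distinct half-lines (Pair1/Pair2). For example, {0x00, 0x01,
// 0x06} maps to half-lines {0x00, 0x00, 0x06}: two pairs, accepted.
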
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr &MI = *MIs[i];
    if (!isALUInstr(MI.getOpcode()))
      continue;

    for (const auto &Src : getSrcs(MI)) {
      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *
R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
  const InstrItineraryData *II = STI.getInstrItineraryData();
  return static_cast<const R600Subtarget &>(STI).createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr &MI = *I;
    if (isPredicateSetter(MI.getOpcode()))
      return &MI;
  }

  return nullptr;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
      Opcode == AMDGPU::BRANCH_COND_f32;
}

bool R600InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                  MachineBasicBlock *&TBB,
                                  MachineBasicBlock *&FBB,
                                  SmallVectorImpl<MachineOperand> &Cond,
                                  bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // AMDGPU::BRANCH* instructions are only available after isel and are not
  // handled
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(I->getOpcode())) {
    return false;
  }

  // Remove successive JUMP
  while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
    MachineBasicBlock::iterator PriorI = std::prev(I);
    if (AllowModify)
      I->removeFromParent();
    I = PriorI;
  }
  MachineInstr &LastInst = *I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst.getOpcode();
  if (I == MBB.begin() || !isJump((--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst.getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      auto predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst.getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr &SecondLastInst = *I;
  unsigned SecondLastOpc = SecondLastInst.getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    auto predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst.getOperand(0).getMBB();
    FBB = LastInst.getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

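// A block ending in "JUMP_COND %bb.1; JUMP %bb.2" therefore yields
// TBB = %bb.1, FBB = %bb.2 and a three-element condition built from the
// preceding PRED_X setter's operands plus an implicit PRED_SEL_ONE.
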
static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
      It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return It.getReverse();
  }
  return MBB.end();
}

unsigned R600InstrInfo::insertBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *TBB,
                                     MachineBasicBlock *FBB,
                                     ArrayRef<MachineOperand> Cond,
                                     const DebugLoc &DL,
                                     int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert(!BytesAdded && "code size not handled");

  if (!FBB) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(*PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(*PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
            .addMBB(TBB)
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

unsigned R600InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                     int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Note : we leave PRED* instructions there.
  // They may be needed when predicating instructions.
  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
  int idx = MI.findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI.getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool R600InstrInfo::isPredicable(const MachineInstr &MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI.getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI.getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of the MBB then the MBB has more
    // than a single clause, and we are unable to predicate several clauses.
    if (MI.getParent()->begin() != MachineBasicBlock::const_iterator(MI))
      return false;
    // TODO: We don't support KC merging atm
    return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
  } else if (isVector(MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         BranchProbability Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case AMDGPU::PRED_SETE_INT:
    MO.setImm(AMDGPU::PRED_SETNE_INT);
    break;
  case AMDGPU::PRED_SETNE_INT:
    MO.setImm(AMDGPU::PRED_SETE_INT);
    break;
  case AMDGPU::PRED_SETE:
    MO.setImm(AMDGPU::PRED_SETNE);
    break;
  case AMDGPU::PRED_SETNE:
    MO.setImm(AMDGPU::PRED_SETE);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }

  return false;
}

bool R600InstrInfo::DefinesPredicate(MachineInstr &MI,
                                     std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI.getOpcode());
}

bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
                                         ArrayRef<MachineOperand> Pred) const {
  int PIdx = MI.findFirstPredOperandIdx();

  if (MI.getOpcode() == AMDGPU::CF_ALU) {
    MI.getOperand(8).setImm(0);
    return true;
  }

  if (MI.getOpcode() == AMDGPU::DOT_4) {
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
  return 2;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: {
    MachineBasicBlock *MBB = MI.getParent();
    int OffsetOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::addr);
    // addr is a custom operand with multiple MI operands, and only the
    // first MI operand is given a name.
    int RegOpIdx = OffsetOpIdx + 1;
    int ChanOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::chan);
    if (isRegisterLoad(MI)) {
      int DstOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
      unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
      unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(),
                      getIndirectAddrRegClass()->getRegister(Address));
      } else {
        buildIndirectRead(MBB, MI, MI.getOperand(DstOpIdx).getReg(), Address,
                          OffsetReg);
      }
    } else if (isRegisterStore(MI)) {
      int ValOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::val);
      unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
      unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                      MI.getOperand(ValOpIdx).getReg());
      } else {
        buildIndirectWrite(MBB, MI, MI.getOperand(ValOpIdx).getReg(),
                           calculateIndirectAddress(RegIndex, Channel),
                           OffsetReg);
      }
    } else {
      return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case AMDGPU::R600_EXTRACT_ELT_V2:
  case AMDGPU::R600_EXTRACT_ELT_V4:
    buildIndirectRead(MI.getParent(), MI, MI.getOperand(0).getReg(),
                      RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
                      MI.getOperand(2).getReg(),
                      RI.getHWRegChan(MI.getOperand(1).getReg()));
    break;
  case AMDGPU::R600_INSERT_ELT_V2:
  case AMDGPU::R600_INSERT_ELT_V4:
    buildIndirectWrite(MI.getParent(), MI, MI.getOperand(2).getReg(), // Value
                       RI.getHWRegIndex(MI.getOperand(1).getReg()),  // Address
                       MI.getOperand(3).getReg(), // Offset
                       RI.getHWRegChan(MI.getOperand(1).getReg())); // Channel
    break;
  }

  MI.eraseFromParent();
  return true;
}

void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF) const {
  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  const R600FrameLowering *TFL = ST.getFrameLowering();

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Reserved.set(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Reserved.set(Reg);
    }
  }
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::R600_TReg32_XRegClass;
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
    default: llvm_unreachable("Invalid Channel");
    case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
    case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
    case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
    case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(*MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X,
                                              RegState::Implicit | RegState::Kill);
  setImmOperand(*Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
    default: llvm_unreachable("Invalid Channel");
    case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
    case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
    case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
    case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X,
                                               OffsetReg);
  setImmOperand(*MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg,
                                      AddrReg)
                                      .addReg(AMDGPU::AR_X,
                                              RegState::Implicit | RegState::Kill);
  setImmOperand(*Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}

int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI.getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (std::pair<unsigned, unsigned> LI : MRI.liveins()) {
    unsigned Reg = LI.first;
    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
        !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
                                                          ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  if (MFI.hasVarSizedObjects()) {
    return -1;
  }

  if (MFI.getNumObjects() == 0) {
    return -1;
  }

  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  const R600FrameLowering *TFL = ST.getFrameLowering();

  unsigned IgnoredFrameReg;
  Offset = TFL->getFrameIndexReference(MF, -1, IgnoredFrameReg);

  return getIndirectIndexBegin(MF) + Offset;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(0)     // $update_exec_mask
       .addImm(0);    // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0)        // $src0_abs
     .addImm(-1);      // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  //XXX: The r600g finalizer expects this to be 1, once we've moved the
  //scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)        // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0)        // $literal
     .addImm(0);       // $bank_swizzle

  return MIB;
}

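// Typical use: buildDefaultInstruction(MBB, I, AMDGPU::MOV, DstReg, SrcReg)
// emits a MOV with neutral modifiers (no neg/abs/rel, clamp off, src*_sel
// of -1); callers then adjust individual operands via setImmOperand() or
// setReg(), as buildMovImm() and buildIndirectWrite() do elsewhere in this
// file.
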
#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  if (ST.getGeneration() <= R600Subtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
      getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
  MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
      .setReg(MO.getReg());

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert (MO.isImm());
    setImmOperand(*MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(*MovImm, AMDGPU::OpName::literal, Imm);
  return MovImm;
}

MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                           MachineBasicBlock::iterator I,
                                           unsigned DstReg, unsigned SrcReg) const {
  return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI.getOperand(Idx).isImm());
  MI.getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_neg);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_neg);
        break;
      case 2:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src2_neg);
        break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_abs);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_abs);
        break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI.getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}

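// Non-native instructions keep all of their operand flags in one immediate:
// each operand owns a NUM_MO_FLAGS-bit field, so for example setting
// MO_FLAG_NEG on operand 1 ORs in (MO_FLAG_NEG << NUM_MO_FLAGS), and
// clearFlag masks that same field back out.
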
unsigned R600InstrInfo::getAddressSpaceForPseudoSourceKind(
    PseudoSourceValue::PSVKind Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUASI.PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
  case PseudoSourceValue::TargetCustom:
    return AMDGPUASI.CONSTANT_ADDRESS;
  }
  llvm_unreachable("Invalid pseudo source kind");
  return AMDGPUASI.PRIVATE_ADDRESS;
}