//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"

R600InstrInfo::R600InstrInfo(const R600Subtarget &ST)
  : AMDGPUInstrInfo(ST), RI(), ST(ST) {}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, bool KillSrc) const {
  unsigned VectorComponents = 0;
  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 4;
  } else if ((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||
              AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&
             (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||
              AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
                                    .setIsKill(KillSrc);
  }
}

/// \returns true if \p MBBI can be moved into a new basic block.
bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) const {
  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
                                        E = MBBI->operands_end(); I != E; ++I) {
    if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
        I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
      return false;
  }
  return true;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default:
    return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}

bool R600InstrInfo::canBeConsideredALU(const MachineInstr &MI) const {
  if (isALUInstr(MI.getOpcode()))
    return true;
  if (isVector(MI) || isCubeOp(MI.getOpcode()))
    return true;
  switch (MI.getOpcode()) {
  case AMDGPU::PRED_X:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::COPY:
  case AMDGPU::DOT_4:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr &MI) const {
  return isTransOnly(MI.getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr &MI) const {
  return isVectorOnly(MI.getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  return !AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
         usesVertexCache(MI.getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  return (AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
          usesVertexCache(MI.getOpcode())) ||
         usesTextureCache(MI.getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const {
  return MI.findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const {
  return MI.findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
  if (!isALUInstr(MI.getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
                                        E = MI.operands_end();
       I != E; ++I) {
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
  };

  for (const auto &Row : SrcSelTable) {
    if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, Row[1]);
    }
  }
  return -1;
}

SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr &MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI.getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        MachineOperand &Sel =
            MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
        Result.push_back(std::make_pair(&MO, Sel.getImm()));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI.getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI.getOperand(SrcIdx);
    unsigned Reg = MO.getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      MachineOperand &Sel =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
      Result.push_back(std::make_pair(&MO, Sel.getImm()));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      MachineOperand &Operand =
          MI.getOperand(getOperandIdx(MI.getOpcode(), AMDGPU::OpName::literal));
      if (Operand.isImm()) {
        Result.push_back(std::make_pair(&MO, Operand.getImm()));
        continue;
      }
      assert(Operand.isGlobal());
    }
    Result.push_back(std::make_pair(&MO, 0));
  }
  return Result;
}

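// A note on the helper below: it flattens the sources of MI into
// (index, channel) pairs for the read-port checker. An index of -1 marks an
// unused or constant slot, 255 marks a PS/PV access, and ConstCount reports
// how many constants were seen. The result is padded with dummy pairs up to
// the three ALU source slots.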
std::vector<std::pair<int, unsigned>>
R600InstrInfo::ExtractSrcs(MachineInstr &MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (const auto &Src : getSrcs(MI)) {
    ++i;
    unsigned Reg = Src.first->getReg();
    int Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::make_pair(Index, 0U));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to tell that this is a PS/PV reg.
      Result.push_back(std::make_pair(255, 0U));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::make_pair(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

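// Apply one of the six bank-swizzle permutations to the three source slots of
// a vector-slot instruction. Identical src0/src1 operands are collapsed first
// so a shared register only counts once against the read ports.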
static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

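// For an instruction placed in the Trans slot, return the cycle in which
// source operand Op is read under the given swizzle. Only the four *_SCL_*
// swizzles carry a Trans-slot schedule; the rest are rejected below.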
static unsigned getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
  }
}

/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  // Vector[Chan][Cycle] records which register index is read from each
  // register-bank channel on each of the three read cycles; -1 means free.
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
            // The value from output queue A (denoted by register OQAP) can
            // only be fetched during the first cycle.
            return false;
        }
        // OQAP does not count towards the normal read port restrictions.
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check the Trans ALU.
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic terms) swizzle sequence assuming that all swizzles after
/// Idx can be skipped.
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx --;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

/// Enumerate all possible swizzle sequences to find one that can meet all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// Instructions in the Trans slot can't read a gpr at cycle 0 if they also
/// read a const, and can't read a gpr at cycle 1 if they read 2 consts.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned> > &TransOps,
                  unsigned ConstCount) {
  // TransALU can't read 3 constants.
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  // TODO: support shared src0 - src1 operand.

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(*IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
        AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
        IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned> > TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = std::move(IGSrcs.back());
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
        TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

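// A note on the pairing logic below: each constant address encodes the
// fetched half in bit 1 and the line index in the higher bits, and an
// instruction group appears to be limited to two distinct half-lines,
// tracked here as Pair1 and Pair2.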
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert (Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr &MI = *MIs[i];
    if (!isALUInstr(MI.getOpcode()))
      continue;

    for (const auto &Src : getSrcs(MI)) {
      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *
R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
  const InstrItineraryData *II = STI.getInstrItineraryData();
  return static_cast<const R600Subtarget &>(STI).createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr &MI = *I;
    if (isPredicateSetter(MI.getOpcode()))
      return &MI;
  }

  return nullptr;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
         Opcode == AMDGPU::BRANCH_COND_f32;
}

bool R600InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                  MachineBasicBlock *&TBB,
                                  MachineBasicBlock *&FBB,
                                  SmallVectorImpl<MachineOperand> &Cond,
                                  bool AllowModify) const {
  // Most of the following comes from the ARM implementation of analyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // AMDGPU::BRANCH* instructions are only available after isel and are not
  // handled.
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(I->getOpcode())) {
    return false;
  }

  // Remove successive JUMPs.
  while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
    MachineBasicBlock::iterator PriorI = std::prev(I);
    if (AllowModify)
      I->removeFromParent();
    I = PriorI;
  }
  MachineInstr &LastInst = *I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst.getOpcode();
  if (I == MBB.begin() || !isJump((--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst.getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      auto predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst.getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr &SecondLastInst = *I;
  unsigned SecondLastOpc = SecondLastInst.getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    auto predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst.getOperand(0).getMBB();
    FBB = LastInst.getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return false;
}

static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
      It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return It.getReverse();
  }
  return MBB.end();
}

unsigned R600InstrInfo::insertBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *TBB,
                                     MachineBasicBlock *FBB,
                                     ArrayRef<MachineOperand> Cond,
                                     const DebugLoc &DL,
                                     int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert(!BytesAdded && "code size not handled");

  if (!FBB) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(*PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(*PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
            .addMBB(TBB)
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

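// removeBranch is the inverse of insertBranch: it erases up to two
// terminators (a JUMP_COND and/or an unconditional JUMP) and downgrades the
// matching CF_ALU_PUSH_BEFORE back to a plain CF_ALU.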
unsigned R600InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                     int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.
  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
  int idx = MI.findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI.getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool R600InstrInfo::isPredicable(MachineInstr &MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI.getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI.getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of the MBB, the MBB contains more
    // than one clause, and we cannot predicate multiple clauses.
    if (MI.getParent()->begin() != MachineBasicBlock::iterator(MI))
      return false;
    // TODO: We don't support KC merging at the moment.
    return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
  } else if (isVector(MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         BranchProbability Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case AMDGPU::PRED_SETE_INT:
    MO.setImm(AMDGPU::PRED_SETNE_INT);
    break;
  case AMDGPU::PRED_SETNE_INT:
    MO.setImm(AMDGPU::PRED_SETE_INT);
    break;
  case AMDGPU::PRED_SETE:
    MO.setImm(AMDGPU::PRED_SETNE);
    break;
  case AMDGPU::PRED_SETNE:
    MO.setImm(AMDGPU::PRED_SETE);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool R600InstrInfo::DefinesPredicate(MachineInstr &MI,
                                     std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI.getOpcode());
}

bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
                                         ArrayRef<MachineOperand> Pred) const {
  int PIdx = MI.findFirstPredOperandIdx();

  if (MI.getOpcode() == AMDGPU::CF_ALU) {
    MI.getOperand(8).setImm(0);
    return true;
  }

  if (MI.getOpcode() == AMDGPU::DOT_4) {
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, AMDGPU::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
  return 2;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: {
    MachineBasicBlock *MBB = MI.getParent();
    int OffsetOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::addr);
    // addr is a custom operand with multiple MI operands, and only the
    // first MI operand is given a name.
    int RegOpIdx = OffsetOpIdx + 1;
    int ChanOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::chan);
    if (isRegisterLoad(MI)) {
      int DstOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
      unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
      unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(),
                      getIndirectAddrRegClass()->getRegister(Address));
      } else {
        buildIndirectRead(MBB, MI, MI.getOperand(DstOpIdx).getReg(), Address,
                          OffsetReg);
      }
    } else if (isRegisterStore(MI)) {
      int ValOpIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::val);
      unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
      unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                      MI.getOperand(ValOpIdx).getReg());
      } else {
        buildIndirectWrite(MBB, MI, MI.getOperand(ValOpIdx).getReg(),
                           calculateIndirectAddress(RegIndex, Channel),
                           OffsetReg);
      }
    } else {
      return false;
    }

    MBB->erase(MI);
    return true;
  }
  case AMDGPU::R600_EXTRACT_ELT_V2:
  case AMDGPU::R600_EXTRACT_ELT_V4:
    buildIndirectRead(MI.getParent(), MI, MI.getOperand(0).getReg(),
                      RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
                      MI.getOperand(2).getReg(),
                      RI.getHWRegChan(MI.getOperand(1).getReg()));
    break;
  case AMDGPU::R600_INSERT_ELT_V2:
  case AMDGPU::R600_INSERT_ELT_V4:
    buildIndirectWrite(MI.getParent(), MI, MI.getOperand(2).getReg(), // Value
                       RI.getHWRegIndex(MI.getOperand(1).getReg()),   // Address
                       MI.getOperand(3).getReg(),                     // Offset
                       RI.getHWRegChan(MI.getOperand(1).getReg()));   // Channel
    break;
  }
  MI.eraseFromParent();
  return true;
}

void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF) const {
  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  const R600FrameLowering *TFL = ST.getFrameLowering();

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Reserved.set(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Reserved.set(Reg);
    }
  }
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::R600_TReg32_XRegClass;
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(*MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X,
                                              RegState::Implicit | RegState::Kill);
  setImmOperand(*Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}

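// Indirect reads mirror indirect writes: the offset is first moved into the
// address register AR_X via MOVA_INT_eg, then a MOV with src0_rel set
// performs the register-relative read.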
MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X,
                                               OffsetReg);
  setImmOperand(*MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg,
                                      AddrReg)
                                      .addReg(AMDGPU::AR_X,
                                              RegState::Implicit | RegState::Kill);
  setImmOperand(*Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}

int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI.getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
                                            LI != LE; ++LI) {
    unsigned Reg = LI->first;
    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
        !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
                                                          ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Variable sized objects are not supported.
  if (MFI.hasVarSizedObjects()) {
    return -1;
  }

  if (MFI.getNumObjects() == 0) {
    return -1;
  }

  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  const R600FrameLowering *TFL = ST.getFrameLowering();

  unsigned IgnoredFrameReg;
  Offset = TFL->getFrameIndexReference(MF, -1, IgnoredFrameReg);

  return getIndirectIndexBegin(MF) + Offset;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

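// Build a fully-formed ALU instruction with every modifier operand present
// and set to its default value; callers then patch individual operands as
// needed. The operand order below mirrors the native R600 ALU encoding.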
MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(0)     // $update_exec_mask
       .addImm(0);    // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0)        // $src0_abs
     .addImm(-1);      // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                     // $last
     .addReg(AMDGPU::PRED_SEL_OFF)  // $pred_sel
     .addImm(0)                     // $literal
     .addImm(0);                    // $bank_swizzle

  return MIB;
}

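// OPERAND_CASE expands into a per-slot lookup: given the name of a DOT_4
// operand, getSlotedOps returns its _X/_Y/_Z/_W variant selected by Slot.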
#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  if (ST.getGeneration() <= R600Subtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
      getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
  MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
      .setReg(MO.getReg());

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert (MO.isImm());
    setImmOperand(*MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(*MovImm, AMDGPU::OpName::literal, Imm);
  return MovImm;
}

MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                           MachineBasicBlock::iterator I,
                                           unsigned DstReg, unsigned SrcReg) const {
  return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI.getOperand(Idx).isImm());
  MI.getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_neg);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_neg);
        break;
      case 2:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src2_neg);
        break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src0_abs);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, AMDGPU::OpName::src1_abs);
        break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI.getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}