//===-- AMDGPUAtomicOptimizer.cpp -----------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass optimizes atomic operations by using a single lane of a wavefront
/// to perform the atomic operation, thus reducing contention on that memory
/// location.
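///
/// For example, where every active lane of a wavefront performs an atomic add
/// of its own value to the same address, the pass arranges for one lane to
/// issue a single atomic add of the combined value, and for each lane to then
/// reconstruct the result it would have seen from the returned old value plus
/// the contributions of the lanes below it.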
//
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#define DEBUG_TYPE "amdgpu-atomic-optimizer"

using namespace llvm;

namespace {
enum DPP_CTRL {
  DPP_ROW_SR1 = 0x111,
  DPP_ROW_SR2 = 0x112,
  DPP_ROW_SR4 = 0x114,
  DPP_ROW_SR8 = 0x118,
  DPP_WF_SR1 = 0x138,
  DPP_ROW_BCAST15 = 0x142,
  DPP_ROW_BCAST31 = 0x143
};
struct ReplacementInfo {
  Instruction *I;
  Instruction::BinaryOps Op;
  unsigned ValIdx;
  bool ValDivergent;
};
class AMDGPUAtomicOptimizer : public FunctionPass,
                              public InstVisitor<AMDGPUAtomicOptimizer> {
private:
  SmallVector<ReplacementInfo, 8> ToReplace;
  const LegacyDivergenceAnalysis *DA;
  const DataLayout *DL;
  DominatorTree *DT;
  bool HasDPP;
  bool IsPixelShader;

  void optimizeAtomic(Instruction &I, Instruction::BinaryOps Op,
                      unsigned ValIdx, bool ValDivergent) const;

  void setConvergent(CallInst *const CI) const;

public:
  static char ID;

  AMDGPUAtomicOptimizer() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<LegacyDivergenceAnalysis>();
    AU.addRequired<TargetPassConfig>();
  }

  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitIntrinsicInst(IntrinsicInst &I);
};

} // namespace
char AMDGPUAtomicOptimizer::ID = 0;

char &llvm::AMDGPUAtomicOptimizerID = AMDGPUAtomicOptimizer::ID;
bool AMDGPUAtomicOptimizer::runOnFunction(Function &F) {
  if (skipFunction(F)) {
    return false;
  }

  DA = &getAnalysis<LegacyDivergenceAnalysis>();
  DL = &F.getParent()->getDataLayout();
  DominatorTreeWrapperPass *const DTW =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTW ? &DTW->getDomTree() : nullptr;
  const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  HasDPP = ST.hasDPP();
  IsPixelShader = F.getCallingConv() == CallingConv::AMDGPU_PS;

  visit(F);

  const bool Changed = !ToReplace.empty();

  for (ReplacementInfo &Info : ToReplace) {
    optimizeAtomic(*Info.I, Info.Op, Info.ValIdx, Info.ValDivergent);
  }

  ToReplace.clear();

  return Changed;
}
void AMDGPUAtomicOptimizer::visitAtomicRMWInst(AtomicRMWInst &I) {
  // Early exit for unhandled address space atomic instructions.
  switch (I.getPointerAddressSpace()) {
  default:
    return;
  case AMDGPUAS::GLOBAL_ADDRESS:
  case AMDGPUAS::LOCAL_ADDRESS:
    break;
  }

  Instruction::BinaryOps Op;

  switch (I.getOperation()) {
  default:
    return;
  case AtomicRMWInst::Add:
    Op = Instruction::Add;
    break;
  case AtomicRMWInst::Sub:
    Op = Instruction::Sub;
    break;
  }

  const unsigned PtrIdx = 0;
  const unsigned ValIdx = 1;
  // If the pointer operand is divergent, then each lane is doing an atomic
  // operation on a different address, and we cannot optimize that.
  if (DA->isDivergent(I.getOperand(PtrIdx))) {
    return;
  }

  const bool ValDivergent = DA->isDivergent(I.getOperand(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget, and the atomic operation is 32
  // bits.
  if (ValDivergent && (!HasDPP || (DL->getTypeSizeInBits(I.getType()) != 32))) {
    return;
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  const ReplacementInfo Info = {&I, Op, ValIdx, ValDivergent};

  ToReplace.push_back(Info);
}
void AMDGPUAtomicOptimizer::visitIntrinsicInst(IntrinsicInst &I) {
  Instruction::BinaryOps Op;

  switch (I.getIntrinsicID()) {
  default:
    return;
  case Intrinsic::amdgcn_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
    Op = Instruction::Add;
    break;
  case Intrinsic::amdgcn_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
    Op = Instruction::Sub;
    break;
  }

  const unsigned ValIdx = 0;
  const bool ValDivergent = DA->isDivergent(I.getOperand(ValIdx));

  // If the value operand is divergent, each lane is contributing a different
  // value to the atomic calculation. We can only optimize divergent values if
  // we have DPP available on our subtarget, and the atomic operation is 32
  // bits.
  if (ValDivergent && (!HasDPP || (DL->getTypeSizeInBits(I.getType()) != 32))) {
    return;
  }

  // If any of the other arguments to the intrinsic are divergent, we can't
  // optimize the operation.
  for (unsigned Idx = 1; Idx < I.getNumOperands(); Idx++) {
    if (DA->isDivergent(I.getOperand(Idx))) {
      return;
    }
  }

  // If we get here, we can optimize the atomic using a single wavefront-wide
  // atomic operation to do the calculation for the entire wavefront, so
  // remember the instruction so we can come back to it.
  const ReplacementInfo Info = {&I, Op, ValIdx, ValDivergent};

  ToReplace.push_back(Info);
}
void AMDGPUAtomicOptimizer::optimizeAtomic(Instruction &I,
                                           Instruction::BinaryOps Op,
                                           unsigned ValIdx,
                                           bool ValDivergent) const {
  LLVMContext &Context = I.getContext();

  // Start building just before the instruction.
  IRBuilder<> B(&I);
  // If we are in a pixel shader, because of how we have to mask out helper
  // lane invocations, we need to record the entry and exit BBs.
  BasicBlock *PixelEntryBB = nullptr;
  BasicBlock *PixelExitBB = nullptr;

  // If we're optimizing an atomic within a pixel shader, we need to wrap the
  // entire atomic operation in a helper-lane check. We do not want any helper
  // lanes that are around only for the purposes of derivatives to take part
  // in any cross-lane communication, and we use a branch on whether the lane
  // is live to do this.
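  // (The llvm.amdgcn.ps.live intrinsic reports whether the current lane is a
  // real pixel invocation rather than a helper lane, so helper lanes branch
  // around the cross-lane section below.)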
  if (IsPixelShader) {
    // Record I's original position as the entry block.
    PixelEntryBB = I.getParent();

    Value *const Cond = B.CreateIntrinsic(Intrinsic::amdgcn_ps_live, {}, {});
    Instruction *const NonHelperTerminator =
        SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, DT, nullptr);

    // Record I's new position as the exit block.
    PixelExitBB = I.getParent();

    I.moveBefore(NonHelperTerminator);
    B.SetInsertPoint(&I);
  }
  Type *const Ty = I.getType();
  const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
  Type *const VecTy = VectorType::get(B.getInt32Ty(), 2);

  // This is the value in the atomic operation we need to combine in order to
  // reduce the number of atomic operations.
  Value *const V = I.getOperand(ValIdx);

  // We need to know how many lanes are active within the wavefront, and we do
  // this by getting the exec register, which tells us all the lanes that are
  // currently active.
  MDNode *const RegName =
      llvm::MDNode::get(Context, llvm::MDString::get(Context, "exec"));
  Value *const Metadata = llvm::MetadataAsValue::get(Context, RegName);
  CallInst *const Exec =
      B.CreateIntrinsic(Intrinsic::read_register, {B.getInt64Ty()}, {Metadata});
  setConvergent(Exec);
  // We need to know how many lanes are active within the wavefront that are
  // below us. If we counted each lane linearly starting from 0, a lane is
  // below us only if its associated index was less than ours. We do this by
  // using the mbcnt intrinsic.
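  // Concretely, the mbcnt_lo/mbcnt_hi pair computes
  // popcount(Exec & ((1 << LaneId) - 1)): the lowest active lane gets 0, the
  // next active lane gets 1, and so on.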
  Value *const BitCast = B.CreateBitCast(Exec, VecTy);
  Value *const ExtractLo = B.CreateExtractElement(BitCast, B.getInt32(0));
  Value *const ExtractHi = B.CreateExtractElement(BitCast, B.getInt32(1));
  CallInst *const PartialMbcnt = B.CreateIntrinsic(
      Intrinsic::amdgcn_mbcnt_lo, {}, {ExtractLo, B.getInt32(0)});
  CallInst *const Mbcnt = B.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_hi, {},
                                            {ExtractHi, PartialMbcnt});

  Value *const MbcntCast = B.CreateIntCast(Mbcnt, Ty, false);

  Value *LaneOffset = nullptr;
  Value *NewV = nullptr;
  // If we have a divergent value in each lane, we need to combine the value
  // using DPP.
  if (ValDivergent) {
    // First we need to set all inactive invocations to 0, so that they can
    // correctly contribute to the final result.
    CallInst *const SetInactive = B.CreateIntrinsic(
        Intrinsic::amdgcn_set_inactive, Ty, {V, B.getIntN(TyBitWidth, 0)});
    setConvergent(SetInactive);
    NewV = SetInactive;
    const unsigned Iters = 6;
    const unsigned DPPCtrl[Iters] = {DPP_ROW_SR1,     DPP_ROW_SR2,
                                     DPP_ROW_SR4,     DPP_ROW_SR8,
                                     DPP_ROW_BCAST15, DPP_ROW_BCAST31};
    const unsigned RowMask[Iters] = {0xf, 0xf, 0xf, 0xf, 0xa, 0xc};
    // This loop performs an inclusive scan across the wavefront, with all lanes
    // active (by using the WWM intrinsic).
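    // The row_shr steps of 1, 2, 4 and 8 build an inclusive prefix sum within
    // each row of 16 lanes; row_bcast:15 (row mask 0xa) then adds the total at
    // lane 15 into row 1 and the total at lane 47 into row 3, and row_bcast:31
    // (row mask 0xc) adds the running total at lane 31 into rows 2 and 3,
    // completing the scan across all 64 lanes.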
    for (unsigned Idx = 0; Idx < Iters; Idx++) {
      CallInst *const DPP = B.CreateIntrinsic(Intrinsic::amdgcn_mov_dpp, Ty,
                                              {NewV, B.getInt32(DPPCtrl[Idx]),
                                               B.getInt32(RowMask[Idx]),
                                               B.getInt32(0xf), B.getFalse()});
      setConvergent(DPP);

      Value *const WWM = B.CreateIntrinsic(Intrinsic::amdgcn_wwm, Ty, DPP);

      NewV = B.CreateBinOp(Op, NewV, WWM);
      NewV = B.CreateIntrinsic(Intrinsic::amdgcn_wwm, Ty, NewV);
    }
    // NewV now holds the inclusive scan of V, but for the lane offset we
    // require an exclusive scan. We do this by shifting the values from the
    // entire wavefront right by 1, and by setting the bound_ctrl (last argument
    // to the intrinsic below) to true, we can guarantee that 0 will be shifted
    // into the 0'th invocation.
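    // For example, per-lane values {v0, v0+v1, v0+v1+v2, ...} from the
    // inclusive scan become {0, v0, v0+v1, ...}, which is exactly the offset
    // each lane needs into the combined result.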
    CallInst *const DPP =
        B.CreateIntrinsic(Intrinsic::amdgcn_mov_dpp, {Ty},
                          {NewV, B.getInt32(DPP_WF_SR1), B.getInt32(0xf),
                           B.getInt32(0xf), B.getTrue()});
    setConvergent(DPP);

    LaneOffset = B.CreateIntrinsic(Intrinsic::amdgcn_wwm, Ty, DPP);
    // Read the value from the last lane, which has accumulated the values of
    // each active lane in the wavefront. This will be the new value we provide
    // to the atomic operation.
    if (TyBitWidth == 64) {
      Value *const ExtractLo = B.CreateTrunc(NewV, B.getInt32Ty());
      Value *const ExtractHi =
          B.CreateTrunc(B.CreateLShr(NewV, B.getInt64(32)), B.getInt32Ty());
      CallInst *const ReadLaneLo = B.CreateIntrinsic(
          Intrinsic::amdgcn_readlane, {}, {ExtractLo, B.getInt32(63)});
      setConvergent(ReadLaneLo);
      CallInst *const ReadLaneHi = B.CreateIntrinsic(
          Intrinsic::amdgcn_readlane, {}, {ExtractHi, B.getInt32(63)});
      setConvergent(ReadLaneHi);
      Value *const PartialInsert = B.CreateInsertElement(
          UndefValue::get(VecTy), ReadLaneLo, B.getInt32(0));
      Value *const Insert =
          B.CreateInsertElement(PartialInsert, ReadLaneHi, B.getInt32(1));
      NewV = B.CreateBitCast(Insert, Ty);
    } else if (TyBitWidth == 32) {
      CallInst *const ReadLane = B.CreateIntrinsic(Intrinsic::amdgcn_readlane,
                                                   {}, {NewV, B.getInt32(63)});
      setConvergent(ReadLane);
      NewV = ReadLane;
    } else {
      llvm_unreachable("Unhandled atomic bit width");
    }
  } else {
    // Get the total number of active lanes we have by using popcount.
    Instruction *const Ctpop = B.CreateUnaryIntrinsic(Intrinsic::ctpop, Exec);
    Value *const CtpopCast = B.CreateIntCast(Ctpop, Ty, false);

    // Calculate the new value we will be contributing to the atomic operation
    // for the entire wavefront.
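    // For example, with 10 active lanes each contributing a uniform value V to
    // an atomic add, the single atomic below applies 10 * V, and the lane with
    // Mbcnt == i takes i * V as its offset into the returned old value.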
    NewV = B.CreateMul(V, CtpopCast);
    LaneOffset = B.CreateMul(V, MbcntCast);
  }
  // We only want a single lane to enter our new control flow, and we do this
  // by checking if there are any active lanes below us. Only one lane will
  // have 0 active lanes below us, so that will be the only one to progress.
  Value *const Cond = B.CreateICmpEQ(MbcntCast, B.getIntN(TyBitWidth, 0));

  // Store I's original basic block before we split the block.
  BasicBlock *const EntryBB = I.getParent();

  // We need to introduce some new control flow to force a single lane to be
  // active. We do this by splitting I's basic block at I, and introducing the
  // new block such that:
  // entry --> single_lane -\
  //       \------------------> exit
  Instruction *const SingleLaneTerminator =
      SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, DT, nullptr);

  // Move the IR builder into single_lane next.
  B.SetInsertPoint(SingleLaneTerminator);
  // Clone the original atomic operation into single lane, replacing the
  // original value with our newly created one.
  Instruction *const NewI = I.clone();
  B.Insert(NewI);
  NewI->setOperand(ValIdx, NewV);

  // Move the IR builder into exit next, and start inserting just before the
  // original instruction.
  B.SetInsertPoint(&I);

  // Create a PHI node to get our new atomic result into the exit block.
  PHINode *const PHI = B.CreatePHI(Ty, 2);
  PHI->addIncoming(UndefValue::get(Ty), EntryBB);
  PHI->addIncoming(NewI, SingleLaneTerminator->getParent());
  // We need to broadcast the value from the lowest active lane (the first
  // lane) to all other lanes in the wavefront. We use an intrinsic for this,
  // but have to handle 64-bit broadcasts with two calls to this intrinsic.
  Value *BroadcastI = nullptr;

  if (TyBitWidth == 64) {
    Value *const ExtractLo = B.CreateTrunc(PHI, B.getInt32Ty());
    Value *const ExtractHi =
        B.CreateTrunc(B.CreateLShr(PHI, B.getInt64(32)), B.getInt32Ty());
    CallInst *const ReadFirstLaneLo =
        B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractLo);
    setConvergent(ReadFirstLaneLo);
    CallInst *const ReadFirstLaneHi =
        B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, ExtractHi);
    setConvergent(ReadFirstLaneHi);
    Value *const PartialInsert = B.CreateInsertElement(
        UndefValue::get(VecTy), ReadFirstLaneLo, B.getInt32(0));
    Value *const Insert =
        B.CreateInsertElement(PartialInsert, ReadFirstLaneHi, B.getInt32(1));
    BroadcastI = B.CreateBitCast(Insert, Ty);
  } else if (TyBitWidth == 32) {
    CallInst *const ReadFirstLane =
        B.CreateIntrinsic(Intrinsic::amdgcn_readfirstlane, {}, PHI);
    setConvergent(ReadFirstLane);
    BroadcastI = ReadFirstLane;
  } else {
    llvm_unreachable("Unhandled atomic bit width");
  }
  // Now that we have the result of our single atomic operation, we need to
  // get our individual lane's slice of the result. We combine the lane offset
  // we previously calculated with the atomic result value read from the first
  // lane to compute our lane's portion of the atomic result.
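  // In effect, each lane recovers the value it would have observed had every
  // active lane performed its own atomic in lane order: the old memory value
  // broadcast from the first lane, adjusted by the contributions of the lanes
  // below it.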
  Value *const Result = B.CreateBinOp(Op, BroadcastI, LaneOffset);

  if (IsPixelShader) {
    // Need a final PHI to reconverge to above the helper lane branch mask.
    B.SetInsertPoint(PixelExitBB->getFirstNonPHI());

    PHINode *const PHI = B.CreatePHI(Ty, 2);
    PHI->addIncoming(UndefValue::get(Ty), PixelEntryBB);
    PHI->addIncoming(Result, I.getParent());
    I.replaceAllUsesWith(PHI);
  } else {
    // Replace the original atomic instruction with the new one.
    I.replaceAllUsesWith(Result);
  }

  // And delete the original.
  I.eraseFromParent();
}
void AMDGPUAtomicOptimizer::setConvergent(CallInst *const CI) const {
  CI->addAttribute(AttributeList::FunctionIndex, Attribute::Convergent);
}
INITIALIZE_PASS_BEGIN(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                      "AMDGPU atomic optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPUAtomicOptimizer, DEBUG_TYPE,
                    "AMDGPU atomic optimizations", false, false)
FunctionPass *llvm::createAMDGPUAtomicOptimizerPass() {
  return new AMDGPUAtomicOptimizer();
}