1 ; RUN: opt -S -mtriple=amdgcn-- -structurizecfg -si-annotate-control-flow < %s | FileCheck -check-prefix=OPT %s
2 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
4 ; OPT-LABEL: {{^}}define amdgpu_vs void @multi_else_break(
8 ; OPT: [[if:%[0-9]+]] = call { i1, i64 } @llvm.amdgcn.if(
9 ; OPT: [[if_exec:%[0-9]+]] = extractvalue { i1, i64 } [[if]], 1
13 ; Ensure two else.break calls, for both the inner and outer loops
15 ; OPT: call i64 @llvm.amdgcn.else.break(i64 [[if_exec]],
16 ; OPT-NEXT: call i64 @llvm.amdgcn.else.break(i64 [[if_exec]],
17 ; OPT-NEXT: call void @llvm.amdgcn.end.cf
21 ; GCN-LABEL: {{^}}multi_else_break:
23 ; GCN: [[OUTER_LOOP:BB[0-9]+_[0-9]+]]: ; %LOOP.outer{{$}}
25 ; GCN: [[INNER_LOOP:BB[0-9]+_[0-9]+]]: ; %LOOP{{$}}
26 ; GCN: s_and_saveexec_b64 [[SAVE_BREAK:s\[[0-9]+:[0-9]+\]]], vcc
28 ; GCN: BB{{[0-9]+}}_{{[0-9]+}}: ; %Flow{{$}}
29 ; GCN-NEXT: ; in Loop: Header=[[INNER_LOOP]] Depth=2
31 ; Ensure the extra s_or_b64 is eliminated
32 ; GCN-NEXT: s_or_b64 exec, exec, [[SAVE_BREAK]]
33 ; GCN-NEXT: s_or_b64 [[OR_BREAK:s\[[0-9]+:[0-9]+\]]], [[SAVE_BREAK]], s{{\[[0-9]+:[0-9]+\]}}
34 ; GCN-NEXT: s_andn2_b64 exec, exec, [[OR_BREAK]]
35 ; GCN-NEXT: s_cbranch_execnz [[INNER_LOOP]]
37 ; GCN: ; BB#{{[0-9]+}}: ; %Flow1{{$}}
38 ; GCN-NEXT: ; in Loop: Header=[[OUTER_LOOP]] Depth=1
40 ; Ensure copy is eliminated
41 ; GCN-NEXT: s_or_b64 exec, exec, [[OR_BREAK]]
42 ; GCN-NEXT: s_or_b64 [[OUTER_OR_BREAK:s\[[0-9]+:[0-9]+\]]], [[SAVE_BREAK]], s{{\[[0-9]+:[0-9]+\]}}
43 ; GCN-NEXT: s_andn2_b64 exec, exec, [[OUTER_OR_BREAK]]
44 ; GCN-NEXT: s_cbranch_execnz [[OUTER_LOOP]]
45 define amdgpu_vs void @multi_else_break(<4 x float> %vec, i32 %ub, i32 %cont) {
49 LOOP.outer: ; preds = %ENDIF, %main_body
50 %tmp43 = phi i32 [ 0, %main_body ], [ %tmp47, %ENDIF ]
53 LOOP: ; preds = %ENDIF, %LOOP.outer
54 %tmp45 = phi i32 [ %tmp43, %LOOP.outer ], [ %tmp47, %ENDIF ]
55 %tmp47 = add i32 %tmp45, 1
56 %tmp48 = icmp slt i32 %tmp45, %ub
57 br i1 %tmp48, label %ENDIF, label %IF
62 ENDIF: ; preds = %LOOP
63 %tmp51 = icmp eq i32 %tmp47, %cont
64 br i1 %tmp51, label %LOOP, label %LOOP.outer
67 ; OPT-LABEL: define void @multi_if_break_loop(
68 ; OPT: llvm.amdgcn.break
69 ; OPT: llvm.amdgcn.loop
70 ; OPT: llvm.amdgcn.if.break
71 ; OPT: llvm.amdgcn.if.break
72 ; OPT: llvm.amdgcn.end.cf
74 ; GCN-LABEL: {{^}}multi_if_break_loop:
75 ; GCN: s_mov_b64 [[BREAK_REG:s\[[0-9]+:[0-9]+\]]], 0{{$}}
77 ; GCN: [[LOOP:BB[0-9]+_[0-9]+]]: ; %bb1{{$}}
79 ; Uses a copy instead of an or
80 ; GCN: s_mov_b64 [[COPY:s\[[0-9]+:[0-9]+\]]], [[BREAK_REG]]
81 ; GCN: s_or_b64 [[BREAK_REG]], exec, [[COPY]]
82 define void @multi_if_break_loop(i32 %arg) #0 {
84 %id = call i32 @llvm.amdgcn.workitem.id.x()
85 %tmp = sub i32 %id, %arg
89 %lsr.iv = phi i32 [ undef, %bb ], [ %lsr.iv.next, %case0 ], [ %lsr.iv.next, %case1 ]
90 %lsr.iv.next = add i32 %lsr.iv, 1
91 %cmp0 = icmp slt i32 %lsr.iv.next, 0
92 %load0 = load volatile i32, i32 addrspace(1)* undef, align 4
93 switch i32 %load0, label %bb9 [
99 %load1 = load volatile i32, i32 addrspace(1)* undef, align 4
100 %cmp1 = icmp slt i32 %tmp, %load1
101 br i1 %cmp1, label %bb1, label %bb9
104 %load2 = load volatile i32, i32 addrspace(1)* undef, align 4
105 %cmp2 = icmp slt i32 %tmp, %load2
106 br i1 %cmp2, label %bb1, label %bb9
112 declare i32 @llvm.amdgcn.workitem.id.x() #1
114 attributes #0 = { nounwind }
115 attributes #1 = { nounwind readnone }