1 // Test target codegen - host bc file has to be created first.
2 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
3 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
4 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
5 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
6 // RUN: %clang_cc1 -verify -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
7 // expected-no-diagnostics
11 // Check that the execution mode of all 3 target regions is set correctly:
11 // Generic Mode (1) for the first two, SPMD Mode (0) for the last.
12 // CHECK-DAG: {{@__omp_offloading_.+l27}}_exec_mode = weak constant i8 1
13 // CHECK-DAG: {{@__omp_offloading_.+l32}}_exec_mode = weak constant i8 1
14 // CHECK-DAG: {{@__omp_offloading_.+l37}}_exec_mode = weak constant i8 0
22 #pragma omp target teams if(0)
27 #pragma omp target teams if(1)
32 #pragma omp target teams if(n>40)
37 #pragma omp target teams
50 a += ftemplate<char>(n);
55 // CHECK-NOT: define {{.*}}void {{@__omp_offloading_.+template.+l22}}_worker()
62 // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l27}}_worker()
63 // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
64 // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
65 // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
66 // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
67 // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
69 // CHECK: [[AWAIT_WORK]]
70 // CHECK: call void @llvm.nvvm.barrier0()
71 // CHECK: [[KPR:%.+]] = call i1 @__kmpc_kernel_parallel(i8** [[OMP_WORK_FN]], i16 1)
72 // CHECK: [[KPRB:%.+]] = zext i1 [[KPR]] to i8
73 // CHECK: store i8 [[KPRB]], i8* [[OMP_EXEC_STATUS]], align 1
74 // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
75 // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
76 // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
78 // CHECK: [[SEL_WORKERS]]
79 // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]]
80 // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
81 // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
83 // CHECK: [[EXEC_PARALLEL]]
84 // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]]
86 // CHECK: [[TERM_PARALLEL]]
87 // CHECK: call void @__kmpc_kernel_end_parallel()
88 // CHECK: br label {{%?}}[[BAR_PARALLEL]]
90 // CHECK: [[BAR_PARALLEL]]
91 // CHECK: call void @llvm.nvvm.barrier0()
92 // CHECK: br label {{%?}}[[AWAIT_WORK]]
97 // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+template.+l27]](i[[SZ:32|64]] [[A:%[^)]+]])
98 // CHECK: store i[[SZ]] [[A]], i[[SZ]]* [[A_ADDR:%.+]], align
99 // CHECK: [[CONV:%.+]] = bitcast i[[SZ]]* [[A_ADDR]] to i8*
101 // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
102 // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
103 // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
104 // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]]
105 // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
106 // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
109 // CHECK: {{call|invoke}} void [[T1]]_worker()
110 // CHECK: br label {{%?}}[[EXIT:.+]]
112 // CHECK: [[CHECK_MASTER]]
113 // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
114 // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
115 // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
116 // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]],
117 // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
120 // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
121 // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
122 // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]]
123 // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
125 // CHECK-NOT: kmpc_fork_teams
126 // CHECK: [[A_VAL:%.+]] = load i8, i8* [[CONV]], align
127 // CHECK: [[ACP:%.+]] = bitcast i[[SZ]]* [[AC:%.+]] to i8*
128 // CHECK: store i8 [[A_VAL]], i8* [[ACP]], align
129 // CHECK: [[ACV:%.+]] = load i[[SZ]], i[[SZ]]* [[AC]], align
130 // CHECK: store i[[SZ]] [[ACV]], i[[SZ]]* [[A_ADDR_T:%.+]], align
131 // CHECK: [[CONV2:%.+]] = bitcast i[[SZ]]* [[A_ADDR_T]] to i8*
132 // CHECK: store i8 49, i8* [[CONV2]], align
133 // CHECK: br label {{%?}}[[TERMINATE:.+]]
135 // CHECK: [[TERMINATE]]
136 // CHECK: call void @__kmpc_kernel_deinit(
137 // CHECK: call void @llvm.nvvm.barrier0()
138 // CHECK: br label {{%?}}[[EXIT]]
148 // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l32}}_worker()
149 // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
150 // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
151 // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
152 // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
153 // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
155 // CHECK: [[AWAIT_WORK]]
156 // CHECK: call void @llvm.nvvm.barrier0()
157 // CHECK: [[KPR:%.+]] = call i1 @__kmpc_kernel_parallel(i8** [[OMP_WORK_FN]], i16 1)
158 // CHECK: [[KPRB:%.+]] = zext i1 [[KPR]] to i8
159 // CHECK: store i8 [[KPRB]], i8* [[OMP_EXEC_STATUS]], align 1
160 // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
161 // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
162 // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
164 // CHECK: [[SEL_WORKERS]]
165 // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]]
166 // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
167 // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
169 // CHECK: [[EXEC_PARALLEL]]
170 // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]]
172 // CHECK: [[TERM_PARALLEL]]
173 // CHECK: call void @__kmpc_kernel_end_parallel()
174 // CHECK: br label {{%?}}[[BAR_PARALLEL]]
176 // CHECK: [[BAR_PARALLEL]]
177 // CHECK: call void @llvm.nvvm.barrier0()
178 // CHECK: br label {{%?}}[[AWAIT_WORK]]
183 // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+template.+l32]](i[[SZ:32|64]] [[AA:%[^)]+]])
184 // CHECK: store i[[SZ]] [[AA]], i[[SZ]]* [[AA_ADDR:%.+]], align
185 // CHECK: [[CONV:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i16*
187 // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
188 // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
189 // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
190 // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]]
191 // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
192 // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
195 // CHECK: {{call|invoke}} void [[T2]]_worker()
196 // CHECK: br label {{%?}}[[EXIT:.+]]
198 // CHECK: [[CHECK_MASTER]]
199 // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
200 // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
201 // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
202 // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]],
203 // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
206 // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
207 // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
208 // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]]
209 // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
211 // CHECK-NOT: kmpc_fork_teams
212 // CHECK: [[AA_VAL:%.+]] = load i16, i16* [[CONV]], align
213 // CHECK: [[ACP:%.+]] = bitcast i[[SZ]]* [[AC:%.+]] to i16*
214 // CHECK: store i16 [[AA_VAL]], i16* [[ACP]], align
215 // CHECK: [[ACV:%.+]] = load i[[SZ]], i[[SZ]]* [[AC]], align
216 // CHECK: store i[[SZ]] [[ACV]], i[[SZ]]* [[AA_ADDR_T:%.+]], align
217 // CHECK: [[CONV2:%.+]] = bitcast i[[SZ]]* [[AA_ADDR_T]] to i16*
218 // CHECK: store i16 1, i16* [[CONV2]], align
219 // CHECK: br label {{%?}}[[TERMINATE:.+]]
221 // CHECK: [[TERMINATE]]
222 // CHECK: call void @__kmpc_kernel_deinit(
223 // CHECK: call void @llvm.nvvm.barrier0()
224 // CHECK: br label {{%?}}[[EXIT]]
229 // CHECK: define weak void @__omp_offloading_{{.*}}ftemplate{{.*}}_l37(
230 // CHECK: call void @__kmpc_spmd_kernel_init(
231 // CHECK: call void @__kmpc_data_sharing_init_stack_spmd
232 // CHECK: call i8* @__kmpc_data_sharing_push_stack(
233 // CHECK-NOT: call void @__kmpc_serialized_parallel(
234 // CHECK: call void [[L0:@.+]](i32* %{{.+}}, i32* %{{.+}}, i16* %{{.*}})
235 // CHECK-NOT: call void @__kmpc_end_serialized_parallel(
236 // CHECK: call void @__kmpc_data_sharing_pop_stack(
237 // CHECK: call void @__kmpc_spmd_kernel_deinit()
240 // CHECK: define internal void [[L0]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i16* dereferenceable
241 // CHECK: call void @__kmpc_serialized_parallel(
242 // CHECK: call void [[L1:@.+]](i32* %{{.+}}, i32* %{{.+}}, i16* %{{.+}})
243 // CHECK: call void @__kmpc_end_serialized_parallel(
246 // CHECK: define internal void [[L1]](i32* noalias %{{.+}}, i32* noalias %{{.+}}, i16* dereferenceable
247 // CHECK: store i16 1, i16* %