// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16\
// RUN: -fallow-half-arguments-and-returns -S -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -mem2reg \
// RUN: | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_fp16.h>
10 // CHECK-LABEL: test_vabsh_f16
11 // CHECK: [[ABS:%.*]] = call half @llvm.fabs.f16(half %a)
12 // CHECK: ret half [[ABS]]
13 float16_t test_vabsh_f16(float16_t a) {
17 // CHECK-LABEL: test_vceqzh_f16
18 // CHECK: [[TMP1:%.*]] = fcmp oeq half %a, 0xH0000
19 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
20 // CHECK: ret i16 [[TMP2]]
21 uint16_t test_vceqzh_f16(float16_t a) {
25 // CHECK-LABEL: test_vcgezh_f16
26 // CHECK: [[TMP1:%.*]] = fcmp oge half %a, 0xH0000
27 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
28 // CHECK: ret i16 [[TMP2]]
29 uint16_t test_vcgezh_f16(float16_t a) {
33 // CHECK-LABEL: test_vcgtzh_f16
34 // CHECK: [[TMP1:%.*]] = fcmp ogt half %a, 0xH0000
35 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
36 // CHECK: ret i16 [[TMP2]]
37 uint16_t test_vcgtzh_f16(float16_t a) {
41 // CHECK-LABEL: test_vclezh_f16
42 // CHECK: [[TMP1:%.*]] = fcmp ole half %a, 0xH0000
43 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
44 // CHECK: ret i16 [[TMP2]]
45 uint16_t test_vclezh_f16(float16_t a) {
49 // CHECK-LABEL: test_vcltzh_f16
50 // CHECK: [[TMP1:%.*]] = fcmp olt half %a, 0xH0000
51 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
52 // CHECK: ret i16 [[TMP2]]
53 uint16_t test_vcltzh_f16(float16_t a) {
57 // CHECK-LABEL: test_vcvth_f16_s16
58 // CHECK: [[VCVT:%.*]] = sitofp i16 %a to half
59 // CHECK: ret half [[VCVT]]
60 float16_t test_vcvth_f16_s16 (int16_t a) {
61 return vcvth_f16_s16(a);
64 // CHECK-LABEL: test_vcvth_f16_s32
65 // CHECK: [[VCVT:%.*]] = sitofp i32 %a to half
66 // CHECK: ret half [[VCVT]]
67 float16_t test_vcvth_f16_s32 (int32_t a) {
68 return vcvth_f16_s32(a);
71 // CHECK-LABEL: test_vcvth_f16_s64
72 // CHECK: [[VCVT:%.*]] = sitofp i64 %a to half
73 // CHECK: ret half [[VCVT]]
74 float16_t test_vcvth_f16_s64 (int64_t a) {
75 return vcvth_f16_s64(a);
78 // CHECK-LABEL: test_vcvth_f16_u16
79 // CHECK: [[VCVT:%.*]] = uitofp i16 %a to half
80 // CHECK: ret half [[VCVT]]
81 float16_t test_vcvth_f16_u16 (uint16_t a) {
82 return vcvth_f16_u16(a);
85 // CHECK-LABEL: test_vcvth_f16_u32
86 // CHECK: [[VCVT:%.*]] = uitofp i32 %a to half
87 // CHECK: ret half [[VCVT]]
88 float16_t test_vcvth_f16_u32 (uint32_t a) {
89 return vcvth_f16_u32(a);
92 // CHECK-LABEL: test_vcvth_f16_u64
93 // CHECK: [[VCVT:%.*]] = uitofp i64 %a to half
94 // CHECK: ret half [[VCVT]]
95 float16_t test_vcvth_f16_u64 (uint64_t a) {
96 return vcvth_f16_u64(a);
99 // CHECK-LABEL: test_vcvth_s16_f16
100 // CHECK: [[VCVT:%.*]] = fptosi half %a to i16
101 // CHECK: ret i16 [[VCVT]]
102 int16_t test_vcvth_s16_f16 (float16_t a) {
103 return vcvth_s16_f16(a);
106 // CHECK-LABEL: test_vcvth_s32_f16
107 // CHECK: [[VCVT:%.*]] = fptosi half %a to i32
108 // CHECK: ret i32 [[VCVT]]
109 int32_t test_vcvth_s32_f16 (float16_t a) {
110 return vcvth_s32_f16(a);
113 // CHECK-LABEL: test_vcvth_s64_f16
114 // CHECK: [[VCVT:%.*]] = fptosi half %a to i64
115 // CHECK: ret i64 [[VCVT]]
116 int64_t test_vcvth_s64_f16 (float16_t a) {
117 return vcvth_s64_f16(a);
120 // CHECK-LABEL: test_vcvth_u16_f16
121 // CHECK: [[VCVT:%.*]] = fptoui half %a to i16
122 // CHECK: ret i16 [[VCVT]]
123 uint16_t test_vcvth_u16_f16 (float16_t a) {
124 return vcvth_u16_f16(a);
127 // CHECK-LABEL: test_vcvth_u32_f16
128 // CHECK: [[VCVT:%.*]] = fptoui half %a to i32
129 // CHECK: ret i32 [[VCVT]]
130 uint32_t test_vcvth_u32_f16 (float16_t a) {
131 return vcvth_u32_f16(a);
134 // CHECK-LABEL: test_vcvth_u64_f16
135 // CHECK: [[VCVT:%.*]] = fptoui half %a to i64
136 // CHECK: ret i64 [[VCVT]]
137 uint64_t test_vcvth_u64_f16 (float16_t a) {
138 return vcvth_u64_f16(a);
141 // CHECK-LABEL: test_vcvtah_s16_f16
142 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtas.i32.f16(half %a)
143 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
144 // CHECK: ret i16 [[RET]]
145 int16_t test_vcvtah_s16_f16 (float16_t a) {
146 return vcvtah_s16_f16(a);
149 // CHECK-LABEL: test_vcvtah_s32_f16
150 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtas.i32.f16(half %a)
151 // CHECK: ret i32 [[VCVT]]
152 int32_t test_vcvtah_s32_f16 (float16_t a) {
153 return vcvtah_s32_f16(a);
156 // CHECK-LABEL: test_vcvtah_s64_f16
157 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtas.i64.f16(half %a)
158 // CHECK: ret i64 [[VCVT]]
159 int64_t test_vcvtah_s64_f16 (float16_t a) {
160 return vcvtah_s64_f16(a);
163 // CHECK-LABEL: test_vcvtah_u16_f16
164 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtau.i32.f16(half %a)
165 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
166 // CHECK: ret i16 [[RET]]
167 uint16_t test_vcvtah_u16_f16 (float16_t a) {
168 return vcvtah_u16_f16(a);
171 // CHECK-LABEL: test_vcvtah_u32_f16
172 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtau.i32.f16(half %a)
173 // CHECK: ret i32 [[VCVT]]
174 uint32_t test_vcvtah_u32_f16 (float16_t a) {
175 return vcvtah_u32_f16(a);
178 // CHECK-LABEL: test_vcvtah_u64_f16
179 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtau.i64.f16(half %a)
180 // CHECK: ret i64 [[VCVT]]
181 uint64_t test_vcvtah_u64_f16 (float16_t a) {
182 return vcvtah_u64_f16(a);
185 // CHECK-LABEL: test_vcvtmh_s16_f16
186 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtms.i32.f16(half %a)
187 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
188 // CHECK: ret i16 [[RET]]
189 int16_t test_vcvtmh_s16_f16 (float16_t a) {
190 return vcvtmh_s16_f16(a);
193 // CHECK-LABEL: test_vcvtmh_s32_f16
194 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtms.i32.f16(half %a)
195 // CHECK: ret i32 [[VCVT]]
196 int32_t test_vcvtmh_s32_f16 (float16_t a) {
197 return vcvtmh_s32_f16(a);
200 // CHECK-LABEL: test_vcvtmh_s64_f16
201 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtms.i64.f16(half %a)
202 // CHECK: ret i64 [[VCVT]]
203 int64_t test_vcvtmh_s64_f16 (float16_t a) {
204 return vcvtmh_s64_f16(a);
207 // CHECK-LABEL: test_vcvtmh_u16_f16
208 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtmu.i32.f16(half %a)
209 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
210 // CHECK: ret i16 [[RET]]
211 uint16_t test_vcvtmh_u16_f16 (float16_t a) {
212 return vcvtmh_u16_f16(a);
215 // CHECK-LABEL: test_vcvtmh_u32_f16
216 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtmu.i32.f16(half %a)
217 // CHECK: ret i32 [[VCVT]]
218 uint32_t test_vcvtmh_u32_f16 (float16_t a) {
219 return vcvtmh_u32_f16(a);
222 // CHECK-LABEL: test_vcvtmh_u64_f16
223 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtmu.i64.f16(half %a)
224 // CHECK: ret i64 [[VCVT]]
225 uint64_t test_vcvtmh_u64_f16 (float16_t a) {
226 return vcvtmh_u64_f16(a);
229 // CHECK-LABEL: test_vcvtnh_s16_f16
230 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtns.i32.f16(half %a)
231 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
232 // CHECK: ret i16 [[RET]]
233 int16_t test_vcvtnh_s16_f16 (float16_t a) {
234 return vcvtnh_s16_f16(a);
237 // CHECK-LABEL: test_vcvtnh_s32_f16
238 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtns.i32.f16(half %a)
239 // CHECK: ret i32 [[VCVT]]
240 int32_t test_vcvtnh_s32_f16 (float16_t a) {
241 return vcvtnh_s32_f16(a);
244 // CHECK-LABEL: test_vcvtnh_s64_f16
245 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtns.i64.f16(half %a)
246 // CHECK: ret i64 [[VCVT]]
247 int64_t test_vcvtnh_s64_f16 (float16_t a) {
248 return vcvtnh_s64_f16(a);
251 // CHECK-LABEL: test_vcvtnh_u16_f16
252 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtnu.i32.f16(half %a)
253 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
254 // CHECK: ret i16 [[RET]]
255 uint16_t test_vcvtnh_u16_f16 (float16_t a) {
256 return vcvtnh_u16_f16(a);
259 // CHECK-LABEL: test_vcvtnh_u32_f16
260 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtnu.i32.f16(half %a)
261 // CHECK: ret i32 [[VCVT]]
262 uint32_t test_vcvtnh_u32_f16 (float16_t a) {
263 return vcvtnh_u32_f16(a);
266 // CHECK-LABEL: test_vcvtnh_u64_f16
267 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtnu.i64.f16(half %a)
268 // CHECK: ret i64 [[VCVT]]
269 uint64_t test_vcvtnh_u64_f16 (float16_t a) {
270 return vcvtnh_u64_f16(a);
273 // CHECK-LABEL: test_vcvtph_s16_f16
274 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtps.i32.f16(half %a)
275 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
276 // CHECK: ret i16 [[RET]]
277 int16_t test_vcvtph_s16_f16 (float16_t a) {
278 return vcvtph_s16_f16(a);
281 // CHECK-LABEL: test_vcvtph_s32_f16
282 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtps.i32.f16(half %a)
283 // CHECK: ret i32 [[VCVT]]
284 int32_t test_vcvtph_s32_f16 (float16_t a) {
285 return vcvtph_s32_f16(a);
288 // CHECK-LABEL: test_vcvtph_s64_f16
289 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtps.i64.f16(half %a)
290 // CHECK: ret i64 [[VCVT]]
291 int64_t test_vcvtph_s64_f16 (float16_t a) {
292 return vcvtph_s64_f16(a);
295 // CHECK-LABEL: test_vcvtph_u16_f16
296 // CHECK: [[FCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half %a)
297 // CHECK: [[RET:%.*]] = trunc i32 [[FCVT]] to i16
298 // CHECK: ret i16 [[RET]]
299 uint16_t test_vcvtph_u16_f16 (float16_t a) {
300 return vcvtph_u16_f16(a);
303 // CHECK-LABEL: test_vcvtph_u32_f16
304 // CHECK: [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half %a)
305 // CHECK: ret i32 [[VCVT]]
306 uint32_t test_vcvtph_u32_f16 (float16_t a) {
307 return vcvtph_u32_f16(a);
310 // CHECK-LABEL: test_vcvtph_u64_f16
311 // CHECK: [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtpu.i64.f16(half %a)
312 // CHECK: ret i64 [[VCVT]]
313 uint64_t test_vcvtph_u64_f16 (float16_t a) {
314 return vcvtph_u64_f16(a);
317 // CHECK-LABEL: test_vnegh_f16
318 // CHECK: [[NEG:%.*]] = fsub half 0xH8000, %a
319 // CHECK: ret half [[NEG]]
320 float16_t test_vnegh_f16(float16_t a) {
324 // CHECK-LABEL: test_vrecpeh_f16
325 // CHECK: [[VREC:%.*]] = call half @llvm.aarch64.neon.frecpe.f16(half %a)
326 // CHECK: ret half [[VREC]]
327 float16_t test_vrecpeh_f16(float16_t a) {
328 return vrecpeh_f16(a);
331 // CHECK-LABEL: test_vrecpxh_f16
332 // CHECK: [[VREC:%.*]] = call half @llvm.aarch64.neon.frecpx.f16(half %a)
333 // CHECK: ret half [[VREC]]
334 float16_t test_vrecpxh_f16(float16_t a) {
335 return vrecpxh_f16(a);
338 // CHECK-LABEL: test_vrndh_f16
339 // CHECK: [[RND:%.*]] = call half @llvm.trunc.f16(half %a)
340 // CHECK: ret half [[RND]]
341 float16_t test_vrndh_f16(float16_t a) {
345 // CHECK-LABEL: test_vrndah_f16
346 // CHECK: [[RND:%.*]] = call half @llvm.round.f16(half %a)
347 // CHECK: ret half [[RND]]
348 float16_t test_vrndah_f16(float16_t a) {
349 return vrndah_f16(a);
352 // CHECK-LABEL: test_vrndih_f16
353 // CHECK: [[RND:%.*]] = call half @llvm.nearbyint.f16(half %a)
354 // CHECK: ret half [[RND]]
355 float16_t test_vrndih_f16(float16_t a) {
356 return vrndih_f16(a);
359 // CHECK-LABEL: test_vrndmh_f16
360 // CHECK: [[RND:%.*]] = call half @llvm.floor.f16(half %a)
361 // CHECK: ret half [[RND]]
362 float16_t test_vrndmh_f16(float16_t a) {
363 return vrndmh_f16(a);
366 // CHECK-LABEL: test_vrndnh_f16
367 // CHECK: [[RND:%.*]] = call half @llvm.aarch64.neon.frintn.f16(half %a)
368 // CHECK: ret half [[RND]]
369 float16_t test_vrndnh_f16(float16_t a) {
370 return vrndnh_f16(a);
373 // CHECK-LABEL: test_vrndph_f16
374 // CHECK: [[RND:%.*]] = call half @llvm.ceil.f16(half %a)
375 // CHECK: ret half [[RND]]
376 float16_t test_vrndph_f16(float16_t a) {
377 return vrndph_f16(a);
380 // CHECK-LABEL: test_vrndxh_f16
381 // CHECK: [[RND:%.*]] = call half @llvm.rint.f16(half %a)
382 // CHECK: ret half [[RND]]
383 float16_t test_vrndxh_f16(float16_t a) {
384 return vrndxh_f16(a);
387 // CHECK-LABEL: test_vrsqrteh_f16
388 // CHECK: [[RND:%.*]] = call half @llvm.aarch64.neon.frsqrte.f16(half %a)
389 // CHECK: ret half [[RND]]
390 float16_t test_vrsqrteh_f16(float16_t a) {
391 return vrsqrteh_f16(a);
394 // CHECK-LABEL: test_vsqrth_f16
395 // CHECK: [[SQR:%.*]] = call half @llvm.sqrt.f16(half %a)
396 // CHECK: ret half [[SQR]]
397 float16_t test_vsqrth_f16(float16_t a) {
398 return vsqrth_f16(a);
401 // CHECK-LABEL: test_vaddh_f16
402 // CHECK: [[ADD:%.*]] = fadd half %a, %b
403 // CHECK: ret half [[ADD]]
404 float16_t test_vaddh_f16(float16_t a, float16_t b) {
405 return vaddh_f16(a, b);
408 // CHECK-LABEL: test_vabdh_f16
409 // CHECK: [[ABD:%.*]] = call half @llvm.aarch64.sisd.fabd.f16(half %a, half %b)
410 // CHECK: ret half [[ABD]]
411 float16_t test_vabdh_f16(float16_t a, float16_t b) {
412 return vabdh_f16(a, b);
415 // CHECK-LABEL: test_vcageh_f16
416 // CHECK: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facge.i32.f16(half %a, half %b)
417 // CHECK: [[RET:%.*]] = trunc i32 [[FACG]] to i16
418 // CHECK: ret i16 [[RET]]
419 uint16_t test_vcageh_f16(float16_t a, float16_t b) {
420 return vcageh_f16(a, b);
423 // CHECK-LABEL: test_vcagth_f16
424 // CHECK: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facgt.i32.f16(half %a, half %b)
425 // CHECK: [[RET:%.*]] = trunc i32 [[FACG]] to i16
426 // CHECK: ret i16 [[RET]]
427 uint16_t test_vcagth_f16(float16_t a, float16_t b) {
428 return vcagth_f16(a, b);
431 // CHECK-LABEL: test_vcaleh_f16
432 // CHECK: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facge.i32.f16(half %b, half %a)
433 // CHECK: [[RET:%.*]] = trunc i32 [[FACG]] to i16
434 // CHECK: ret i16 [[RET]]
435 uint16_t test_vcaleh_f16(float16_t a, float16_t b) {
436 return vcaleh_f16(a, b);
439 // CHECK-LABEL: test_vcalth_f16
440 // CHECK: [[FACG:%.*]] = call i32 @llvm.aarch64.neon.facgt.i32.f16(half %b, half %a)
441 // CHECK: [[RET:%.*]] = trunc i32 [[FACG]] to i16
442 // CHECK: ret i16 [[RET]]
443 uint16_t test_vcalth_f16(float16_t a, float16_t b) {
444 return vcalth_f16(a, b);
447 // CHECK-LABEL: test_vceqh_f16
448 // CHECK: [[TMP1:%.*]] = fcmp oeq half %a, %b
449 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
450 // CHECK: ret i16 [[TMP2]]
451 uint16_t test_vceqh_f16(float16_t a, float16_t b) {
452 return vceqh_f16(a, b);
455 // CHECK-LABEL: test_vcgeh_f16
456 // CHECK: [[TMP1:%.*]] = fcmp oge half %a, %b
457 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
458 // CHECK: ret i16 [[TMP2]]
459 uint16_t test_vcgeh_f16(float16_t a, float16_t b) {
460 return vcgeh_f16(a, b);
463 // CHECK-LABEL: test_vcgth_f16
464 //CHECK: [[TMP1:%.*]] = fcmp ogt half %a, %b
465 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
466 // CHECK: ret i16 [[TMP2]]
467 uint16_t test_vcgth_f16(float16_t a, float16_t b) {
468 return vcgth_f16(a, b);
471 // CHECK-LABEL: test_vcleh_f16
472 // CHECK: [[TMP1:%.*]] = fcmp ole half %a, %b
473 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
474 // CHECK: ret i16 [[TMP2]]
475 uint16_t test_vcleh_f16(float16_t a, float16_t b) {
476 return vcleh_f16(a, b);
479 // CHECK-LABEL: test_vclth_f16
480 // CHECK: [[TMP1:%.*]] = fcmp olt half %a, %b
481 // CHECK: [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
482 // CHECK: ret i16 [[TMP2]]
483 uint16_t test_vclth_f16(float16_t a, float16_t b) {
484 return vclth_f16(a, b);
487 // CHECK-LABEL: test_vcvth_n_f16_s16
488 // CHECK: [[SEXT:%.*]] = sext i16 %a to i32
489 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 [[SEXT]], i32 1)
490 // CHECK: ret half [[CVT]]
491 float16_t test_vcvth_n_f16_s16(int16_t a) {
492 return vcvth_n_f16_s16(a, 1);
495 // CHECK-LABEL: test_vcvth_n_f16_s32
496 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 %a, i32 1)
497 // CHECK: ret half [[CVT]]
498 float16_t test_vcvth_n_f16_s32(int32_t a) {
499 return vcvth_n_f16_s32(a, 1);
502 // CHECK-LABEL: test_vcvth_n_f16_s64
503 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i64(i64 %a, i32 1)
504 // CHECK: ret half [[CVT]]
505 float16_t test_vcvth_n_f16_s64(int64_t a) {
506 return vcvth_n_f16_s64(a, 1);
509 // CHECK-LABEL: test_vcvth_n_s16_f16
510 // CHECK: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f16(half %a, i32 1)
511 // CHECK: [[RET:%.*]] = trunc i32 [[CVT]] to i16
512 // CHECK: ret i16 [[RET]]
513 int16_t test_vcvth_n_s16_f16(float16_t a) {
514 return vcvth_n_s16_f16(a, 1);
517 // CHECK-LABEL: test_vcvth_n_s32_f16
518 // CHECK: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxs.i32.f16(half %a, i32 1)
519 // CHECK: ret i32 [[CVT]]
520 int32_t test_vcvth_n_s32_f16(float16_t a) {
521 return vcvth_n_s32_f16(a, 1);
524 // CHECK-LABEL: test_vcvth_n_s64_f16
525 // CHECK: [[CVT:%.*]] = call i64 @llvm.aarch64.neon.vcvtfp2fxs.i64.f16(half %a, i32 1)
526 // CHECK: ret i64 [[CVT]]
527 int64_t test_vcvth_n_s64_f16(float16_t a) {
528 return vcvth_n_s64_f16(a, 1);
531 // CHECK-LABEL: test_vcvth_n_f16_u16
532 // CHECK: [[SEXT:%.*]] = zext i16 %a to i32
533 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 [[SEXT]], i32 1)
534 // CHECK: ret half [[CVT]]
535 float16_t test_vcvth_n_f16_u16(int16_t a) {
536 return vcvth_n_f16_u16(a, 1);
539 // CHECK-LABEL: test_vcvth_n_f16_u32
540 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 %a, i32 1)
541 // CHECK: ret half [[CVT]]
542 float16_t test_vcvth_n_f16_u32(int32_t a) {
543 return vcvth_n_f16_u32(a, 1);
546 // CHECK-LABEL: test_vcvth_n_f16_u64
547 // CHECK: [[CVT:%.*]] = call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i64(i64 %a, i32 1)
548 // CHECK: ret half [[CVT]]
549 float16_t test_vcvth_n_f16_u64(int64_t a) {
550 return vcvth_n_f16_u64(a, 1);
553 // CHECK-LABEL: test_vcvth_n_u16_f16
554 // CHECK: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half %a, i32 1)
555 // CHECK: [[RET:%.*]] = trunc i32 [[CVT]] to i16
556 // CHECK: ret i16 [[RET]]
557 int16_t test_vcvth_n_u16_f16(float16_t a) {
558 return vcvth_n_u16_f16(a, 1);
561 // CHECK-LABEL: test_vcvth_n_u32_f16
562 // CHECK: [[CVT:%.*]] = call i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half %a, i32 1)
563 // CHECK: ret i32 [[CVT]]
564 int32_t test_vcvth_n_u32_f16(float16_t a) {
565 return vcvth_n_u32_f16(a, 1);
568 // CHECK-LABEL: test_vcvth_n_u64_f16
569 // CHECK: [[CVT:%.*]] = call i64 @llvm.aarch64.neon.vcvtfp2fxu.i64.f16(half %a, i32 1)
570 // CHECK: ret i64 [[CVT]]
571 int64_t test_vcvth_n_u64_f16(float16_t a) {
572 return vcvth_n_u64_f16(a, 1);
575 // CHECK-LABEL: test_vdivh_f16
576 // CHECK: [[DIV:%.*]] = fdiv half %a, %b
577 // CHECK: ret half [[DIV]]
578 float16_t test_vdivh_f16(float16_t a, float16_t b) {
579 return vdivh_f16(a, b);
582 // CHECK-LABEL: test_vmaxh_f16
583 // CHECK: [[MAX:%.*]] = call half @llvm.aarch64.neon.fmax.f16(half %a, half %b)
584 // CHECK: ret half [[MAX]]
585 float16_t test_vmaxh_f16(float16_t a, float16_t b) {
586 return vmaxh_f16(a, b);
589 // CHECK-LABEL: test_vmaxnmh_f16
590 // CHECK: [[MAX:%.*]] = call half @llvm.aarch64.neon.fmaxnm.f16(half %a, half %b)
591 // CHECK: ret half [[MAX]]
592 float16_t test_vmaxnmh_f16(float16_t a, float16_t b) {
593 return vmaxnmh_f16(a, b);
596 // CHECK-LABEL: test_vminh_f16
597 // CHECK: [[MIN:%.*]] = call half @llvm.aarch64.neon.fmin.f16(half %a, half %b)
598 // CHECK: ret half [[MIN]]
599 float16_t test_vminh_f16(float16_t a, float16_t b) {
600 return vminh_f16(a, b);
603 // CHECK-LABEL: test_vminnmh_f16
604 // CHECK: [[MIN:%.*]] = call half @llvm.aarch64.neon.fminnm.f16(half %a, half %b)
605 // CHECK: ret half [[MIN]]
606 float16_t test_vminnmh_f16(float16_t a, float16_t b) {
607 return vminnmh_f16(a, b);
610 // CHECK-LABEL: test_vmulh_f16
611 // CHECK: [[MUL:%.*]] = fmul half %a, %b
612 // CHECK: ret half [[MUL]]
613 float16_t test_vmulh_f16(float16_t a, float16_t b) {
614 return vmulh_f16(a, b);
617 // CHECK-LABEL: test_vmulxh_f16
618 // CHECK: [[MUL:%.*]] = call half @llvm.aarch64.neon.fmulx.f16(half %a, half %b)
619 // CHECK: ret half [[MUL]]
620 float16_t test_vmulxh_f16(float16_t a, float16_t b) {
621 return vmulxh_f16(a, b);
624 // CHECK-LABEL: test_vrecpsh_f16
625 // CHECK: [[RECPS:%.*]] = call half @llvm.aarch64.neon.frecps.f16(half %a, half %b)
626 // CHECK: ret half [[RECPS]]
627 float16_t test_vrecpsh_f16(float16_t a, float16_t b) {
628 return vrecpsh_f16(a, b);
631 // CHECK-LABEL: test_vrsqrtsh_f16
632 // CHECK: [[RSQRTS:%.*]] = call half @llvm.aarch64.neon.frsqrts.f16(half %a, half %b)
633 // CHECK: ret half [[RSQRTS]]
634 float16_t test_vrsqrtsh_f16(float16_t a, float16_t b) {
635 return vrsqrtsh_f16(a, b);
638 // CHECK-LABEL: test_vsubh_f16
639 // CHECK: [[SUB:%.*]] = fsub half %a, %b
640 // CHECK: ret half [[SUB]]
641 float16_t test_vsubh_f16(float16_t a, float16_t b) {
642 return vsubh_f16(a, b);
645 // CHECK-LABEL: test_vfmah_f16
646 // CHECK: [[FMA:%.*]] = call half @llvm.fma.f16(half %b, half %c, half %a)
647 // CHECK: ret half [[FMA]]
648 float16_t test_vfmah_f16(float16_t a, float16_t b, float16_t c) {
649 return vfmah_f16(a, b, c);
652 // CHECK-LABEL: test_vfmsh_f16
653 // CHECK: [[SUB:%.*]] = fsub half 0xH8000, %b
654 // CHECK: [[ADD:%.*]] = call half @llvm.fma.f16(half [[SUB]], half %c, half %a)
655 // CHECK: ret half [[ADD]]
656 float16_t test_vfmsh_f16(float16_t a, float16_t b, float16_t c) {
657 return vfmsh_f16(a, b, c);