; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl < %s | FileCheck %s

; Skylake-avx512 target supports masked load/store for i8 and i16 vectors
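; Each test passes its <N x i1> mask in a vector register, so codegen builds a
; k-mask by shifting the i1 bit into the MSB of every lane (vpsllw $7 for byte
; masks, vpsllw $15 for word masks) and extracting it with vpmovb2m/vpmovw2m
; before issuing the masked vmovdqu8/vmovdqu16.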

define <16 x i8> @test_mask_load_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_load_16xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT:    vpmovb2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu8 (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %addr, i32 4, <16 x i1>%mask, <16 x i8> undef)
  ret <16 x i8> %res
}
declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)

define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_load_32xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT:    vpmovb2m %ymm0, %k1
; CHECK-NEXT:    vmovdqu8 (%rdi), %ymm1 {%k1}
; CHECK-NEXT:    vmovdqa %ymm1, %ymm0
; CHECK-NEXT:    retq
  %res = call <32 x i8> @llvm.masked.load.v32i8.p0v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> %val)
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.masked.load.v32i8.p0v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)

define <64 x i8> @test_mask_load_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_load_64xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT:    vpmovb2m %zmm0, %k1
; CHECK-NEXT:    vmovdqu8 (%rdi), %zmm1 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>* %addr, i32 4, <64 x i1>%mask, <64 x i8> %val)
  ret <64 x i8> %res
}
declare <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>)

define <8 x i16> @test_mask_load_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_load_8xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT:    vpmovw2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu16 (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr, i32 4, <8 x i1>%mask, <8 x i16> undef)
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)

define <16 x i16> @test_mask_load_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_load_16xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT:    vpmovb2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu16 (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <16 x i16> @llvm.masked.load.v16i16.p0v16i16(<16 x i16>* %addr, i32 4, <16 x i1>%mask, <16 x i16> zeroinitializer)
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.masked.load.v16i16.p0v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i16>)

define <32 x i16> @test_mask_load_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_load_32xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT:    vpmovb2m %ymm0, %k1
; CHECK-NEXT:    vmovdqu16 (%rdi), %zmm1 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <32 x i16> @llvm.masked.load.v32i16.p0v32i16(<32 x i16>* %addr, i32 4, <32 x i1>%mask, <32 x i16> %val)
  ret <32 x i16> %res
}
declare <32 x i16> @llvm.masked.load.v32i16.p0v32i16(<32 x i16>*, i32, <32 x i1>, <32 x i16>)

define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_store_16xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT:    vpmovb2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu8 %xmm1, (%rdi) {%k1}
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %val, <16 x i8>* %addr, i32 4, <16 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)

define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_store_32xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT:    vpmovb2m %ymm0, %k1
; CHECK-NEXT:    vmovdqu8 %ymm1, (%rdi) {%k1}
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v32i8.p0v32i8(<32 x i8> %val, <32 x i8>* %addr, i32 4, <32 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v32i8.p0v32i8(<32 x i8>, <32 x i8>*, i32, <32 x i1>)

define void @test_mask_store_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_store_64xi8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT:    vpmovb2m %zmm0, %k1
; CHECK-NEXT:    vmovdqu8 %zmm1, (%rdi) {%k1}
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v64i8.p0v64i8(<64 x i8> %val, <64 x i8>* %addr, i32 4, <64 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v64i8.p0v64i8(<64 x i8>, <64 x i8>*, i32, <64 x i1>)

define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_store_8xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT:    vpmovw2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu16 %xmm1, (%rdi) {%k1}
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %val, <8 x i16>* %addr, i32 4, <8 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)

define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_store_16xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT:    vpmovb2m %xmm0, %k1
; CHECK-NEXT:    vmovdqu16 %ymm1, (%rdi) {%k1}
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v16i16.p0v16i16(<16 x i16> %val, <16 x i16>* %addr, i32 4, <16 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v16i16.p0v16i16(<16 x i16>, <16 x i16>*, i32, <16 x i1>)

define void @test_mask_store_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_store_32xi16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT:    vpmovb2m %ymm0, %k1
; CHECK-NEXT:    vmovdqu16 %zmm1, (%rdi) {%k1}
; CHECK-NEXT:    retq
  call void @llvm.masked.store.v32i16.p0v32i16(<32 x i16> %val, <32 x i16>* %addr, i32 4, <32 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v32i16.p0v32i16(<32 x i16>, <32 x i16>*, i32, <32 x i1>)