1 //===- NVPTXInstrInfo.td - NVPTX Instruction defs -------------*- tblgen-*-===//
\r
3 // The LLVM Compiler Infrastructure
\r
5 // This file is distributed under the University of Illinois Open Source
\r
6 // License. See LICENSE.TXT for details.
\r
8 //===----------------------------------------------------------------------===//
\r
10 // This file describes the PTX instructions in TableGen format.
\r
12 //===----------------------------------------------------------------------===//
\r
14 include "NVPTXInstrFormats.td"
\r
16 // A NOP instruction
\r
17 let hasSideEffects = 0 in {
\r
18 def NOP : NVPTXInst<(outs), (ins), "", []>;
\r
21 let OperandType = "OPERAND_IMMEDIATE" in {
\r
22 def f16imm : Operand<f16>;
\r
25 // List of vector specific properties
\r
// Vector-instruction category tags (carried via VecInstTypeEnum). The tag
// values (1-7, 15) must stay distinct; 15 is the catch-all "other" bucket.
26 def isVecLD : VecInstTypeEnum<1>;
\r
27 def isVecST : VecInstTypeEnum<2>;
\r
28 def isVecBuild : VecInstTypeEnum<3>;
\r
29 def isVecShuffle : VecInstTypeEnum<4>;
\r
30 def isVecExtract : VecInstTypeEnum<5>;
\r
31 def isVecInsert : VecInstTypeEnum<6>;
\r
32 def isVecDest : VecInstTypeEnum<7>;
\r
33 def isVecOther : VecInstTypeEnum<15>;
\r
35 //===----------------------------------------------------------------------===//
\r
36 // NVPTX Operand Definitions.
\r
37 //===----------------------------------------------------------------------===//
\r
// Branch-target operand (an OtherVT, i.e. a basic-block reference).
39 def brtarget : Operand<OtherVT>;
\r
41 // CVT conversion modes
\r
42 // These must match the enum in NVPTX.h
\r
// Plain conversion/rounding modes (low nibble): RNI/RZI/RMI/RPI round to an
// integer (nearest-even/zero/-inf/+inf); RN/RZ/RM/RP round the fp result.
43 def CvtNONE : PatLeaf<(i32 0x0)>;
\r
44 def CvtRNI : PatLeaf<(i32 0x1)>;
\r
45 def CvtRZI : PatLeaf<(i32 0x2)>;
\r
46 def CvtRMI : PatLeaf<(i32 0x3)>;
\r
47 def CvtRPI : PatLeaf<(i32 0x4)>;
\r
48 def CvtRN : PatLeaf<(i32 0x5)>;
\r
49 def CvtRZ : PatLeaf<(i32 0x6)>;
\r
50 def CvtRM : PatLeaf<(i32 0x7)>;
\r
51 def CvtRP : PatLeaf<(i32 0x8)>;
\r
// FTZ variants: base mode | 0x10 (flush subnormal inputs/results to zero).
53 def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;
\r
54 def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;
\r
55 def CvtRZI_FTZ : PatLeaf<(i32 0x12)>;
\r
56 def CvtRMI_FTZ : PatLeaf<(i32 0x13)>;
\r
57 def CvtRPI_FTZ : PatLeaf<(i32 0x14)>;
\r
58 def CvtRN_FTZ : PatLeaf<(i32 0x15)>;
\r
59 def CvtRZ_FTZ : PatLeaf<(i32 0x16)>;
\r
60 def CvtRM_FTZ : PatLeaf<(i32 0x17)>;
\r
61 def CvtRP_FTZ : PatLeaf<(i32 0x18)>;
\r
// Saturation flag: bit 0x20 (combines with FTZ as 0x30).
63 def CvtSAT : PatLeaf<(i32 0x20)>;
\r
64 def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;
\r
66 def CvtMode : Operand<i32> {
\r
67 let PrintMethod = "printCvtMode";
\r
71 // These must match the enum in NVPTX.h
\r
// Ordered comparisons occupy 0-5, their unordered 'U' counterparts 10-15,
// with NUM (16) and NAN (17) as the ordered/unordered class tests.
72 def CmpEQ : PatLeaf<(i32 0)>;
\r
73 def CmpNE : PatLeaf<(i32 1)>;
\r
74 def CmpLT : PatLeaf<(i32 2)>;
\r
75 def CmpLE : PatLeaf<(i32 3)>;
\r
76 def CmpGT : PatLeaf<(i32 4)>;
\r
77 def CmpGE : PatLeaf<(i32 5)>;
\r
78 def CmpEQU : PatLeaf<(i32 10)>;
\r
79 def CmpNEU : PatLeaf<(i32 11)>;
\r
80 def CmpLTU : PatLeaf<(i32 12)>;
\r
81 def CmpLEU : PatLeaf<(i32 13)>;
\r
82 def CmpGTU : PatLeaf<(i32 14)>;
\r
83 def CmpGEU : PatLeaf<(i32 15)>;
\r
84 def CmpNUM : PatLeaf<(i32 16)>;
\r
85 def CmpNAN : PatLeaf<(i32 17)>;
\r
// FTZ variants: base comparison | 0x100 (flush subnormals to zero).
87 def CmpEQ_FTZ : PatLeaf<(i32 0x100)>;
\r
88 def CmpNE_FTZ : PatLeaf<(i32 0x101)>;
\r
89 def CmpLT_FTZ : PatLeaf<(i32 0x102)>;
\r
90 def CmpLE_FTZ : PatLeaf<(i32 0x103)>;
\r
91 def CmpGT_FTZ : PatLeaf<(i32 0x104)>;
\r
92 def CmpGE_FTZ : PatLeaf<(i32 0x105)>;
\r
93 def CmpEQU_FTZ : PatLeaf<(i32 0x10A)>;
\r
94 def CmpNEU_FTZ : PatLeaf<(i32 0x10B)>;
\r
95 def CmpLTU_FTZ : PatLeaf<(i32 0x10C)>;
\r
96 def CmpLEU_FTZ : PatLeaf<(i32 0x10D)>;
\r
97 def CmpGTU_FTZ : PatLeaf<(i32 0x10E)>;
\r
98 def CmpGEU_FTZ : PatLeaf<(i32 0x10F)>;
\r
99 def CmpNUM_FTZ : PatLeaf<(i32 0x110)>;
\r
100 def CmpNAN_FTZ : PatLeaf<(i32 0x111)>;
\r
102 def CmpMode : Operand<i32> {
\r
103 let PrintMethod = "printCmpMode";
\r
105 def VecElement : Operand<i32> {
\r
106 let PrintMethod = "printVecElement";
\r
109 //===----------------------------------------------------------------------===//
\r
110 // NVPTX Instruction Predicate Definitions
\r
111 //===----------------------------------------------------------------------===//
\r
// Subtarget-capability predicates, all keyed off NVPTXSubtarget queries.
114 def hasAtomRedG32 : Predicate<"Subtarget->hasAtomRedG32()">;
\r
115 def hasAtomRedS32 : Predicate<"Subtarget->hasAtomRedS32()">;
\r
116 def hasAtomRedGen32 : Predicate<"Subtarget->hasAtomRedGen32()">;
\r
// Fall back to global-space atomics when generic-space atomics are absent.
117 def useAtomRedG32forGen32 :
\r
118 Predicate<"!Subtarget->hasAtomRedGen32() && Subtarget->hasAtomRedG32()">;
\r
119 def hasBrkPt : Predicate<"Subtarget->hasBrkPt()">;
\r
120 def hasAtomRedG64 : Predicate<"Subtarget->hasAtomRedG64()">;
\r
121 def hasAtomRedS64 : Predicate<"Subtarget->hasAtomRedS64()">;
\r
122 def hasAtomRedGen64 : Predicate<"Subtarget->hasAtomRedGen64()">;
\r
123 def useAtomRedG64forGen64 :
\r
124 Predicate<"!Subtarget->hasAtomRedGen64() && Subtarget->hasAtomRedG64()">;
\r
125 def hasAtomAddF32 : Predicate<"Subtarget->hasAtomAddF32()">;
\r
126 def hasAtomAddF64 : Predicate<"Subtarget->hasAtomAddF64()">;
\r
127 def hasAtomScope : Predicate<"Subtarget->hasAtomScope()">;
\r
128 def hasAtomBitwise64 : Predicate<"Subtarget->hasAtomBitwise64()">;
\r
129 def hasAtomMinMax64 : Predicate<"Subtarget->hasAtomMinMax64()">;
\r
130 def hasVote : Predicate<"Subtarget->hasVote()">;
\r
131 def hasDouble : Predicate<"Subtarget->hasDouble()">;
\r
132 def reqPTX20 : Predicate<"Subtarget->reqPTX20()">;
\r
133 def hasLDG : Predicate<"Subtarget->hasLDG()">;
\r
134 def hasLDU : Predicate<"Subtarget->hasLDU()">;
\r
135 def hasGenericLdSt : Predicate<"Subtarget->hasGenericLdSt()">;
\r
// Codegen-option predicates. These are bare calls (no Subtarget->), so they
// resolve in the context where the predicate string is expanded — presumably
// the instruction selector; not declared in this file.
137 def doF32FTZ : Predicate<"useF32FTZ()">;
\r
138 def doNoF32FTZ : Predicate<"!useF32FTZ()">;
\r
140 def doMulWide : Predicate<"doMulWide">;
\r
142 def allowFMA : Predicate<"allowFMA()">;
\r
143 def noFMA : Predicate<"!allowFMA()">;
\r
144 def allowUnsafeFPMath : Predicate<"allowUnsafeFPMath()">;
\r
// f32 division/sqrt accuracy selection (getDivF32Level: 0 = approx, 1 = full).
146 def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">;
\r
147 def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;
\r
149 def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;
\r
150 def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;
\r
152 def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
\r
153 def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
\r
// Trivially-true predicate, usable as a placeholder in Requires<> lists.
155 def true : Predicate<"true">;
\r
157 def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;
\r
159 def useFP16Math: Predicate<"Subtarget->allowFP16Math()">;
\r
161 //===----------------------------------------------------------------------===//
\r
162 // Some Common Instruction Class Templates
\r
163 //===----------------------------------------------------------------------===//
\r
165 // Template for instructions which take three int64, int32, or int16 args.
\r
166 // The instructions are named "<OpcStr><Width>" (e.g. "add.s64").
\r
167 multiclass I3<string OpcStr, SDNode OpNode> {
\r
169 NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
\r
170 !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
\r
171 [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
\r
173 NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
\r
174 !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
\r
175 [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
\r
177 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
\r
178 !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
\r
179 [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
\r
181 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
\r
182 !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
\r
183 [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
\r
185 NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
\r
186 !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
\r
187 [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
\r
189 NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
\r
190 !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
\r
191 [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
\r
194 // Template for instructions which take 3 int32 args. The instructions are
\r
195 // named "<OpcStr>.s32" (e.g. "addc.cc.s32").
\r
196 multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {
\r
198 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
\r
199 !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
\r
200 [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
\r
202 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
\r
203 !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
\r
204 [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
\r
207 // Template for instructions which take three fp64 or fp32 args. The
\r
208 // instructions are named "<OpcStr>.f<Width>" (e.g. "min.f64").
\r
210 // Also defines ftz (flush subnormal inputs and results to sign-preserving
\r
211 // zero) variants for fp32 functions.
\r
213 // This multiclass should be used for nodes that cannot be folded into FMAs.
\r
214 // For nodes that can be folded into FMAs (i.e. adds and muls), use
\r
215 // F3_fma_component.
\r
216 multiclass F3<string OpcStr, SDNode OpNode> {
\r
218 NVPTXInst<(outs Float64Regs:$dst),
\r
219 (ins Float64Regs:$a, Float64Regs:$b),
\r
220 !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
\r
221 [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
\r
223 NVPTXInst<(outs Float64Regs:$dst),
\r
224 (ins Float64Regs:$a, f64imm:$b),
\r
225 !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
\r
226 [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
\r
228 NVPTXInst<(outs Float32Regs:$dst),
\r
229 (ins Float32Regs:$a, Float32Regs:$b),
\r
230 !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
\r
231 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
\r
232 Requires<[doF32FTZ]>;
\r
234 NVPTXInst<(outs Float32Regs:$dst),
\r
235 (ins Float32Regs:$a, f32imm:$b),
\r
236 !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
\r
237 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
\r
238 Requires<[doF32FTZ]>;
\r
240 NVPTXInst<(outs Float32Regs:$dst),
\r
241 (ins Float32Regs:$a, Float32Regs:$b),
\r
242 !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
\r
243 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
\r
245 NVPTXInst<(outs Float32Regs:$dst),
\r
246 (ins Float32Regs:$a, f32imm:$b),
\r
247 !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
\r
248 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
\r
251 // Template for instructions which take three FP args. The
\r
252 // instructions are named "<OpcStr>.f<Width>" (e.g. "add.f64").
\r
254 // Also defines ftz (flush subnormal inputs and results to sign-preserving
\r
255 // zero) variants for fp32/fp16 functions.
\r
257 // This multiclass should be used for nodes that can be folded to make fma ops.
\r
258 // In this case, we use the ".rn" variant when FMA is disabled, as this behaves
\r
259 // just like the non ".rn" op, but prevents ptxas from creating FMAs.
\r
260 multiclass F3_fma_component<string OpcStr, SDNode OpNode> {
\r
262 NVPTXInst<(outs Float64Regs:$dst),
\r
263 (ins Float64Regs:$a, Float64Regs:$b),
\r
264 !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
\r
265 [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
\r
266 Requires<[allowFMA]>;
\r
268 NVPTXInst<(outs Float64Regs:$dst),
\r
269 (ins Float64Regs:$a, f64imm:$b),
\r
270 !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
\r
271 [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
\r
272 Requires<[allowFMA]>;
\r
274 NVPTXInst<(outs Float32Regs:$dst),
\r
275 (ins Float32Regs:$a, Float32Regs:$b),
\r
276 !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
\r
277 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
\r
278 Requires<[allowFMA, doF32FTZ]>;
\r
280 NVPTXInst<(outs Float32Regs:$dst),
\r
281 (ins Float32Regs:$a, f32imm:$b),
\r
282 !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
\r
283 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
\r
284 Requires<[allowFMA, doF32FTZ]>;
\r
286 NVPTXInst<(outs Float32Regs:$dst),
\r
287 (ins Float32Regs:$a, Float32Regs:$b),
\r
288 !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
\r
289 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
\r
290 Requires<[allowFMA]>;
\r
292 NVPTXInst<(outs Float32Regs:$dst),
\r
293 (ins Float32Regs:$a, f32imm:$b),
\r
294 !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
\r
295 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
\r
296 Requires<[allowFMA]>;
\r
299 NVPTXInst<(outs Float16Regs:$dst),
\r
300 (ins Float16Regs:$a, Float16Regs:$b),
\r
301 !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),
\r
302 [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
\r
303 Requires<[useFP16Math, allowFMA, doF32FTZ]>;
\r
305 NVPTXInst<(outs Float16Regs:$dst),
\r
306 (ins Float16Regs:$a, Float16Regs:$b),
\r
307 !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),
\r
308 [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
\r
309 Requires<[useFP16Math, allowFMA]>;
\r
312 NVPTXInst<(outs Float16x2Regs:$dst),
\r
313 (ins Float16x2Regs:$a, Float16x2Regs:$b),
\r
314 !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),
\r
315 [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
\r
316 Requires<[useFP16Math, allowFMA, doF32FTZ]>;
\r
318 NVPTXInst<(outs Float16x2Regs:$dst),
\r
319 (ins Float16x2Regs:$a, Float16x2Regs:$b),
\r
320 !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),
\r
321 [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
\r
322 Requires<[useFP16Math, allowFMA]>;
\r
324 // These have strange names so we don't perturb existing mir tests.
\r
326 NVPTXInst<(outs Float64Regs:$dst),
\r
327 (ins Float64Regs:$a, Float64Regs:$b),
\r
328 !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
\r
329 [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
\r
332 NVPTXInst<(outs Float64Regs:$dst),
\r
333 (ins Float64Regs:$a, f64imm:$b),
\r
334 !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
\r
335 [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
\r
338 NVPTXInst<(outs Float32Regs:$dst),
\r
339 (ins Float32Regs:$a, Float32Regs:$b),
\r
340 !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
\r
341 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
\r
342 Requires<[noFMA, doF32FTZ]>;
\r
344 NVPTXInst<(outs Float32Regs:$dst),
\r
345 (ins Float32Regs:$a, f32imm:$b),
\r
346 !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
\r
347 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
\r
348 Requires<[noFMA, doF32FTZ]>;
\r
350 NVPTXInst<(outs Float32Regs:$dst),
\r
351 (ins Float32Regs:$a, Float32Regs:$b),
\r
352 !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
\r
353 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
\r
356 NVPTXInst<(outs Float32Regs:$dst),
\r
357 (ins Float32Regs:$a, f32imm:$b),
\r
358 !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
\r
359 [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
\r
362 NVPTXInst<(outs Float16Regs:$dst),
\r
363 (ins Float16Regs:$a, Float16Regs:$b),
\r
364 !strconcat(OpcStr, ".rn.ftz.f16 \t$dst, $a, $b;"),
\r
365 [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
\r
366 Requires<[useFP16Math, noFMA, doF32FTZ]>;
\r
368 NVPTXInst<(outs Float16Regs:$dst),
\r
369 (ins Float16Regs:$a, Float16Regs:$b),
\r
370 !strconcat(OpcStr, ".rn.f16 \t$dst, $a, $b;"),
\r
371 [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
\r
372 Requires<[useFP16Math, noFMA]>;
\r
373 def _rnf16x2rr_ftz :
\r
374 NVPTXInst<(outs Float16x2Regs:$dst),
\r
375 (ins Float16x2Regs:$a, Float16x2Regs:$b),
\r
376 !strconcat(OpcStr, ".rn.ftz.f16x2 \t$dst, $a, $b;"),
\r
377 [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
\r
378 Requires<[useFP16Math, noFMA, doF32FTZ]>;
\r
380 NVPTXInst<(outs Float16x2Regs:$dst),
\r
381 (ins Float16x2Regs:$a, Float16x2Regs:$b),
\r
382 !strconcat(OpcStr, ".rn.f16x2 \t$dst, $a, $b;"),
\r
383 [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
\r
384 Requires<[useFP16Math, noFMA]>;
\r
387 // Template for operations which take two f32 or f64 operands. Provides three
\r
388 // instructions: <OpcStr>.f64, <OpcStr>.f32, and <OpcStr>.ftz.f32 (flush
\r
389 // subnormal inputs and results to zero).
\r
390 multiclass F2<string OpcStr, SDNode OpNode> {
\r
391 def f64 : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
\r
392 !strconcat(OpcStr, ".f64 \t$dst, $a;"),
\r
393 [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
\r
// Flush-subnormals-to-zero f32 variant; only selected under doF32FTZ.
394 def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
\r
395 !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
\r
396 [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
\r
397 Requires<[doF32FTZ]>;
\r
// Unpredicated f32 fallback (listed after the ftz form).
398 def f32 : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
\r
399 !strconcat(OpcStr, ".f32 \t$dst, $a;"),
\r
400 [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
\r
403 //===----------------------------------------------------------------------===//
\r
404 // NVPTX Instructions.
\r
405 //===----------------------------------------------------------------------===//
\r
407 //-----------------------------------
\r
409 //-----------------------------------
\r
411 let hasSideEffects = 0 in {
\r
412 // Generate a cvt to the given type from all possible types. Each instance
\r
413 // takes a CvtMode immediate that defines the conversion mode to use. It can
\r
414 // be CvtNONE to omit a conversion mode.
\r
415 multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {
\r
417 NVPTXInst<(outs RC:$dst),
\r
418 (ins Int16Regs:$src, CvtMode:$mode),
\r
419 !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
\r
420 FromName, ".s8 \t$dst, $src;"), []>;
\r
422 NVPTXInst<(outs RC:$dst),
\r
423 (ins Int16Regs:$src, CvtMode:$mode),
\r
424 !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
\r
425 FromName, ".u8 \t$dst, $src;"), []>;
\r
427 NVPTXInst<(outs RC:$dst),
\r
428 (ins Int16Regs:$src, CvtMode:$mode),
\r
429 !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
\r
430 FromName, ".s16 \t$dst, $src;"), []>;
\r
432 NVPTXInst<(outs RC:$dst),
\r
433 (ins Int16Regs:$src, CvtMode:$mode),
\r
434 !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
\r
435 FromName, ".u16 \t$dst, $src;"), []>;
\r
437 NVPTXInst<(outs RC:$dst),
\r
438 (ins Int32Regs:$src, CvtMode:$mode),
\r
439 !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
\r
440 FromName, ".s32 \t$dst, $src;"), []>;
\r
442 NVPTXInst<(outs RC:$dst),
\r
443 (ins Int32Regs:$src, CvtMode:$mode),
\r
444 !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
\r
445 FromName, ".u32 \t$dst, $src;"), []>;
\r
447 NVPTXInst<(outs RC:$dst),
\r
448 (ins Int64Regs:$src, CvtMode:$mode),
\r
449 !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
\r
450 FromName, ".s64 \t$dst, $src;"), []>;
\r
452 NVPTXInst<(outs RC:$dst),
\r
453 (ins Int64Regs:$src, CvtMode:$mode),
\r
454 !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
\r
455 FromName, ".u64 \t$dst, $src;"), []>;
\r
457 NVPTXInst<(outs RC:$dst),
\r
458 (ins Float16Regs:$src, CvtMode:$mode),
\r
459 !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
\r
460 FromName, ".f16 \t$dst, $src;"), []>;
\r
462 NVPTXInst<(outs RC:$dst),
\r
463 (ins Float32Regs:$src, CvtMode:$mode),
\r
464 !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
\r
465 FromName, ".f32 \t$dst, $src;"), []>;
\r
467 NVPTXInst<(outs RC:$dst),
\r
468 (ins Float64Regs:$src, CvtMode:$mode),
\r
469 !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
\r
470 FromName, ".f64 \t$dst, $src;"), []>;
\r
473 // Generate cvts from all types to all types.
\r
// One CVT_FROM_ALL instantiation per destination type. Note that s8/u8
// destinations are carried in Int16Regs (8-bit values live in 16-bit regs).
474 defm CVT_s8 : CVT_FROM_ALL<"s8", Int16Regs>;
\r
475 defm CVT_u8 : CVT_FROM_ALL<"u8", Int16Regs>;
\r
476 defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;
\r
477 defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;
\r
478 defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;
\r
479 defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;
\r
480 defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;
\r
481 defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;
\r
482 defm CVT_f16 : CVT_FROM_ALL<"f16", Float16Regs>;
\r
483 defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;
\r
484 defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;
\r
486 // These cvts are different from those above: The source and dest registers
\r
487 // are of the same type.
\r
// Used for sign_extend_inreg-style lowering: sign-extend a narrow value
// already held in the wider register class.
488 def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
\r
489 "cvt.s16.s8 \t$dst, $src;", []>;
\r
490 def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
\r
491 "cvt.s32.s8 \t$dst, $src;", []>;
\r
492 def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
\r
493 "cvt.s32.s16 \t$dst, $src;", []>;
\r
494 def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
\r
495 "cvt.s64.s8 \t$dst, $src;", []>;
\r
496 def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
\r
497 "cvt.s64.s16 \t$dst, $src;", []>;
\r
498 def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
\r
499 "cvt.s64.s32 \t$dst, $src;", []>;
\r
502 //-----------------------------------
\r
503 // Integer Arithmetic
\r
504 //-----------------------------------
\r
506 // Template for xor masquerading as int1 arithmetic.
\r
// i1 add and sub both reduce to xor (arithmetic mod 2); OpNode only selects
// which generic DAG node each instantiation matches.
507 multiclass ADD_SUB_i1<SDNode OpNode> {
\r
508 def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
\r
509 "xor.pred \t$dst, $a, $b;",
\r
510 [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
\r
511 def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
\r
512 "xor.pred \t$dst, $a, $b;",
\r
513 [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
\r
516 // int1 addition and subtraction are both just xor.
\r
517 defm ADD_i1 : ADD_SUB_i1<add>;
\r
518 defm SUB_i1 : ADD_SUB_i1<sub>;
\r
520 // int16, int32, and int64 signed addition. Since nvptx is 2's complement, we
\r
521 // also use these for unsigned arithmetic.
\r
522 defm ADD : I3<"add.s", add>;
\r
523 defm SUB : I3<"sub.s", sub>;
\r
525 // int32 addition and subtraction with carry-out.
\r
526 // FIXME: PTX 4.3 adds a 64-bit add.cc (and maybe also 64-bit addc.cc?).
\r
527 defm ADDCC : ADD_SUB_INT_32<"add.cc", addc>;
\r
528 defm SUBCC : ADD_SUB_INT_32<"sub.cc", subc>;
\r
530 // int32 addition and subtraction with carry-in and carry-out.
\r
531 defm ADDCCC : ADD_SUB_INT_32<"addc.cc", adde>;
\r
532 defm SUBCCC : ADD_SUB_INT_32<"subc.cc", sube>;
\r
// Low half of the product, plus signed/unsigned high-half variants.
534 defm MULT : I3<"mul.lo.s", mul>;
\r
536 defm MULTHS : I3<"mul.hi.s", mulhs>;
\r
537 defm MULTHU : I3<"mul.hi.u", mulhu>;
\r
539 defm SDIV : I3<"div.s", sdiv>;
\r
540 defm UDIV : I3<"div.u", udiv>;
\r
542 // The ri versions of rem.s and rem.u won't be selected; DAGCombiner::visitSREM
\r
// (and visitUREM) rewrite rem-by-constant before selection, so only the
// register-register forms are reached in practice.
544 defm SREM : I3<"rem.s", srem>;
\r
545 defm UREM : I3<"rem.u", urem>;
\r
547 // Integer absolute value. NOTE(review): the old comment here described a
\r
548 // NumBits parameter and a branch-free bithack expansion; this version simply
\r
549 // matches the generic abs node and emits the PTX abs instruction directly.
\r
550 multiclass ABS<RegisterClass RC, string SizeName> {
\r
551 def : NVPTXInst<(outs RC:$dst), (ins RC:$a),
\r
552 !strconcat("abs", SizeName, " \t$dst, $a;"),
\r
553 [(set RC:$dst, (abs RC:$a))]>;
\r
555 defm ABS_16 : ABS<Int16Regs, ".s16">;
\r
556 defm ABS_32 : ABS<Int32Regs, ".s32">;
\r
557 defm ABS_64 : ABS<Int64Regs, ".s64">;
\r
559 // Integer min/max.
\r
560 defm SMAX : I3<"max.s", smax>;
\r
561 defm UMAX : I3<"max.u", umax>;
\r
562 defm SMIN : I3<"min.s", smin>;
\r
563 defm UMIN : I3<"min.u", umin>;
\r
566 // Wide multiplication
\r
569 NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
\r
570 "mul.wide.s32 \t$dst, $a, $b;", []>;
\r
571 def MULWIDES64Imm :
\r
572 NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
\r
573 "mul.wide.s32 \t$dst, $a, $b;", []>;
\r
574 def MULWIDES64Imm64 :
\r
575 NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
\r
576 "mul.wide.s32 \t$dst, $a, $b;", []>;
\r
579 NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
\r
580 "mul.wide.u32 \t$dst, $a, $b;", []>;
\r
581 def MULWIDEU64Imm :
\r
582 NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
\r
583 "mul.wide.u32 \t$dst, $a, $b;", []>;
\r
584 def MULWIDEU64Imm64 :
\r
585 NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
\r
586 "mul.wide.u32 \t$dst, $a, $b;", []>;
\r
589 NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
\r
590 "mul.wide.s16 \t$dst, $a, $b;", []>;
\r
591 def MULWIDES32Imm :
\r
592 NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
\r
593 "mul.wide.s16 \t$dst, $a, $b;", []>;
\r
594 def MULWIDES32Imm32 :
\r
595 NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
\r
596 "mul.wide.s16 \t$dst, $a, $b;", []>;
\r
599 NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
\r
600 "mul.wide.u16 \t$dst, $a, $b;", []>;
\r
601 def MULWIDEU32Imm :
\r
602 NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
\r
603 "mul.wide.u16 \t$dst, $a, $b;", []>;
\r
604 def MULWIDEU32Imm32 :
\r
605 NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
\r
606 "mul.wide.u16 \t$dst, $a, $b;", []>;
\r
// mul.wide type profile: one result, two operands of matching type. The
// result width itself is unconstrained here; the patterns below pin the
// i16xi16->i32 and i32xi32->i64 combinations.
608 def SDTMulWide : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
\r
609 def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
\r
610 def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
\r
612 // Matchers for signed, unsigned mul.wide ISD nodes.
\r
613 def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),
\r
614 (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
\r
615 Requires<[doMulWide]>;
\r
616 def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
\r
617 (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
\r
618 Requires<[doMulWide]>;
\r
619 def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),
\r
620 (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
\r
621 Requires<[doMulWide]>;
\r
622 def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
\r
623 (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
\r
624 Requires<[doMulWide]>;
\r
626 def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),
\r
627 (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
\r
628 Requires<[doMulWide]>;
\r
629 def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
\r
630 (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
\r
631 Requires<[doMulWide]>;
\r
632 def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),
\r
633 (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
\r
634 Requires<[doMulWide]>;
\r
635 def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
\r
636 (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
\r
637 Requires<[doMulWide]>;
\r
639 // Predicates used for converting some patterns to mul.wide.
\r
640 def SInt32Const : PatLeaf<(imm), [{
\r
641 const APInt &v = N->getAPIntValue();
\r
642 return v.isSignedIntN(32);
\r
645 def UInt32Const : PatLeaf<(imm), [{
\r
646 const APInt &v = N->getAPIntValue();
\r
647 return v.isIntN(32);
\r
650 def SInt16Const : PatLeaf<(imm), [{
\r
651 const APInt &v = N->getAPIntValue();
\r
652 return v.isSignedIntN(16);
\r
655 def UInt16Const : PatLeaf<(imm), [{
\r
656 const APInt &v = N->getAPIntValue();
\r
657 return v.isIntN(16);
\r
660 def Int5Const : PatLeaf<(imm), [{
\r
661 // Check if 0 <= v < 32; only then will the result of (x << v) be an int32.
\r
662 const APInt &v = N->getAPIntValue();
\r
663 return v.sge(0) && v.slt(32);
\r
666 def Int4Const : PatLeaf<(imm), [{
\r
667 // Check if 0 <= v < 16; only then will the result of (x << v) be an int16.
\r
668 const APInt &v = N->getAPIntValue();
\r
669 return v.sge(0) && v.slt(16);
\r
672 def SHL2MUL32 : SDNodeXForm<imm, [{
\r
673 const APInt &v = N->getAPIntValue();
\r
675 return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i32);
\r
678 def SHL2MUL16 : SDNodeXForm<imm, [{
\r
679 const APInt &v = N->getAPIntValue();
\r
681 return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);
\r
684 // Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.
\r
// shl-of-extend with a small constant shift becomes mul.wide; SHL2MUL32/16
// re-encode the shift amount as the equivalent multiplier immediate.
685 def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),
\r
686 (MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
\r
687 Requires<[doMulWide]>;
\r
688 def : Pat<(shl (zext Int32Regs:$a), (i32 Int5Const:$b)),
\r
689 (MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
\r
690 Requires<[doMulWide]>;
\r
692 def : Pat<(shl (sext Int16Regs:$a), (i16 Int4Const:$b)),
\r
693 (MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
\r
694 Requires<[doMulWide]>;
\r
695 def : Pat<(shl (zext Int16Regs:$a), (i16 Int4Const:$b)),
\r
696 (MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
\r
697 Requires<[doMulWide]>;
\r
699 // Convert "sign/zero-extend then multiply" to mul.wide.
\r
700 def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
\r
701 (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
\r
702 Requires<[doMulWide]>;
\r
// Immediate operand must fit the narrow source width (SInt32Const etc.)
// for the widening multiply to be equivalent.
703 def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
\r
704 (MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,
\r
705 Requires<[doMulWide]>;
\r
707 def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
\r
708 (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
\r
709 Requires<[doMulWide]>;
\r
710 def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
\r
711 (MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,
\r
712 Requires<[doMulWide]>;
\r
714 def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
\r
715 (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
\r
716 Requires<[doMulWide]>;
\r
717 def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
\r
718 (MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,
\r
719 Requires<[doMulWide]>;
\r
721 def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
\r
722 (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
\r
723 Requires<[doMulWide]>;
\r
724 def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
\r
725 (MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,
\r
726 Requires<[doMulWide]>;
\r
729 // Integer multiply-add
\r
732 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>,
\r
733 SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;
\r
734 def imad : SDNode<"NVPTXISD::IMAD", SDTIMAD>;
\r
737 NVPTXInst<(outs Int16Regs:$dst),
\r
738 (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
\r
739 "mad.lo.s16 \t$dst, $a, $b, $c;",
\r
740 [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
\r
742 NVPTXInst<(outs Int16Regs:$dst),
\r
743 (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
\r
744 "mad.lo.s16 \t$dst, $a, $b, $c;",
\r
745 [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
\r
747 NVPTXInst<(outs Int16Regs:$dst),
\r
748 (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
\r
749 "mad.lo.s16 \t$dst, $a, $b, $c;",
\r
750 [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
\r
752 NVPTXInst<(outs Int16Regs:$dst),
\r
753 (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
\r
754 "mad.lo.s16 \t$dst, $a, $b, $c;",
\r
755 [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, imm:$c))]>;
\r
758 NVPTXInst<(outs Int32Regs:$dst),
\r
759 (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
\r
760 "mad.lo.s32 \t$dst, $a, $b, $c;",
\r
761 [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
\r
763 NVPTXInst<(outs Int32Regs:$dst),
\r
764 (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
\r
765 "mad.lo.s32 \t$dst, $a, $b, $c;",
\r
766 [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
\r
768 NVPTXInst<(outs Int32Regs:$dst),
\r
769 (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
\r
770 "mad.lo.s32 \t$dst, $a, $b, $c;",
\r
771 [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
\r
773 NVPTXInst<(outs Int32Regs:$dst),
\r
774 (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
\r
775 "mad.lo.s32 \t$dst, $a, $b, $c;",
\r
776 [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, imm:$c))]>;
\r
779 NVPTXInst<(outs Int64Regs:$dst),
\r
780 (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
\r
781 "mad.lo.s64 \t$dst, $a, $b, $c;",
\r
782 [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
\r
784 NVPTXInst<(outs Int64Regs:$dst),
\r
785 (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
\r
786 "mad.lo.s64 \t$dst, $a, $b, $c;",
\r
787 [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
\r
789 NVPTXInst<(outs Int64Regs:$dst),
\r
790 (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
\r
791 "mad.lo.s64 \t$dst, $a, $b, $c;",
\r
792 [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
\r
794 NVPTXInst<(outs Int64Regs:$dst),
\r
795 (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
\r
796 "mad.lo.s64 \t$dst, $a, $b, $c;",
\r
797 [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, imm:$c))]>;
\r
800 NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
\r
801 "neg.s16 \t$dst, $src;",
\r
802 [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
\r
804 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
\r
805 "neg.s32 \t$dst, $src;",
\r
806 [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
\r
808 NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
\r
809 "neg.s64 \t$dst, $src;",
\r
810 [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
\r
812 //-----------------------------------
\r
813 // Floating Point Arithmetic
\r
814 //-----------------------------------
\r
817 def FloatConst1 : PatLeaf<(fpimm), [{
\r
818 return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEsingle() &&
\r
819 N->getValueAPF().convertToFloat() == 1.0f;
\r
821 // Constant 1.0 (double)
\r
822 def DoubleConst1 : PatLeaf<(fpimm), [{
\r
823 return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEdouble() &&
\r
824 N->getValueAPF().convertToDouble() == 1.0;
\r
// Materializes an FP16 constant in a register.
//
// ptxas has no hex representation for fp16, so f16 immediates cannot be
// encoded directly in .f16 instructions; the constant must first be moved
// into a register with mov.b16.
def LOAD_CONST_F16 :
  NVPTXInst<(outs Float16Regs:$dst), (ins f16imm:$a),
            "mov.b16 \t$dst, $a;", []>;
\r
// Basic floating-point arithmetic, instantiated from the shared
// multiclasses (which provide the per-type and immediate variants).
defm FADD  : F3_fma_component<"add", fadd>;
defm FSUB  : F3_fma_component<"sub", fsub>;
defm FMUL  : F3_fma_component<"mul", fmul>;

defm FMIN  : F3<"min", fminnum>;
defm FMAX  : F3<"max", fmaxnum>;

defm FABS  : F2<"abs", fabs>;
defm FNEG  : F2<"neg", fneg>;
defm FSQRT : F2<"sqrt.rn", fsqrt>;
\r
851 NVPTXInst<(outs Float64Regs:$dst),
\r
852 (ins f64imm:$a, Float64Regs:$b),
\r
853 "rcp.rn.f64 \t$dst, $b;",
\r
854 [(set Float64Regs:$dst, (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
\r
856 NVPTXInst<(outs Float64Regs:$dst),
\r
857 (ins Float64Regs:$a, Float64Regs:$b),
\r
858 "div.rn.f64 \t$dst, $a, $b;",
\r
859 [(set Float64Regs:$dst, (fdiv Float64Regs:$a, Float64Regs:$b))]>;
\r
861 NVPTXInst<(outs Float64Regs:$dst),
\r
862 (ins Float64Regs:$a, f64imm:$b),
\r
863 "div.rn.f64 \t$dst, $a, $b;",
\r
864 [(set Float64Regs:$dst, (fdiv Float64Regs:$a, fpimm:$b))]>;
\r
867 // F32 Approximate reciprocal
\r
870 NVPTXInst<(outs Float32Regs:$dst),
\r
871 (ins f32imm:$a, Float32Regs:$b),
\r
872 "rcp.approx.ftz.f32 \t$dst, $b;",
\r
873 [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
\r
874 Requires<[do_DIVF32_APPROX, doF32FTZ]>;
\r
876 NVPTXInst<(outs Float32Regs:$dst),
\r
877 (ins f32imm:$a, Float32Regs:$b),
\r
878 "rcp.approx.f32 \t$dst, $b;",
\r
879 [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
\r
880 Requires<[do_DIVF32_APPROX]>;
\r
// F32 approximate division (div.approx.f32, optionally flushing
// subnormals to zero). Selected only when approximate FP division is
// enabled (do_DIVF32_APPROX).
def FDIV32approxrr_ftz :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins Float32Regs:$a, Float32Regs:$b),
            "div.approx.ftz.f32 \t$dst, $a, $b;",
            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
  Requires<[do_DIVF32_APPROX, doF32FTZ]>;
def FDIV32approxri_ftz :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins Float32Regs:$a, f32imm:$b),
            "div.approx.ftz.f32 \t$dst, $a, $b;",
            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
  Requires<[do_DIVF32_APPROX, doF32FTZ]>;
def FDIV32approxrr :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins Float32Regs:$a, Float32Regs:$b),
            "div.approx.f32 \t$dst, $a, $b;",
            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
  Requires<[do_DIVF32_APPROX]>;
def FDIV32approxri :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins Float32Regs:$a, f32imm:$b),
            "div.approx.f32 \t$dst, $a, $b;",
            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
  Requires<[do_DIVF32_APPROX]>;
\r
// F32 semi-accurate reciprocal.
//
// For 1.0f / b, rcp.approx gives the same result as div.full(1.0f, b)
// and is faster, so use it when full-accuracy division is requested
// (do_DIVF32_FULL). The FloatConst1 leaf restricts $a to exactly 1.0f.
def FDIV321r_approx_ftz :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins f32imm:$a, Float32Regs:$b),
            "rcp.approx.ftz.f32 \t$dst, $b;",
            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
  Requires<[do_DIVF32_FULL, doF32FTZ]>;
def FDIV321r_approx :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins f32imm:$a, Float32Regs:$b),
            "rcp.approx.f32 \t$dst, $b;",
            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
  Requires<[do_DIVF32_FULL]>;
\r
926 // F32 Semi-accurate division
\r
929 NVPTXInst<(outs Float32Regs:$dst),
\r
930 (ins Float32Regs:$a, Float32Regs:$b),
\r
931 "div.full.ftz.f32 \t$dst, $a, $b;",
\r
932 [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
\r
933 Requires<[do_DIVF32_FULL, doF32FTZ]>;
\r
935 NVPTXInst<(outs Float32Regs:$dst),
\r
936 (ins Float32Regs:$a, f32imm:$b),
\r
937 "div.full.ftz.f32 \t$dst, $a, $b;",
\r
938 [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
\r
939 Requires<[do_DIVF32_FULL, doF32FTZ]>;
\r
941 NVPTXInst<(outs Float32Regs:$dst),
\r
942 (ins Float32Regs:$a, Float32Regs:$b),
\r
943 "div.full.f32 \t$dst, $a, $b;",
\r
944 [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
\r
945 Requires<[do_DIVF32_FULL]>;
\r
947 NVPTXInst<(outs Float32Regs:$dst),
\r
948 (ins Float32Regs:$a, f32imm:$b),
\r
949 "div.full.f32 \t$dst, $a, $b;",
\r
950 [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
\r
951 Requires<[do_DIVF32_FULL]>;
\r
// F32 accurate (IEEE round-to-nearest) reciprocal: 1.0f / b -> rcp.rn.
// rcp.rn requires PTX ISA 2.0 or later.
def FDIV321r_prec_ftz :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins f32imm:$a, Float32Regs:$b),
            "rcp.rn.ftz.f32 \t$dst, $b;",
            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
  Requires<[reqPTX20, doF32FTZ]>;
def FDIV321r_prec :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins f32imm:$a, Float32Regs:$b),
            "rcp.rn.f32 \t$dst, $b;",
            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
  Requires<[reqPTX20]>;
\r
// F32 accurate (IEEE round-to-nearest) division: div.rn, available from
// PTX ISA 2.0 onward.
def FDIV32rr_prec_ftz :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins Float32Regs:$a, Float32Regs:$b),
            "div.rn.ftz.f32 \t$dst, $a, $b;",
            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
  Requires<[doF32FTZ, reqPTX20]>;
def FDIV32ri_prec_ftz :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins Float32Regs:$a, f32imm:$b),
            "div.rn.ftz.f32 \t$dst, $a, $b;",
            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
  Requires<[doF32FTZ, reqPTX20]>;
def FDIV32rr_prec :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins Float32Regs:$a, Float32Regs:$b),
            "div.rn.f32 \t$dst, $a, $b;",
            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
  Requires<[reqPTX20]>;
def FDIV32ri_prec :
  NVPTXInst<(outs Float32Regs:$dst),
            (ins Float32Regs:$a, f32imm:$b),
            "div.rn.f32 \t$dst, $a, $b;",
            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
  Requires<[reqPTX20]>;
\r
999 multiclass FMA<string OpcStr, RegisterClass RC, Operand ImmCls, Predicate Pred> {
\r
1000 def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
\r
1001 !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
\r
1002 [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
\r
1004 def rri : NVPTXInst<(outs RC:$dst),
\r
1005 (ins RC:$a, RC:$b, ImmCls:$c),
\r
1006 !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
\r
1007 [(set RC:$dst, (fma RC:$a, RC:$b, fpimm:$c))]>,
\r
1009 def rir : NVPTXInst<(outs RC:$dst),
\r
1010 (ins RC:$a, ImmCls:$b, RC:$c),
\r
1011 !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
\r
1012 [(set RC:$dst, (fma RC:$a, fpimm:$b, RC:$c))]>,
\r
1014 def rii : NVPTXInst<(outs RC:$dst),
\r
1015 (ins RC:$a, ImmCls:$b, ImmCls:$c),
\r
1016 !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
\r
1017 [(set RC:$dst, (fma RC:$a, fpimm:$b, fpimm:$c))]>,
\r
// FMA for f16/f16x2 register classes. Requires FP16 math support in
// addition to the per-instantiation predicate (e.g. doF32FTZ for the
// .ftz variants). Only a register-register-register form exists, since
// fp16 immediates cannot be encoded directly (see LOAD_CONST_F16).
multiclass FMA_F16<string OpcStr, RegisterClass RC, Predicate Pred> {
  def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
                      !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
                      [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
            Requires<[useFP16Math, Pred]>;
}
\r
// fma.rn instantiations for f16, f16x2, f32 and f64.
defm FMA16_ftz   : FMA_F16<"fma.rn.ftz.f16", Float16Regs, doF32FTZ>;
defm FMA16       : FMA_F16<"fma.rn.f16", Float16Regs, true>;
defm FMA16x2_ftz : FMA_F16<"fma.rn.ftz.f16x2", Float16x2Regs, doF32FTZ>;
defm FMA16x2     : FMA_F16<"fma.rn.f16x2", Float16x2Regs, true>;
defm FMA32_ftz   : FMA<"fma.rn.ftz.f32", Float32Regs, f32imm, doF32FTZ>;
defm FMA32       : FMA<"fma.rn.f32", Float32Regs, f32imm, true>;
defm FMA64       : FMA<"fma.rn.f64", Float64Regs, f64imm, true>;
\r
// Approximate sine; only selected under unsafe FP math since
// sin.approx.f32 does not meet IEEE accuracy requirements.
def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
                    "sin.approx.f32 \t$dst, $src;",
                    [(set Float32Regs:$dst, (fsin Float32Regs:$src))]>,
          Requires<[allowUnsafeFPMath]>;
\r
// Approximate cosine; only selected under unsafe FP math since
// cos.approx.f32 does not meet IEEE accuracy requirements.
def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
                    "cos.approx.f32 \t$dst, $src;",
                    [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>,
          Requires<[allowUnsafeFPMath]>;
\r
1046 // Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y)),
\r
1047 // i.e. "poor man's fmod()"
\r
1050 def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
\r
1051 (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32
\r
1052 (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRMI_FTZ),
\r
1053 Float32Regs:$y))>,
\r
1054 Requires<[doF32FTZ]>;
\r
1055 def : Pat<(frem Float32Regs:$x, fpimm:$y),
\r
1056 (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32
\r
1057 (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRMI_FTZ),
\r
1059 Requires<[doF32FTZ]>;
\r
1062 def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
\r
1063 (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32
\r
1064 (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRMI),
\r
1065 Float32Regs:$y))>;
\r
1066 def : Pat<(frem Float32Regs:$x, fpimm:$y),
\r
1067 (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32
\r
1068 (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRMI),
\r
1072 def : Pat<(frem Float64Regs:$x, Float64Regs:$y),
\r
1073 (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64
\r
1074 (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRMI),
\r
1075 Float64Regs:$y))>;
\r
1076 def : Pat<(frem Float64Regs:$x, fpimm:$y),
\r
1077 (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64
\r
1078 (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRMI),
\r
1081 //-----------------------------------
\r
1082 // Bitwise operations
\r
1083 //-----------------------------------
\r
1085 // Template for three-arg bitwise operations. Takes three args, Creates .b16,
\r
1086 // .b32, .b64, and .pred (predicate registers -- i.e., i1) versions of OpcStr.
\r
1087 multiclass BITWISE<string OpcStr, SDNode OpNode> {
\r
1089 NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
\r
1090 !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
\r
1091 [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
\r
1093 NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
\r
1094 !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
\r
1095 [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
\r
1097 NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
\r
1098 !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
\r
1099 [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
\r
1101 NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
\r
1102 !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
\r
1103 [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
\r
1105 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
\r
1106 !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
\r
1107 [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
\r
1109 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
\r
1110 !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
\r
1111 [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
\r
1113 NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
\r
1114 !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
\r
1115 [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
\r
1117 NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
\r
1118 !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
\r
1119 [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
\r
1122 defm OR : BITWISE<"or", or>;
\r
1123 defm AND : BITWISE<"and", and>;
\r
1124 defm XOR : BITWISE<"xor", xor>;
\r
// Bitwise complement for predicate, b16, b32 and b64 operands.
def NOT1  : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
                      "not.pred \t$dst, $src;",
                      [(set Int1Regs:$dst, (not Int1Regs:$src))]>;
def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
                      "not.b16 \t$dst, $src;",
                      [(set Int16Regs:$dst, (not Int16Regs:$src))]>;
def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
                      "not.b32 \t$dst, $src;",
                      [(set Int32Regs:$dst, (not Int32Regs:$src))]>;
def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
                      "not.b64 \t$dst, $src;",
                      [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
\r
1139 // Template for left/right shifts. Takes three operands,
\r
1140 // [dest (reg), src (reg), shift (reg or imm)].
\r
1141 // dest and src may be int64, int32, or int16, but shift is always int32.
\r
1143 // This template also defines a 32-bit shift (imm, imm) instruction.
\r
1144 multiclass SHIFT<string OpcStr, SDNode OpNode> {
\r
1146 NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),
\r
1147 !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
\r
1148 [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int32Regs:$b))]>;
\r
1150 NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
\r
1151 !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
\r
1152 [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 imm:$b)))]>;
\r
1154 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
\r
1155 !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
\r
1156 [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
\r
1158 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
\r
1159 !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
\r
1160 [(set Int32Regs:$dst, (OpNode Int32Regs:$a, (i32 imm:$b)))]>;
\r
1162 NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
\r
1163 !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
\r
1164 [(set Int32Regs:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
\r
1166 NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),
\r
1167 !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
\r
1168 [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int32Regs:$b))]>;
\r
1170 NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
\r
1171 !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
\r
1172 [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>;
\r
1175 defm SHL : SHIFT<"shl.b", shl>;
\r
1176 defm SRA : SHIFT<"shr.s", sra>;
\r
1177 defm SRL : SHIFT<"shr.u", srl>;
\r
1181 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
\r
1182 "brev.b32 \t$dst, $a;",
\r
1183 [(set Int32Regs:$dst, (bitreverse Int32Regs:$a))]>;
\r
1185 NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a),
\r
1186 "brev.b64 \t$dst, $a;",
\r
1187 [(set Int64Regs:$dst, (bitreverse Int64Regs:$a))]>;
\r
// 32-bit rotates via the hardware funnel-shift instruction, when
// available (hasHWROT32):
//   r2 = rotl r1, n  ==>  r2 = shf.l.wrap r1, r1, n
//   r2 = rotr r1, n  ==>  r2 = shf.r.wrap r1, r1, n
def ROTL32imm_hw :
  NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
            "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
            [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
  Requires<[hasHWROT32]>;
def ROTL32reg_hw :
  NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
            "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
            [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
  Requires<[hasHWROT32]>;
def ROTR32imm_hw :
  NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
            "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
            [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
  Requires<[hasHWROT32]>;
def ROTR32reg_hw :
  NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
            "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
            [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
  Requires<[hasHWROT32]>;
\r
1223 // 32-bit software rotate by immediate. $amt2 should equal 32 - $amt1.
\r
1225 NVPTXInst<(outs Int32Regs:$dst),
\r
1226 (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
\r
1228 ".reg .b32 %lhs;\n\t"
\r
1229 ".reg .b32 %rhs;\n\t"
\r
1230 "shl.b32 \t%lhs, $src, $amt1;\n\t"
\r
1231 "shr.b32 \t%rhs, $src, $amt2;\n\t"
\r
1232 "add.u32 \t$dst, %lhs, %rhs;\n\t"
\r
1236 def SUB_FRM_32 : SDNodeXForm<imm, [{
\r
1237 return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
\r
// Without hardware funnel shift, lower rotate-by-immediate to the
// software shl/shr/add sequence; the complementary shift amount is
// computed at selection time as 32 - amt (SUB_FRM_32).
def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
          (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
      Requires<[noHWROT32]>;
def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
          (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
      Requires<[noHWROT32]>;
\r
1247 // 32-bit software rotate left by register.
\r
1248 def ROTL32reg_sw :
\r
1249 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
\r
1251 ".reg .b32 %lhs;\n\t"
\r
1252 ".reg .b32 %rhs;\n\t"
\r
1253 ".reg .b32 %amt2;\n\t"
\r
1254 "shl.b32 \t%lhs, $src, $amt;\n\t"
\r
1255 "sub.s32 \t%amt2, 32, $amt;\n\t"
\r
1256 "shr.b32 \t%rhs, $src, %amt2;\n\t"
\r
1257 "add.u32 \t$dst, %lhs, %rhs;\n\t"
\r
1259 [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
\r
1260 Requires<[noHWROT32]>;
\r
1262 // 32-bit software rotate right by register.
\r
1263 def ROTR32reg_sw :
\r
1264 NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
\r
1266 ".reg .b32 %lhs;\n\t"
\r
1267 ".reg .b32 %rhs;\n\t"
\r
1268 ".reg .b32 %amt2;\n\t"
\r
1269 "shr.b32 \t%lhs, $src, $amt;\n\t"
\r
1270 "sub.s32 \t%amt2, 32, $amt;\n\t"
\r
1271 "shl.b32 \t%rhs, $src, %amt2;\n\t"
\r
1272 "add.u32 \t$dst, %lhs, %rhs;\n\t"
\r
1274 [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
\r
1275 Requires<[noHWROT32]>;
\r
1277 // 64-bit software rotate by immediate. $amt2 should equal 64 - $amt1.
\r
1279 NVPTXInst<(outs Int64Regs:$dst),
\r
1280 (ins Int64Regs:$src, i32imm:$amt1, i32imm:$amt2),
\r
1282 ".reg .b64 %lhs;\n\t"
\r
1283 ".reg .b64 %rhs;\n\t"
\r
1284 "shl.b64 \t%lhs, $src, $amt1;\n\t"
\r
1285 "shr.b64 \t%rhs, $src, $amt2;\n\t"
\r
1286 "add.u64 \t$dst, %lhs, %rhs;\n\t"
\r
1290 def SUB_FRM_64 : SDNodeXForm<imm, [{
\r
1291 return CurDAG->getTargetConstant(64-N->getZExtValue(), SDLoc(N), MVT::i32);
\r
// 64-bit rotate by immediate is always lowered to the software sequence;
// the complementary shift amount is 64 - amt (SUB_FRM_64).
def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
          (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>;
def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),
          (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;
\r
1299 // 64-bit software rotate left by register.
\r
1300 def ROTL64reg_sw :
\r
1301 NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
\r
1303 ".reg .b64 %lhs;\n\t"
\r
1304 ".reg .b64 %rhs;\n\t"
\r
1305 ".reg .u32 %amt2;\n\t"
\r
1306 "shl.b64 \t%lhs, $src, $amt;\n\t"
\r
1307 "sub.u32 \t%amt2, 64, $amt;\n\t"
\r
1308 "shr.b64 \t%rhs, $src, %amt2;\n\t"
\r
1309 "add.u64 \t$dst, %lhs, %rhs;\n\t"
\r
1311 [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
\r
1313 def ROTR64reg_sw :
\r
1314 NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
\r
1316 ".reg .b64 %lhs;\n\t"
\r
1317 ".reg .b64 %rhs;\n\t"
\r
1318 ".reg .u32 %amt2;\n\t"
\r
1319 "shr.b64 \t%lhs, $src, $amt;\n\t"
\r
1320 "sub.u32 \t%amt2, 64, $amt;\n\t"
\r
1321 "shl.b64 \t%rhs, $src, %amt2;\n\t"
\r
1322 "add.u64 \t$dst, %lhs, %rhs;\n\t"
\r
1324 [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
\r
// Funnel shift in clamp mode
\r
// SDNodes for clamped funnel shifts, so DAG lowering code can create
// them (NVPTXISelLowering: LowerShiftLeftParts / LowerShiftRightParts).
// Result and the two data operands share a type; the shift amount is a
// (possibly different) integer type.
def SDTIntShiftDOp :
  SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
                       SDTCisInt<0>, SDTCisInt<3>]>;
def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
\r
// Clamp-mode funnel shifts (shf.{l,r}.clamp.b32): shift amounts are
// clamped rather than wrapped.
def FUNSHFLCLAMP :
  NVPTXInst<(outs Int32Regs:$dst),
            (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
            "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
            [(set Int32Regs:$dst,
               (FUN_SHFL_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;

def FUNSHFRCLAMP :
  NVPTXInst<(outs Int32Regs:$dst),
            (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
            "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
            [(set Int32Regs:$dst,
               (FUN_SHFR_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
\r
1353 // BFE - bit-field extract
\r
1356 // Template for BFE instructions. Takes four args,
\r
1357 // [dest (reg), src (reg), start (reg or imm), end (reg or imm)].
\r
1358 // Start may be an imm only if end is also an imm. FIXME: Is this a
\r
1359 // restriction in PTX?
\r
1361 // dest and src may be int32 or int64, but start and end are always int32.
\r
1362 multiclass BFE<string TyStr, RegisterClass RC> {
\r
1364 : NVPTXInst<(outs RC:$d),
\r
1365 (ins RC:$a, Int32Regs:$b, Int32Regs:$c),
\r
1366 !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
\r
1368 : NVPTXInst<(outs RC:$d),
\r
1369 (ins RC:$a, Int32Regs:$b, i32imm:$c),
\r
1370 !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
\r
1372 : NVPTXInst<(outs RC:$d),
\r
1373 (ins RC:$a, i32imm:$b, i32imm:$c),
\r
1374 !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
\r
1377 let hasSideEffects = 0 in {
\r
1378 defm BFE_S32 : BFE<"s32", Int32Regs>;
\r
1379 defm BFE_U32 : BFE<"u32", Int32Regs>;
\r
1380 defm BFE_S64 : BFE<"s64", Int64Regs>;
\r
1381 defm BFE_U64 : BFE<"u64", Int64Regs>;
\r
1384 //-----------------------------------
\r
1385 // Comparison instructions (setp, set)
\r
1386 //-----------------------------------
\r
1388 // FIXME: This doesn't cover versions of set and setp that combine with a
\r
1389 // boolean predicate, e.g. setp.eq.and.b16.
\r
1391 let hasSideEffects = 0 in {
\r
1392 multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {
\r
1394 NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, RC:$b, CmpMode:$cmp),
\r
1395 !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
\r
1396 " \t$dst, $a, $b;"), []>;
\r
1398 NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
\r
1399 !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
\r
1400 " \t$dst, $a, $b;"), []>;
\r
1402 NVPTXInst<(outs Int1Regs:$dst), (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
\r
1403 !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
\r
1404 " \t$dst, $a, $b;"), []>;
\r
// setp instantiations: one per comparison type, each paired with the
// matching-width immediate operand class.
defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>;
defm SETP_s16 : SETP<"s16", Int16Regs, i16imm>;
defm SETP_u16 : SETP<"u16", Int16Regs, i16imm>;
defm SETP_b32 : SETP<"b32", Int32Regs, i32imm>;
defm SETP_s32 : SETP<"s32", Int32Regs, i32imm>;
defm SETP_u32 : SETP<"u32", Int32Regs, i32imm>;
defm SETP_b64 : SETP<"b64", Int64Regs, i64imm>;
defm SETP_s64 : SETP<"s64", Int64Regs, i64imm>;
defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>;
defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>;
defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>;
\r
1420 NVPTXInst<(outs Int1Regs:$dst),
\r
1421 (ins Float16Regs:$a, Float16Regs:$b, CmpMode:$cmp),
\r
1422 "setp${cmp:base}${cmp:ftz}.f16 \t$dst, $a, $b;",
\r
1423 []>, Requires<[useFP16Math]>;
\r
1425 def SETP_f16x2rr :
\r
1426 NVPTXInst<(outs Int1Regs:$p, Int1Regs:$q),
\r
1427 (ins Float16x2Regs:$a, Float16x2Regs:$b, CmpMode:$cmp),
\r
1428 "setp${cmp:base}${cmp:ftz}.f16x2 \t$p|$q, $a, $b;",
\r
1430 Requires<[useFP16Math]>;
\r
1433 // FIXME: This doesn't appear to be correct. The "set" mnemonic has the form
\r
1434 // "set.CmpOp{.ftz}.dtype.stype", where dtype is the type of the destination
\r
1435 // reg, either u32, s32, or f32. Anyway these aren't used at the moment.
\r
1437 let hasSideEffects = 0 in {
\r
1438 multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {
\r
1439 def rr : NVPTXInst<(outs Int32Regs:$dst),
\r
1440 (ins RC:$a, RC:$b, CmpMode:$cmp),
\r
1441 !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
\r
1442 def ri : NVPTXInst<(outs Int32Regs:$dst),
\r
1443 (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
\r
1444 !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
\r
1445 def ir : NVPTXInst<(outs Int32Regs:$dst),
\r
1446 (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
\r
1447 !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
\r
// set instantiations: one per source comparison type, each paired with
// the matching-width immediate operand class.
defm SET_b16 : SET<"b16", Int16Regs, i16imm>;
defm SET_s16 : SET<"s16", Int16Regs, i16imm>;
defm SET_u16 : SET<"u16", Int16Regs, i16imm>;
defm SET_b32 : SET<"b32", Int32Regs, i32imm>;
defm SET_s32 : SET<"s32", Int32Regs, i32imm>;
defm SET_u32 : SET<"u32", Int32Regs, i32imm>;
defm SET_b64 : SET<"b64", Int64Regs, i64imm>;
defm SET_s64 : SET<"s64", Int64Regs, i64imm>;
defm SET_u64 : SET<"u64", Int64Regs, i64imm>;
defm SET_f16 : SET<"f16", Float16Regs, f16imm>;
defm SET_f32 : SET<"f32", Float32Regs, f32imm>;
defm SET_f64 : SET<"f64", Float64Regs, f64imm>;
\r
1464 //-----------------------------------
\r
1465 // Selection instructions (selp)
\r
1466 //-----------------------------------
\r
1468 // FIXME: Missing slct
\r
1470 // selp instructions that don't have any pattern matches; we explicitly use
\r
1471 // them within this file.
\r
1472 let hasSideEffects = 0 in {
\r
1473 multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {
\r
1474 def rr : NVPTXInst<(outs RC:$dst),
\r
1475 (ins RC:$a, RC:$b, Int1Regs:$p),
\r
1476 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
\r
1477 def ri : NVPTXInst<(outs RC:$dst),
\r
1478 (ins RC:$a, ImmCls:$b, Int1Regs:$p),
\r
1479 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
\r
1480 def ir : NVPTXInst<(outs RC:$dst),
\r
1481 (ins ImmCls:$a, RC:$b, Int1Regs:$p),
\r
1482 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
\r
1483 def ii : NVPTXInst<(outs RC:$dst),
\r
1484 (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
\r
1485 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
\r
1488 multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,
\r
1491 NVPTXInst<(outs RC:$dst),
\r
1492 (ins RC:$a, RC:$b, Int1Regs:$p),
\r
1493 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
\r
1494 [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;
\r
1496 NVPTXInst<(outs RC:$dst),
\r
1497 (ins RC:$a, ImmCls:$b, Int1Regs:$p),
\r
1498 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
\r
1499 [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;
\r
1501 NVPTXInst<(outs RC:$dst),
\r
1502 (ins ImmCls:$a, RC:$b, Int1Regs:$p),
\r
1503 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
\r
1504 [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;
\r
1506 NVPTXInst<(outs RC:$dst),
\r
1507 (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
\r
1508 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
\r
1509 [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
\r
// selp instantiations. Only the .b (and FP) variants get selection
// patterns: selp.b{16,32,64} is just as fast as selp.{s,u}{16,32,64},
// so pattern-matching the signed/unsigned forms would be redundant.
defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>;
defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>;
defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>;
defm SELP_b32 : SELP_PATTERN<"b32", Int32Regs, i32imm, imm>;
defm SELP_s32 : SELP<"s32", Int32Regs, i32imm>;
defm SELP_u32 : SELP<"u32", Int32Regs, i32imm>;
defm SELP_b64 : SELP_PATTERN<"b64", Int64Regs, i64imm, imm>;
defm SELP_s64 : SELP<"s64", Int64Regs, i64imm>;
defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;
defm SELP_f16 : SELP_PATTERN<"b16", Float16Regs, f16imm, fpimm>;
defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;
defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;
\r
// Select for f16x2: the pair lives in a 32-bit register, so selp.b32
// selects both halves at once.
def SELP_f16x2rr :
  NVPTXInst<(outs Float16x2Regs:$dst),
            (ins Float16x2Regs:$a, Float16x2Regs:$b, Int1Regs:$p),
            "selp.b32 \t$dst, $a, $b, $p;",
            [(set Float16x2Regs:$dst,
               (select Int1Regs:$p, Float16x2Regs:$a, Float16x2Regs:$b))]>;
\r
1535 //-----------------------------------
\r
1536 // Data Movement (Load / Store, Move)
\r
1537 //-----------------------------------
\r
1539 def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex],
\r
1541 def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri64", [frameindex],
\r
1544 def MEMri : Operand<i32> {
\r
1545 let PrintMethod = "printMemOperand";
\r
1546 let MIOperandInfo = (ops Int32Regs, i32imm);
\r
1548 def MEMri64 : Operand<i64> {
\r
1549 let PrintMethod = "printMemOperand";
\r
1550 let MIOperandInfo = (ops Int64Regs, i64imm);
\r
1553 def imem : Operand<iPTR> {
\r
1554 let PrintMethod = "printOperand";
\r
1557 def imemAny : Operand<iPTRAny> {
\r
1558 let PrintMethod = "printOperand";
\r
1561 def LdStCode : Operand<i32> {
\r
1562 let PrintMethod = "printLdStCode";
\r
// NVPTXISD::Wrapper wraps a pointer-typed symbol operand; result and
// operand share the (pointer) type.
def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
def Wrapper    : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;
\r
// Load the address of a global into a u32 or u64 register.
def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),
                         "mov.u32 \t$dst, $a;",
                         [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;

def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
                           "mov.u64 \t$dst, $a;",
                           [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
\r
1576 // Get pointer to local stack.
\r
1577 let hasSideEffects = 0 in {
\r
1578 def MOV_DEPOT_ADDR : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),
\r
1579 "mov.u32 \t$d, __local_depot$num;", []>;
\r
1580 def MOV_DEPOT_ADDR_64 : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),
\r
1581 "mov.u64 \t$d, __local_depot$num;", []>;
\r
1585 // copyPhysreg is hard-coded in NVPTXInstrInfo.cpp
\r
1586 let IsSimpleMove=1, hasSideEffects=0 in {
\r
1587 def IMOV1rr : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
\r
1588 "mov.pred \t$dst, $sss;", []>;
\r
1589 def IMOV16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
\r
1590 "mov.u16 \t$dst, $sss;", []>;
\r
1591 def IMOV32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
\r
1592 "mov.u32 \t$dst, $sss;", []>;
\r
1593 def IMOV64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),
\r
1594 "mov.u64 \t$dst, $sss;", []>;
\r
1596 def FMOV16rr : NVPTXInst<(outs Float16Regs:$dst), (ins Float16Regs:$src),
\r
1597 // We have to use .b16 here as there's no mov.f16.
\r
1598 "mov.b16 \t$dst, $src;", []>;
\r
1599 def FMOV32rr : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
\r
1600 "mov.f32 \t$dst, $src;", []>;
\r
1601 def FMOV64rr : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
\r
1602 "mov.f64 \t$dst, $src;", []>;
\r
// Immediate-to-register moves for integer and FP types. (The 64-bit
// integer variant keeps its historical name IMOV64i.)
def IMOV1ri : NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
                        "mov.pred \t$dst, $src;",
                        [(set Int1Regs:$dst, imm:$src)]>;
def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
                         "mov.u16 \t$dst, $src;",
                         [(set Int16Regs:$dst, imm:$src)]>;
def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
                         "mov.u32 \t$dst, $src;",
                         [(set Int32Regs:$dst, imm:$src)]>;
def IMOV64i : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
                        "mov.u64 \t$dst, $src;",
                        [(set Int64Regs:$dst, imm:$src)]>;

def FMOV32ri : NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
                         "mov.f32 \t$dst, $src;",
                         [(set Float32Regs:$dst, fpimm:$src)]>;
def FMOV64ri : NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
                         "mov.f64 \t$dst, $src;",
                         [(set Float64Regs:$dst, fpimm:$src)]>;

// External symbols are materialized with the 32-bit immediate mov.
def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;
\r
1627 //---- Copy Frame Index ----
\r
// Compute the effective address of a frame-index (base register plus
// constant offset); ${addr:add} prints the MEMri operand as "base, offset".
1628 def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
\r
1629 "add.u32 \t$dst, ${addr:add};",
\r
1630 [(set Int32Regs:$dst, ADDRri:$addr)]>;
\r
1631 def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
\r
1632 "add.u64 \t$dst, ${addr:add};",
\r
1633 [(set Int64Regs:$dst, ADDRri64:$addr)]>;
\r
1635 //-----------------------------------
\r
1636 // Comparison and Selection
\r
1637 //-----------------------------------
\r
// Selects the correct setp.*/set.* instruction for an integer setcc.
// `setp_*` variants produce an i1 predicate; `set_*` variants produce a
// 0/-1 i32 result. For each width (16/32/64) there are reg-reg (rr),
// reg-imm (ri) and imm-reg (ir) forms; `Mode` is the CmpMode operand.
1639 multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
\r
1640 Instruction setp_16rr,
\r
1641 Instruction setp_16ri,
\r
1642 Instruction setp_16ir,
\r
1643 Instruction setp_32rr,
\r
1644 Instruction setp_32ri,
\r
1645 Instruction setp_32ir,
\r
1646 Instruction setp_64rr,
\r
1647 Instruction setp_64ri,
\r
1648 Instruction setp_64ir,
\r
1649 Instruction set_16rr,
\r
1650 Instruction set_16ri,
\r
1651 Instruction set_16ir,
\r
1652 Instruction set_32rr,
\r
1653 Instruction set_32ri,
\r
1654 Instruction set_32ir,
\r
1655 Instruction set_64rr,
\r
1656 Instruction set_64ri,
\r
1657 Instruction set_64ir> {
\r
// i1 (predicate) results: setp.* over i16/i32/i64 operands.
1659 def : Pat<(i1 (OpNode Int16Regs:$a, Int16Regs:$b)),
\r
1660 (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
\r
1661 def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)),
\r
1662 (setp_16ri Int16Regs:$a, imm:$b, Mode)>;
\r
1663 def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)),
\r
1664 (setp_16ir imm:$a, Int16Regs:$b, Mode)>;
\r
1666 def : Pat<(i1 (OpNode Int32Regs:$a, Int32Regs:$b)),
\r
1667 (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
\r
1668 def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)),
\r
1669 (setp_32ri Int32Regs:$a, imm:$b, Mode)>;
\r
1670 def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)),
\r
1671 (setp_32ir imm:$a, Int32Regs:$b, Mode)>;
\r
1673 def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),
\r
1674 (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
\r
1675 def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)),
\r
1676 (setp_64ri Int64Regs:$a, imm:$b, Mode)>;
\r
1677 def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)),
\r
1678 (setp_64ir imm:$a, Int64Regs:$b, Mode)>;
\r
// i32 (0 / -1) results: set.* over i16/i32/i64 operands.
1681 def : Pat<(i32 (OpNode Int16Regs:$a, Int16Regs:$b)),
\r
1682 (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
\r
1683 def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)),
\r
1684 (set_16ri Int16Regs:$a, imm:$b, Mode)>;
\r
1685 def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)),
\r
1686 (set_16ir imm:$a, Int16Regs:$b, Mode)>;
\r
1688 def : Pat<(i32 (OpNode Int32Regs:$a, Int32Regs:$b)),
\r
1689 (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
\r
1690 def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)),
\r
1691 (set_32ri Int32Regs:$a, imm:$b, Mode)>;
\r
1692 def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)),
\r
1693 (set_32ir imm:$a, Int32Regs:$b, Mode)>;
\r
1695 def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),
\r
1696 (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
\r
1697 def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)),
\r
1698 (set_64ri Int64Regs:$a, imm:$b, Mode)>;
\r
1699 def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)),
\r
1700 (set_64ir imm:$a, Int64Regs:$b, Mode)>;
\r
// Convenience wrapper instantiating ISET_FORMAT with the signed
// (setp.s*/set.s*) instruction set.
1703 multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode>
\r
1704 : ISET_FORMAT<OpNode, Mode,
\r
1705 SETP_s16rr, SETP_s16ri, SETP_s16ir,
\r
1706 SETP_s32rr, SETP_s32ri, SETP_s32ir,
\r
1707 SETP_s64rr, SETP_s64ri, SETP_s64ir,
\r
1708 SET_s16rr, SET_s16ri, SET_s16ir,
\r
1709 SET_s32rr, SET_s32ri, SET_s32ir,
\r
1710 SET_s64rr, SET_s64ri, SET_s64ir> {
\r
1711 // TableGen doesn't like empty multiclasses.
\r
1712 def : PatLeaf<(i32 0)>;
\r
// Same wrapper for the unsigned (setp.u*/set.u*) instruction set.
1715 multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode>
\r
1716 : ISET_FORMAT<OpNode, Mode,
\r
1717 SETP_u16rr, SETP_u16ri, SETP_u16ir,
\r
1718 SETP_u32rr, SETP_u32ri, SETP_u32ir,
\r
1719 SETP_u64rr, SETP_u64ri, SETP_u64ir,
\r
1720 SET_u16rr, SET_u16ri, SET_u16ir,
\r
1721 SET_u32rr, SET_u32ri, SET_u32ir,
\r
1722 SET_u64rr, SET_u64ri, SET_u64ir> {
\r
1723 // TableGen doesn't like empty multiclasses.
\r
1724 def : PatLeaf<(i32 0)>;
\r
// Instantiate all integer comparison patterns: one signed and one
// unsigned family per ISD condition code.
1727 defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;
\r
1728 defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;
\r
1729 defm : ISET_FORMAT_SIGNED<setge, CmpGE>;
\r
1730 defm : ISET_FORMAT_SIGNED<setle, CmpLE>;
\r
1731 defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;
\r
1732 defm : ISET_FORMAT_SIGNED<setne, CmpNE>;
\r
1733 defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
\r
1734 defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
\r
1735 defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
\r
1736 defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
\r
1737 defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
\r
1738 defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;
\r
// i1 comparisons: for one-bit values, "not equal" is exactly XOR and
// "equal" is NOT(XOR). Unordered variants are identical for integers.
1741 def : Pat<(setne Int1Regs:$a, Int1Regs:$b),
\r
1742 (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
\r
1743 def : Pat<(setune Int1Regs:$a, Int1Regs:$b),
\r
1744 (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
\r
1746 def : Pat<(seteq Int1Regs:$a, Int1Regs:$b),
\r
1747 (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
\r
1748 def : Pat<(setueq Int1Regs:$a, Int1Regs:$b),
\r
1749 (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
\r
1751 // i1 compare -> i32
\r
// SELP d, a, b, p selects a when p is true. XORb1rr yields 1 iff the
// inputs differ, so:
//   setne -> select -1 (true) / 0 (false)
//   seteq -> select  0 (true) / -1 (false)
// BUG FIX: the second pattern previously repeated `setne`, making it an
// unreachable duplicate of the first and leaving i32 (seteq i1, i1)
// unselectable by these patterns; it must be `seteq`.
1752 def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
\r
1753 (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
1754 def : Pat<(i32 (seteq Int1Regs:$a, Int1Regs:$b)),
\r
1755 (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
\r
// Selects setp.*/set.* for a floating-point setcc. `Mode` is the plain
// comparison operand and `ModeFTZ` its flush-to-zero variant; FTZ
// patterns are guarded by the doF32FTZ predicate and f16 patterns by
// useFP16Math. f16 immediates are first materialized via LOAD_CONST_F16.
1759 multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
\r
// i1 results, f16 operands.
1761 def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
\r
1762 (SETP_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
\r
1763 Requires<[useFP16Math,doF32FTZ]>;
\r
1764 def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
\r
1765 (SETP_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
\r
1766 Requires<[useFP16Math]>;
\r
1767 def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
\r
1768 (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
\r
1769 Requires<[useFP16Math,doF32FTZ]>;
\r
1770 def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
\r
1771 (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
\r
1772 Requires<[useFP16Math]>;
\r
1773 def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
\r
1774 (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
\r
1775 Requires<[useFP16Math,doF32FTZ]>;
\r
1776 def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
\r
1777 (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
\r
1778 Requires<[useFP16Math]>;
\r
// i1 results, f32 operands (FTZ form first so it wins under doF32FTZ).
1781 def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
\r
1782 (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
\r
1783 Requires<[doF32FTZ]>;
\r
1784 def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
\r
1785 (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
\r
1786 def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
\r
1787 (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
\r
1788 Requires<[doF32FTZ]>;
\r
1789 def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
\r
1790 (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
\r
1791 def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
\r
1792 (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
\r
1793 Requires<[doF32FTZ]>;
\r
1794 def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
\r
1795 (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
\r
// i1 results, f64 operands (no FTZ variant for doubles).
1798 def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),
\r
1799 (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
\r
1800 def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),
\r
1801 (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
\r
1802 def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),
\r
1803 (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
\r
// i32 results, f16 operands.
1806 def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
\r
1807 (SET_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
\r
1808 Requires<[useFP16Math, doF32FTZ]>;
\r
1809 def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
\r
1810 (SET_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
\r
1811 Requires<[useFP16Math]>;
\r
1812 def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
\r
1813 (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
\r
1814 Requires<[useFP16Math, doF32FTZ]>;
\r
1815 def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
\r
1816 (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
\r
1817 Requires<[useFP16Math]>;
\r
1818 def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
\r
1819 (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
\r
1820 Requires<[useFP16Math, doF32FTZ]>;
\r
1821 def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
\r
1822 (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
\r
1823 Requires<[useFP16Math]>;
\r
// i32 results, f32 operands.
1826 def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
\r
1827 (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
\r
1828 Requires<[doF32FTZ]>;
\r
1829 def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
\r
1830 (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
\r
1831 def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
\r
1832 (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
\r
1833 Requires<[doF32FTZ]>;
\r
1834 def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
\r
1835 (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
\r
1836 def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
\r
1837 (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
\r
1838 Requires<[doF32FTZ]>;
\r
1839 def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
\r
1840 (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
\r
// i32 results, f64 operands.
1843 def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)),
\r
1844 (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
\r
1845 def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)),
\r
1846 (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
\r
1847 def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)),
\r
1848 (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
\r
// FP comparison families: ordered (O*), unordered (U*), "don't care"
// (plain), plus ordered/unordered tests themselves (NUM/NAN).
1851 defm FSetOGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;
\r
1852 defm FSetOLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;
\r
1853 defm FSetOGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;
\r
1854 defm FSetOLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;
\r
1855 defm FSetOEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;
\r
1856 defm FSetONE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;
\r
1858 defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>;
\r
1859 defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>;
\r
1860 defm FSetUGE : FSET_FORMAT<setuge, CmpGEU, CmpGEU_FTZ>;
\r
1861 defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>;
\r
1862 defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>;
\r
1863 defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>;
\r
1865 defm FSetGT : FSET_FORMAT<setgt, CmpGT, CmpGT_FTZ>;
\r
1866 defm FSetLT : FSET_FORMAT<setlt, CmpLT, CmpLT_FTZ>;
\r
1867 defm FSetGE : FSET_FORMAT<setge, CmpGE, CmpGE_FTZ>;
\r
1868 defm FSetLE : FSET_FORMAT<setle, CmpLE, CmpLE_FTZ>;
\r
1869 defm FSetEQ : FSET_FORMAT<seteq, CmpEQ, CmpEQ_FTZ>;
\r
1870 defm FSetNE : FSET_FORMAT<setne, CmpNE, CmpNE_FTZ>;
\r
1872 defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;
\r
1873 defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;
\r
1875 // FIXME: What is this doing here? Can it be deleted?
\r
1876 // def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
\r
1877 // [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
\r
// SDTypeProfile<NumResults, NumOperands, [constraints]> shapes for the
// custom NVPTXISD call-lowering nodes declared below.
1879 def SDTDeclareParamProfile :
\r
1880 SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
\r
1881 def SDTDeclareScalarParamProfile :
\r
1882 SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
\r
1883 def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
\r
1884 def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>;
\r
1885 def SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>;
\r
1886 def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
\r
1887 def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
\r
1888 def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
\r
1889 def SDTStoreParamV2Profile : SDTypeProfile<0, 4, [SDTCisInt<0>, SDTCisInt<1>]>;
\r
1890 def SDTStoreParamV4Profile : SDTypeProfile<0, 6, [SDTCisInt<0>, SDTCisInt<1>]>;
\r
1891 def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
\r
1892 def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
\r
1893 def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>;
\r
1894 def SDTCallVoidProfile : SDTypeProfile<0, 1, []>;
\r
1895 def SDTCallValProfile : SDTypeProfile<1, 0, []>;
\r
1896 def SDTMoveParamProfile : SDTypeProfile<1, 1, []>;
\r
1897 def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
\r
1898 def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;
\r
1899 def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;
\r
1900 def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
\r
// SDNodes emitted by NVPTX call lowering to declare .param space for
// outgoing arguments and return values; chained/glued with side effects
// so they keep their order in the call sequence.
1902 def DeclareParam :
\r
1903 SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
\r
1904 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1905 def DeclareScalarParam :
\r
1906 SDNode<"NVPTXISD::DeclareScalarParam", SDTDeclareScalarParamProfile,
\r
1907 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1908 def DeclareRetParam :
\r
1909 SDNode<"NVPTXISD::DeclareRetParam", SDTDeclareParamProfile,
\r
1910 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1912 SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
\r
1913 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1915 SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
\r
1916 [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
\r
1918 SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,
\r
1919 [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
\r
1921 SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,
\r
1922 [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
\r
1924 SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
\r
1925 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
// Nodes that print the textual "call"/"call.uni" opening of a PTX call;
// the convergent variants exist so isConvergent can be set on the
// matching instructions.
1926 def PrintConvergentCall :
\r
1927 SDNode<"NVPTXISD::PrintConvergentCall", SDTPrintCallProfile,
\r
1928 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1929 def PrintCallUni :
\r
1930 SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
\r
1931 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1932 def PrintConvergentCallUni :
\r
1933 SDNode<"NVPTXISD::PrintConvergentCallUni", SDTPrintCallUniProfile,
\r
1934 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1936 SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
\r
1937 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
// Nodes that store outgoing argument values into .param space (scalar,
// v2, v4, and 32-bit sign/zero-extended forms), plus the marker that
// opens the call's argument list.
1938 def StoreParamV2 :
\r
1939 SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,
\r
1940 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1941 def StoreParamV4 :
\r
1942 SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,
\r
1943 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1944 def StoreParamU32 :
\r
1945 SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
\r
1946 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1947 def StoreParamS32 :
\r
1948 SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
\r
1949 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1950 def CallArgBegin :
\r
1951 SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
\r
1952 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1954 SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
\r
1955 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1957 SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,
\r
1958 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1960 SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,
\r
1961 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1963 SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,
\r
1964 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1966 SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,
\r
1967 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1969 SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
\r
1970 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1972 SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile, []>;
\r
1974 SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
\r
1975 [SDNPHasChain, SDNPSideEffect]>;
\r
// Nodes that store a callee's return value(s) into the retval .param
// area, and the pseudo-use that keeps incoming params alive.
1976 def StoreRetvalV2 :
\r
1977 SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,
\r
1978 [SDNPHasChain, SDNPSideEffect]>;
\r
1979 def StoreRetvalV4 :
\r
1980 SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,
\r
1981 [SDNPHasChain, SDNPSideEffect]>;
\r
1982 def PseudoUseParam :
\r
1983 SDNode<"NVPTXISD::PseudoUseParam", SDTPseudoUseParamProfile,
\r
1984 [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
1986 SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
\r
1987 [SDNPHasChain, SDNPSideEffect]>;
\r
1989 let mayLoad = 1 in {
\r
1990 class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
\r
1991 NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
\r
1992 !strconcat("ld.param", opstr, " \t$dst, [retval0+$b];"),
\r
1995 class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :
\r
1996 NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),
\r
1997 !strconcat("ld.param.v2", opstr,
\r
1998 " \t{{$dst, $dst2}}, [retval0+$b];"), []>;
\r
2000 class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :
\r
2001 NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,
\r
2004 !strconcat("ld.param.v4", opstr,
\r
2005 " \t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"),
\r
2009 class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
\r
2010 NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
\r
2011 !strconcat("mov", opstr, " \t$dst, retval$b;"),
\r
2012 [(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
\r
2014 let mayStore = 1 in {
\r
2015 class StoreParamInst<NVPTXRegClass regclass, string opstr> :
\r
2016 NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
\r
2017 !strconcat("st.param", opstr, " \t[param$a+$b], $val;"),
\r
2020 class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
\r
2021 NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
\r
2022 i32imm:$a, i32imm:$b),
\r
2023 !strconcat("st.param.v2", opstr,
\r
2024 " \t[param$a+$b], {{$val, $val2}};"),
\r
2027 class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
\r
2028 NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3,
\r
2029 regclass:$val4, i32imm:$a,
\r
2031 !strconcat("st.param.v4", opstr,
\r
2032 " \t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
\r
2035 class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
\r
2036 NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
\r
2037 !strconcat("st.param", opstr, " \t[func_retval0+$a], $val;"),
\r
2040 class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :
\r
2041 NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),
\r
2042 !strconcat("st.param.v2", opstr,
\r
2043 " \t[func_retval0+$a], {{$val, $val2}};"),
\r
2046 class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :
\r
2048 (ins regclass:$val, regclass:$val2, regclass:$val3,
\r
2049 regclass:$val4, i32imm:$a),
\r
2050 !strconcat("st.param.v4", opstr,
\r
2051 " \t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),
\r
2056 multiclass CALL<string OpcStr, SDNode OpNode> {
\r
2057 def PrintCallNoRetInst : NVPTXInst<(outs), (ins),
\r
2058 !strconcat(OpcStr, " "), [(OpNode (i32 0))]>;
\r
2059 def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
\r
2060 !strconcat(OpcStr, " (retval0), "), [(OpNode (i32 1))]>;
\r
2061 def PrintCallRetInst2 : NVPTXInst<(outs), (ins),
\r
2062 !strconcat(OpcStr, " (retval0, retval1), "), [(OpNode (i32 2))]>;
\r
2063 def PrintCallRetInst3 : NVPTXInst<(outs), (ins),
\r
2064 !strconcat(OpcStr, " (retval0, retval1, retval2), "), [(OpNode (i32 3))]>;
\r
2065 def PrintCallRetInst4 : NVPTXInst<(outs), (ins),
\r
2066 !strconcat(OpcStr, " (retval0, retval1, retval2, retval3), "),
\r
2067 [(OpNode (i32 4))]>;
\r
2068 def PrintCallRetInst5 : NVPTXInst<(outs), (ins),
\r
2069 !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4), "),
\r
2070 [(OpNode (i32 5))]>;
\r
2071 def PrintCallRetInst6 : NVPTXInst<(outs), (ins),
\r
2072 !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
\r
2074 [(OpNode (i32 6))]>;
\r
2075 def PrintCallRetInst7 : NVPTXInst<(outs), (ins),
\r
2076 !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
\r
2077 "retval5, retval6), "),
\r
2078 [(OpNode (i32 7))]>;
\r
2079 def PrintCallRetInst8 : NVPTXInst<(outs), (ins),
\r
2080 !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
\r
2081 "retval5, retval6, retval7), "),
\r
2082 [(OpNode (i32 8))]>;
\r
// Instantiate the call-printing instructions for plain and uniform calls.
2086 defm Call : CALL<"call", PrintCall>;
\r
2087 defm CallUni : CALL<"call.uni", PrintCallUni>;
\r
2089 // Convergent call instructions. These are identical to regular calls, except
\r
2090 // they have the isConvergent bit set.
\r
2091 let isConvergent=1 in {
\r
2092 defm ConvergentCall : CALL<"call", PrintConvergentCall>;
\r
2093 defm ConvergentCallUni : CALL<"call.uni", PrintConvergentCallUni>;
\r
// Per-type instantiations of the "ld.param ... [retval0+off]" loaders.
// Sub-i16 and f16 payloads travel in 16-bit registers; f16x2 pairs use a
// 32-bit move (.b32).
2096 def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;
\r
2097 def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;
\r
2098 def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">;
\r
2099 def LoadParamMemI8 : LoadParamMemInst<Int16Regs, ".b8">;
\r
2100 def LoadParamMemV2I64 : LoadParamV2MemInst<Int64Regs, ".b64">;
\r
2101 def LoadParamMemV2I32 : LoadParamV2MemInst<Int32Regs, ".b32">;
\r
2102 def LoadParamMemV2I16 : LoadParamV2MemInst<Int16Regs, ".b16">;
\r
2103 def LoadParamMemV2I8 : LoadParamV2MemInst<Int16Regs, ".b8">;
\r
2104 def LoadParamMemV4I32 : LoadParamV4MemInst<Int32Regs, ".b32">;
\r
2105 def LoadParamMemV4I16 : LoadParamV4MemInst<Int16Regs, ".b16">;
\r
2106 def LoadParamMemV4I8 : LoadParamV4MemInst<Int16Regs, ".b8">;
\r
2107 def LoadParamMemF16 : LoadParamMemInst<Float16Regs, ".b16">;
\r
2108 def LoadParamMemF16x2 : LoadParamMemInst<Float16x2Regs, ".b32">;
\r
2109 def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">;
\r
2110 def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">;
\r
2111 def LoadParamMemV2F16 : LoadParamV2MemInst<Float16Regs, ".b16">;
\r
2112 def LoadParamMemV2F16x2: LoadParamV2MemInst<Float16x2Regs, ".b32">;
\r
2113 def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">;
\r
2114 def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">;
\r
2115 def LoadParamMemV4F16 : LoadParamV4MemInst<Float16Regs, ".b16">;
\r
2116 def LoadParamMemV4F16x2: LoadParamV4MemInst<Float16x2Regs, ".b32">;
\r
2117 def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">;
\r
// Per-type instantiations of the "st.param ... [param$a+$b]" stores used
// to pass outgoing call arguments.
2119 def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;
\r
2120 def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
\r
2122 def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;
\r
2123 def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;
\r
2124 def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;
\r
2125 def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
\r
2126 def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
\r
2127 def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
\r
2129 def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
\r
2130 def StoreParamV4I16 : StoreParamV4Inst<Int16Regs, ".b16">;
\r
2131 def StoreParamV4I8 : StoreParamV4Inst<Int16Regs, ".b8">;
\r
2133 def StoreParamF16 : StoreParamInst<Float16Regs, ".b16">;
\r
2134 def StoreParamF16x2 : StoreParamInst<Float16x2Regs, ".b32">;
\r
2135 def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
\r
2136 def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
\r
2137 def StoreParamV2F16 : StoreParamV2Inst<Float16Regs, ".b16">;
\r
2138 def StoreParamV2F16x2 : StoreParamV2Inst<Float16x2Regs, ".b32">;
\r
2139 def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;
\r
2140 def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;
\r
2141 def StoreParamV4F16 : StoreParamV4Inst<Float16Regs, ".b16">;
\r
2142 def StoreParamV4F16x2 : StoreParamV4Inst<Float16x2Regs, ".b32">;
\r
2143 def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
\r
// Per-type "st.param ... [func_retval0+$a]" stores for returning values
// from a callee, plus the punctuation pseudo-instructions that print the
// argument-list parentheses and the final "ret;".
2145 def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
\r
2146 def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
\r
2147 def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;
\r
2148 def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">;
\r
2149 def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">;
\r
2150 def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">;
\r
2151 def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">;
\r
2152 def StoreRetvalV2I8 : StoreRetvalV2Inst<Int16Regs, ".b8">;
\r
2153 def StoreRetvalV4I32 : StoreRetvalV4Inst<Int32Regs, ".b32">;
\r
2154 def StoreRetvalV4I16 : StoreRetvalV4Inst<Int16Regs, ".b16">;
\r
2155 def StoreRetvalV4I8 : StoreRetvalV4Inst<Int16Regs, ".b8">;
\r
2157 def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">;
\r
2158 def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">;
\r
2159 def StoreRetvalF16 : StoreRetvalInst<Float16Regs, ".b16">;
\r
2160 def StoreRetvalF16x2 : StoreRetvalInst<Float16x2Regs, ".b32">;
\r
2161 def StoreRetvalV2F64 : StoreRetvalV2Inst<Float64Regs, ".f64">;
\r
2162 def StoreRetvalV2F32 : StoreRetvalV2Inst<Float32Regs, ".f32">;
\r
2163 def StoreRetvalV2F16 : StoreRetvalV2Inst<Float16Regs, ".b16">;
\r
2164 def StoreRetvalV2F16x2: StoreRetvalV2Inst<Float16x2Regs, ".b32">;
\r
2165 def StoreRetvalV4F32 : StoreRetvalV4Inst<Float32Regs, ".f32">;
\r
2166 def StoreRetvalV4F16 : StoreRetvalV4Inst<Float16Regs, ".b16">;
\r
2167 def StoreRetvalV4F16x2: StoreRetvalV4Inst<Float16x2Regs, ".b32">;
\r
// The call's argument list is assembled textually: "(" ... ")" or ");".
2169 def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>;
\r
2170 def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>;
\r
2171 def CallArgEndInst0 : NVPTXInst<(outs), (ins), ")", [(CallArgEnd (i32 0))]>;
\r
2172 def RETURNInst : NVPTXInst<(outs), (ins), "ret;", [(RETURNNode)]>;
\r
// Prints a single call argument; non-last arguments get a trailing ", ".
2174 class CallArgInst<NVPTXRegClass regclass> :
\r
2175 NVPTXInst<(outs), (ins regclass:$a), "$a, ",
\r
2176 [(CallArg (i32 0), regclass:$a)]>;
\r
2178 class LastCallArgInst<NVPTXRegClass regclass> :
\r
2179 NVPTXInst<(outs), (ins regclass:$a), "$a",
\r
2180 [(LastCallArg (i32 0), regclass:$a)]>;
\r
2182 def CallArgI64 : CallArgInst<Int64Regs>;
\r
2183 def CallArgI32 : CallArgInst<Int32Regs>;
\r
2184 def CallArgI16 : CallArgInst<Int16Regs>;
\r
2185 def CallArgF64 : CallArgInst<Float64Regs>;
\r
2186 def CallArgF32 : CallArgInst<Float32Regs>;
\r
2188 def LastCallArgI64 : LastCallArgInst<Int64Regs>;
\r
2189 def LastCallArgI32 : LastCallArgInst<Int32Regs>;
\r
2190 def LastCallArgI16 : LastCallArgInst<Int16Regs>;
\r
2191 def LastCallArgF64 : LastCallArgInst<Float64Regs>;
\r
2192 def LastCallArgF32 : LastCallArgInst<Float32Regs>;
\r
// Immediate arguments: flag 0 prints the raw value, flag 1 prints the
// "param<n>" symbol of a by-param argument.
2194 def CallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a, ",
\r
2195 [(CallArg (i32 0), (i32 imm:$a))]>;
\r
2196 def LastCallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a",
\r
2197 [(LastCallArg (i32 0), (i32 imm:$a))]>;
\r
2199 def CallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a, ",
\r
2200 [(CallArg (i32 1), (i32 imm:$a))]>;
\r
2201 def LastCallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a",
\r
2202 [(LastCallArg (i32 1), (i32 imm:$a))]>;
\r
// Print the callee itself: direct symbol, or indirect via a 32/64-bit
// register; PrototypeInst emits the indirect-call prototype reference.
2204 def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr), "$addr, ",
\r
2205 [(CallVoid (Wrapper tglobaladdr:$addr))]>;
\r
2206 def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr), "$addr, ",
\r
2207 [(CallVoid Int32Regs:$addr)]>;
\r
2208 def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr), "$addr, ",
\r
2209 [(CallVoid Int64Regs:$addr)]>;
\r
2210 def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val), ", prototype_$val;",
\r
2211 [(Prototype (i32 imm:$val))]>;
\r
// Emit .param/.reg declarations for return values and outgoing
// arguments. The (i32 1)/(i32 2)/(i32 0) discriminators in the patterns
// select memory vs. scalar vs. register declaration forms.
2213 def DeclareRetMemInst :
\r
2214 NVPTXInst<(outs), (ins i32imm:$align, i32imm:$size, i32imm:$num),
\r
2215 ".param .align $align .b8 retval$num[$size];",
\r
2216 [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;
\r
2217 def DeclareRetScalarInst :
\r
2218 NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
\r
2219 ".param .b$size retval$num;",
\r
2220 [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;
\r
2221 def DeclareRetRegInst :
\r
2222 NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
\r
2223 ".reg .b$size retval$num;",
\r
2224 [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;
\r
2226 def DeclareParamInst :
\r
2227 NVPTXInst<(outs), (ins i32imm:$align, i32imm:$a, i32imm:$size),
\r
2228 ".param .align $align .b8 param$a[$size];",
\r
2229 [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;
\r
2230 def DeclareScalarParamInst :
\r
2231 NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
\r
2232 ".param .b$size param$a;",
\r
2233 [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;
\r
2234 def DeclareScalarRegInst :
\r
2235 NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
\r
2236 ".reg .b$size param$a;",
\r
2237 [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;
\r
// Copy an incoming parameter into a regular virtual register via a
// typed "mov"; I16 is special-cased as a cvt.u16.u32 truncation.
2239 class MoveParamInst<NVPTXRegClass regclass, string asmstr> :
\r
2240 NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
\r
2241 !strconcat("mov", asmstr, " \t$dst, $src;"),
\r
2242 [(set regclass:$dst, (MoveParam regclass:$src))]>;
\r
2244 def MoveParamI64 : MoveParamInst<Int64Regs, ".b64">;
\r
2245 def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;
\r
2246 def MoveParamI16 :
\r
2247 NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
\r
2248 "cvt.u16.u32 \t$dst, $src;",
\r
2249 [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
\r
2250 def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;
\r
2251 def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;
\r
2252 def MoveParamF16 : MoveParamInst<Float16Regs, ".f16">;
\r
// Emits only a comment; keeps a param value alive through codegen
// without generating a real PTX instruction.
2254 class PseudoUseParamInst<NVPTXRegClass regclass> :
\r
2255 NVPTXInst<(outs), (ins regclass:$src),
\r
2256 "// Pseudo use of $src",
\r
2257 [(PseudoUseParam regclass:$src)]>;
\r
2259 def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;
\r
2260 def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;
\r
2261 def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>;
\r
2262 def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;
\r
2263 def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
\r
2267 // Load / Store Handling
\r
// Scalar load instructions, one variant per addressing mode:
//   _avar:    direct (symbolic) address        _areg:   32-bit register address
//   _areg_64: 64-bit register address          _ari:    32-bit register + imm offset
//   _ari_64:  64-bit register + imm offset     _asi:    symbolic address + imm offset
// The LdStCode operands select volatile/address-space/vector/signedness
// modifiers; $fromWidth is the in-memory width in bits.
multiclass LD<NVPTXRegClass regclass> {
  def _avar : NVPTXInst<
    (outs regclass:$dst),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, imem:$addr),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t$dst, [$addr];", []>;
  def _areg : NVPTXInst<
    (outs regclass:$dst),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int32Regs:$addr),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t$dst, [$addr];", []>;
  def _areg_64 : NVPTXInst<
    (outs regclass:$dst),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int64Regs:$addr),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t$dst, [$addr];", []>;
  def _ari : NVPTXInst<
    (outs regclass:$dst),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t$dst, [$addr+$offset];", []>;
  def _ari_64 : NVPTXInst<
    (outs regclass:$dst),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
         LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t$dst, [$addr+$offset];", []>;
  def _asi : NVPTXInst<
    (outs regclass:$dst),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
         LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t$dst, [$addr+$offset];", []>;
}
\r
// Instantiate scalar loads for each type. i8 loads use 16-bit registers
// since PTX has no 8-bit register class.
let mayLoad=1, hasSideEffects=0 in {
  defm LD_i8 : LD<Int16Regs>;
  defm LD_i16 : LD<Int16Regs>;
  defm LD_i32 : LD<Int32Regs>;
  defm LD_i64 : LD<Int64Regs>;
  defm LD_f16 : LD<Float16Regs>;
  defm LD_f16x2 : LD<Float16x2Regs>;
  defm LD_f32 : LD<Float32Regs>;
  defm LD_f64 : LD<Float64Regs>;
}
\r
// Scalar store instructions; same addressing-mode variants as multiclass LD.
// Stores produce no results, hence the empty (outs).
multiclass ST<NVPTXRegClass regclass> {
  def _avar : NVPTXInst<
    (outs),
    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
         LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
    " \t[$addr], $src;", []>;
  def _areg : NVPTXInst<
    (outs),
    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp,
         LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
    " \t[$addr], $src;", []>;
  def _areg_64 : NVPTXInst<
    (outs),
    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
         LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
    " \t[$addr], $src;", []>;
  def _ari : NVPTXInst<
    (outs),
    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
         LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
    " \t[$addr+$offset], $src;", []>;
  def _ari_64 : NVPTXInst<
    (outs),
    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
         LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
    " \t[$addr+$offset], $src;", []>;
  def _asi : NVPTXInst<
    (outs),
    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
         LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
    " \t[$addr+$offset], $src;", []>;
}
\r
// Instantiate scalar stores for each type (i8 shares 16-bit registers).
let mayStore=1, hasSideEffects=0 in {
  defm ST_i8 : ST<Int16Regs>;
  defm ST_i16 : ST<Int16Regs>;
  defm ST_i32 : ST<Int32Regs>;
  defm ST_i64 : ST<Int64Regs>;
  defm ST_f16 : ST<Float16Regs>;
  defm ST_f16x2 : ST<Float16x2Regs>;
  defm ST_f32 : ST<Float32Regs>;
  defm ST_f64 : ST<Float64Regs>;
}
\r
// The following is used only in and after vector elementizations. Vector
// elementization happens at the machine instruction level, so the following
// instructions never appear in the DAG.
\r
// Vector (v2/v4) load instructions, mirroring the addressing-mode variants
// of multiclass LD but writing two or four destination registers.
multiclass LD_VEC<NVPTXRegClass regclass> {
  def _v2_avar : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, imem:$addr),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2}}, [$addr];", []>;
  def _v2_areg : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int32Regs:$addr),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2}}, [$addr];", []>;
  def _v2_areg_64 : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int64Regs:$addr),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2}}, [$addr];", []>;
  def _v2_ari : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
  def _v2_ari_64 : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
  def _v2_asi : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, imem:$addr, i32imm:$offset),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
  def _v4_avar : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, imem:$addr),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
  def _v4_areg : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int32Regs:$addr),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
  def _v4_areg_64 : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int64Regs:$addr),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
  def _v4_ari : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
  def _v4_ari_64 : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
  def _v4_asi : NVPTXInst<
    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, imem:$addr, i32imm:$offset),
    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
}
\r
// Instantiate vector loads for each element type.
let mayLoad=1, hasSideEffects=0 in {
  defm LDV_i8 : LD_VEC<Int16Regs>;
  defm LDV_i16 : LD_VEC<Int16Regs>;
  defm LDV_i32 : LD_VEC<Int32Regs>;
  defm LDV_i64 : LD_VEC<Int64Regs>;
  defm LDV_f16 : LD_VEC<Float16Regs>;
  defm LDV_f16x2 : LD_VEC<Float16x2Regs>;
  defm LDV_f32 : LD_VEC<Float32Regs>;
  defm LDV_f64 : LD_VEC<Float64Regs>;
}
\r
// Vector (v2/v4) store instructions, mirroring LD_VEC's addressing modes.
// Stores produce no results, hence the empty (outs).
multiclass ST_VEC<NVPTXRegClass regclass> {
  def _v2_avar : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t[$addr], {{$src1, $src2}};", []>;
  def _v2_areg : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t[$addr], {{$src1, $src2}};", []>;
  def _v2_areg_64 : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t[$addr], {{$src1, $src2}};", []>;
  def _v2_ari : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
         i32imm:$offset),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t[$addr+$offset], {{$src1, $src2}};", []>;
  def _v2_ari_64 : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,
         i32imm:$offset),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t[$addr+$offset], {{$src1, $src2}};", []>;
  def _v2_asi : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
         i32imm:$offset),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t[$addr+$offset], {{$src1, $src2}};", []>;
  def _v4_avar : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, imem:$addr),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
  def _v4_areg : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int32Regs:$addr),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
  def _v4_areg_64 : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int64Regs:$addr),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
  def _v4_ari : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
  def _v4_ari_64 : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
    "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
  def _v4_asi : NVPTXInst<
    (outs),
    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
         i32imm:$fromWidth, imem:$addr, i32imm:$offset),
    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}"
    "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
}
\r
// Instantiate vector stores for each element type.
let mayStore=1, hasSideEffects=0 in {
  defm STV_i8 : ST_VEC<Int16Regs>;
  defm STV_i16 : ST_VEC<Int16Regs>;
  defm STV_i32 : ST_VEC<Int32Regs>;
  defm STV_i64 : ST_VEC<Int64Regs>;
  defm STV_f16 : ST_VEC<Float16Regs>;
  defm STV_f16x2 : ST_VEC<Float16x2Regs>;
  defm STV_f32 : ST_VEC<Float32Regs>;
  defm STV_f64 : ST_VEC<Float64Regs>;
}
\r
//---- Conversion ----
\r
// Bit-preserving moves (mov.bNN) between same-width integer and FP register
// classes; selected for the ISD bitconvert node.
class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,
                   NVPTXRegClass regclassOut> :
           NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
           !strconcat("mov.b", !strconcat(SzStr, " \t$d, $a;")),
     [(set regclassOut:$d, (bitconvert regclassIn:$a))]>;

def BITCONVERT_16_I2F : F_BITCONVERT<"16", Int16Regs, Float16Regs>;
def BITCONVERT_16_F2I : F_BITCONVERT<"16", Float16Regs, Int16Regs>;
def BITCONVERT_32_I2F : F_BITCONVERT<"32", Int32Regs, Float32Regs>;
def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>;
def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>;
def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>;
def BITCONVERT_32_I2F16x2 : F_BITCONVERT<"32", Int32Regs, Float16x2Regs>;
def BITCONVERT_32_F16x22I : F_BITCONVERT<"32", Float16x2Regs, Int32Regs>;
\r
// NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where
// we cannot specify floating-point literals in isel patterns. Therefore, we
// use an integer selp to select either 1 or 0 and then cvt to floating-point.

// sint -> f16
def : Pat<(f16 (sint_to_fp Int1Regs:$a)),
          (CVT_f16_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
def : Pat<(f16 (sint_to_fp Int16Regs:$a)),
          (CVT_f16_s16 Int16Regs:$a, CvtRN)>;
def : Pat<(f16 (sint_to_fp Int32Regs:$a)),
          (CVT_f16_s32 Int32Regs:$a, CvtRN)>;
def : Pat<(f16 (sint_to_fp Int64Regs:$a)),
          (CVT_f16_s64 Int64Regs:$a, CvtRN)>;

// uint -> f16
def : Pat<(f16 (uint_to_fp Int1Regs:$a)),
          (CVT_f16_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
def : Pat<(f16 (uint_to_fp Int16Regs:$a)),
          (CVT_f16_u16 Int16Regs:$a, CvtRN)>;
def : Pat<(f16 (uint_to_fp Int32Regs:$a)),
          (CVT_f16_u32 Int32Regs:$a, CvtRN)>;
def : Pat<(f16 (uint_to_fp Int64Regs:$a)),
          (CVT_f16_u64 Int64Regs:$a, CvtRN)>;

// sint -> f32
def : Pat<(f32 (sint_to_fp Int1Regs:$a)),
          (CVT_f32_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
def : Pat<(f32 (sint_to_fp Int16Regs:$a)),
          (CVT_f32_s16 Int16Regs:$a, CvtRN)>;
def : Pat<(f32 (sint_to_fp Int32Regs:$a)),
          (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
def : Pat<(f32 (sint_to_fp Int64Regs:$a)),
          (CVT_f32_s64 Int64Regs:$a, CvtRN)>;

// uint -> f32
def : Pat<(f32 (uint_to_fp Int1Regs:$a)),
          (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
def : Pat<(f32 (uint_to_fp Int16Regs:$a)),
          (CVT_f32_u16 Int16Regs:$a, CvtRN)>;
def : Pat<(f32 (uint_to_fp Int32Regs:$a)),
          (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
def : Pat<(f32 (uint_to_fp Int64Regs:$a)),
          (CVT_f32_u64 Int64Regs:$a, CvtRN)>;

// sint -> f64
def : Pat<(f64 (sint_to_fp Int1Regs:$a)),
          (CVT_f64_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
def : Pat<(f64 (sint_to_fp Int16Regs:$a)),
          (CVT_f64_s16 Int16Regs:$a, CvtRN)>;
def : Pat<(f64 (sint_to_fp Int32Regs:$a)),
          (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
def : Pat<(f64 (sint_to_fp Int64Regs:$a)),
          (CVT_f64_s64 Int64Regs:$a, CvtRN)>;

// uint -> f64
def : Pat<(f64 (uint_to_fp Int1Regs:$a)),
          (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
def : Pat<(f64 (uint_to_fp Int16Regs:$a)),
          (CVT_f64_u16 Int16Regs:$a, CvtRN)>;
def : Pat<(f64 (uint_to_fp Int32Regs:$a)),
          (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
def : Pat<(f64 (uint_to_fp Int64Regs:$a)),
          (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
\r
// fp -> int conversions. FTZ variants are listed first so they win under
// Requires<[doF32FTZ]>; f64 conversions have no FTZ form. i1 results are
// produced by comparing the value's bit pattern against zero.

// f16 -> sint
def : Pat<(i1 (fp_to_sint Float16Regs:$a)),
          (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
          (CVT_s16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
          (CVT_s16_f16 Float16Regs:$a, CvtRZI)>;
def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
          (CVT_s32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
          (CVT_s32_f16 Float16Regs:$a, CvtRZI)>;
def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
          (CVT_s64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
          (CVT_s64_f16 Float16Regs:$a, CvtRZI)>;

// f16 -> uint
def : Pat<(i1 (fp_to_uint Float16Regs:$a)),
          (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
          (CVT_u16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
          (CVT_u16_f16 Float16Regs:$a, CvtRZI)>;
def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
          (CVT_u32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
          (CVT_u32_f16 Float16Regs:$a, CvtRZI)>;
def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
          (CVT_u64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
          (CVT_u64_f16 Float16Regs:$a, CvtRZI)>;

// f32 -> sint
def : Pat<(i1 (fp_to_sint Float32Regs:$a)),
          (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
          (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
          (CVT_s16_f32 Float32Regs:$a, CvtRZI)>;
def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
          (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
          (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
          (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
          (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;

// f32 -> uint
def : Pat<(i1 (fp_to_uint Float32Regs:$a)),
          (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
          (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
          (CVT_u16_f32 Float32Regs:$a, CvtRZI)>;
def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
          (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
          (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
          (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
          (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;

// f64 -> sint
def : Pat<(i1 (fp_to_sint Float64Regs:$a)),
          (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
def : Pat<(i16 (fp_to_sint Float64Regs:$a)),
          (CVT_s16_f64 Float64Regs:$a, CvtRZI)>;
def : Pat<(i32 (fp_to_sint Float64Regs:$a)),
          (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
def : Pat<(i64 (fp_to_sint Float64Regs:$a)),
          (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;

// f64 -> uint
def : Pat<(i1 (fp_to_uint Float64Regs:$a)),
          (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
def : Pat<(i16 (fp_to_uint Float64Regs:$a)),
          (CVT_u16_f64 Float64Regs:$a, CvtRZI)>;
def : Pat<(i32 (fp_to_uint Float64Regs:$a)),
          (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
def : Pat<(i64 (fp_to_uint Float64Regs:$a)),
          (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
\r
// sext i1 (select -1/0 since predicates have no direct cvt)
def : Pat<(i16 (sext Int1Regs:$a)),
          (SELP_s16ii -1, 0, Int1Regs:$a)>;
def : Pat<(i32 (sext Int1Regs:$a)),
          (SELP_s32ii -1, 0, Int1Regs:$a)>;
def : Pat<(i64 (sext Int1Regs:$a)),
          (SELP_s64ii -1, 0, Int1Regs:$a)>;

// zext i1
def : Pat<(i16 (zext Int1Regs:$a)),
          (SELP_u16ii 1, 0, Int1Regs:$a)>;
def : Pat<(i32 (zext Int1Regs:$a)),
          (SELP_u32ii 1, 0, Int1Regs:$a)>;
def : Pat<(i64 (zext Int1Regs:$a)),
          (SELP_u64ii 1, 0, Int1Regs:$a)>;

// anyext i1
def : Pat<(i16 (anyext Int1Regs:$a)),
          (SELP_u16ii -1, 0, Int1Regs:$a)>;
def : Pat<(i32 (anyext Int1Regs:$a)),
          (SELP_u32ii -1, 0, Int1Regs:$a)>;
def : Pat<(i64 (anyext Int1Regs:$a)),
          (SELP_u64ii -1, 0, Int1Regs:$a)>;

// sext i16
def : Pat<(i32 (sext Int16Regs:$a)),
          (CVT_s32_s16 Int16Regs:$a, CvtNONE)>;
def : Pat<(i64 (sext Int16Regs:$a)),
          (CVT_s64_s16 Int16Regs:$a, CvtNONE)>;

// zext i16
def : Pat<(i32 (zext Int16Regs:$a)),
          (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
def : Pat<(i64 (zext Int16Regs:$a)),
          (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;

// anyext i16
def : Pat<(i32 (anyext Int16Regs:$a)),
          (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
def : Pat<(i64 (anyext Int16Regs:$a)),
          (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;

// sext i32
def : Pat<(i64 (sext Int32Regs:$a)),
          (CVT_s64_s32 Int32Regs:$a, CvtNONE)>;

// zext i32
def : Pat<(i64 (zext Int32Regs:$a)),
          (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;

// anyext i32
def : Pat<(i64 (anyext Int32Regs:$a)),
          (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
\r
// truncate i64; i1 results compare the low bit against 1
def : Pat<(i32 (trunc Int64Regs:$a)),
          (CVT_u32_u64 Int64Regs:$a, CvtNONE)>;
def : Pat<(i16 (trunc Int64Regs:$a)),
          (CVT_u16_u64 Int64Regs:$a, CvtNONE)>;
def : Pat<(i1 (trunc Int64Regs:$a)),
          (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>;

// truncate i32
def : Pat<(i16 (trunc Int32Regs:$a)),
          (CVT_u16_u32 Int32Regs:$a, CvtNONE)>;
def : Pat<(i1 (trunc Int32Regs:$a)),
          (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>;

// truncate i16
def : Pat<(i1 (trunc Int16Regs:$a)),
          (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>;

// sext_inreg: re-sign-extend a sub-width value in place via cvt
def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;
def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;
def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;
def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;
def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;
def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
\r
// Select instructions with 32-bit predicates: convert the i32 predicate to a
// real predicate register by testing its low bit, then use selp.
def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),
          (SELP_b16rr Int16Regs:$a, Int16Regs:$b,
          (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),
          (SELP_b32rr Int32Regs:$a, Int32Regs:$b,
          (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
          (SELP_b64rr Int64Regs:$a, Int64Regs:$b,
          (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
def : Pat<(select Int32Regs:$pred, Float16Regs:$a, Float16Regs:$b),
          (SELP_f16rr Float16Regs:$a, Float16Regs:$b,
          (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
          (SELP_f32rr Float32Regs:$a, Float32Regs:$b,
          (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
          (SELP_f64rr Float64Regs:$a, Float64Regs:$b,
          (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
\r
let hasSideEffects = 0 in {
  // pack a set of smaller int registers to a larger int register
  def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
                             (ins Int16Regs:$s1, Int16Regs:$s2,
                                  Int16Regs:$s3, Int16Regs:$s4),
                             "mov.b64 \t$d, {{$s1, $s2, $s3, $s4}};", []>;
  def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
                             (ins Int16Regs:$s1, Int16Regs:$s2),
                             "mov.b32 \t$d, {{$s1, $s2}};", []>;
  def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),
                             (ins Int32Regs:$s1, Int32Regs:$s2),
                             "mov.b64 \t$d, {{$s1, $s2}};", []>;
  def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
                             (ins Float32Regs:$s1, Float32Regs:$s2),
                             "mov.b64 \t$d, {{$s1, $s2}};", []>;

  // unpack a larger int register to a set of smaller int registers
  def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
                                   Int16Regs:$d3, Int16Regs:$d4),
                             (ins Int64Regs:$s),
                             "mov.b64 \t{{$d1, $d2, $d3, $d4}}, $s;", []>;
  def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
                             (ins Int32Regs:$s),
                             "mov.b32 \t{{$d1, $d2}}, $s;", []>;
  def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
                             (ins Int64Regs:$s),
                             "mov.b64 \t{{$d1, $d2}}, $s;", []>;
  def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
                             (ins Float64Regs:$s),
                             "mov.b64 \t{{$d1, $d2}}, $s;", []>;
}
\r
let hasSideEffects = 0 in {
  // Extract element of f16x2 register. PTX does not provide any way
  // to access elements of f16x2 vector directly, so we need to
  // extract it using a temporary register.
  def F16x2toF16_0 : NVPTXInst<(outs Float16Regs:$dst),
                               (ins Float16x2Regs:$src),
                               "{{ .reg .b16 \t%tmp_hi;\n\t"
                               " mov.b32 \t{$dst, %tmp_hi}, $src; }}",
                               [(set Float16Regs:$dst,
                                 (extractelt (v2f16 Float16x2Regs:$src), 0))]>;
  def F16x2toF16_1 : NVPTXInst<(outs Float16Regs:$dst),
                               (ins Float16x2Regs:$src),
                               "{{ .reg .b16 \t%tmp_lo;\n\t"
                               " mov.b32 \t{%tmp_lo, $dst}, $src; }}",
                               [(set Float16Regs:$dst,
                                 (extractelt (v2f16 Float16x2Regs:$src), 1))]>;

  // Coalesce two f16 registers into f16x2
  def BuildF16x2 : NVPTXInst<(outs Float16x2Regs:$dst),
                             (ins Float16Regs:$a, Float16Regs:$b),
                             "mov.b32 \t$dst, {{$a, $b}};",
                             [(set Float16x2Regs:$dst,
                               (build_vector (f16 Float16Regs:$a), (f16 Float16Regs:$b)))]>;

  // Directly initializing the underlying b32 register is one less SASS
  // instruction than a vector-packing move.
  def BuildF16x2i : NVPTXInst<(outs Float16x2Regs:$dst), (ins i32imm:$src),
                              "mov.b32 \t$dst, $src;",
                              []>;

  // Split f16x2 into two f16 registers.
  def SplitF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
                             (ins Float16x2Regs:$src),
                             "mov.b32 \t{{$lo, $hi}}, $src;",
                             []>;
  // Split an i32 into two f16
  def SplitI32toF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
                                  (ins Int32Regs:$src),
                                  "mov.b32 \t{{$lo, $hi}}, $src;",
                                  []>;
}
\r
// Count leading zeros
let hasSideEffects = 0 in {
  def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
                         "clz.b32 \t$d, $a;", []>;
  // Note: clz.b64 also produces a 32-bit result.
  def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
                         "clz.b64 \t$d, $a;", []>;
}
\r
// 32-bit has a direct PTX instruction
def : Pat<(ctlz Int32Regs:$a), (CLZr32 Int32Regs:$a)>;

// The return type of the ctlz ISD node is the same as its input, but the PTX
// ctz instruction always returns a 32-bit value. For ctlz.i64, convert the
// ptx value to 64 bits to match the ISD node's semantics, unless we know we're
// truncating back down to 32 bits.
def : Pat<(ctlz Int64Regs:$a), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
def : Pat<(i32 (trunc (ctlz Int64Regs:$a))), (CLZr64 Int64Regs:$a)>;

// For 16-bit ctlz, we zero-extend to 32-bit, perform the count, then trunc the
// result back to 16-bits if necessary. We also need to subtract 16 because
// the high-order 16 zeros were counted.
//
// TODO: NVPTX has a mov.b32 b32reg, {imm, b16reg} instruction, which we could
// use to save one SASS instruction (on sm_35 anyway):
//
//   mov.b32 $tmp, {0xffff, $a}
//   ctlz.b32 $result, $tmp
//
// That is, instead of zero-extending the input to 32 bits, we'd "one-extend"
// and then ctlz that value. This way we don't have to subtract 16 from the
// result. Unfortunately today we don't have a way to generate
// "mov b32reg, {b16imm, b16reg}", so we don't do this optimization.
def : Pat<(ctlz Int16Regs:$a),
          (SUBi16ri (CVT_u16_u32
           (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE), 16)>;
def : Pat<(i32 (zext (ctlz Int16Regs:$a))),
          (SUBi32ri (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), 16)>;
\r
// Population count
let hasSideEffects = 0 in {
  def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
                          "popc.b32 \t$d, $a;", []>;
  // Note: popc.b64 also produces a 32-bit result.
  def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
                          "popc.b64 \t$d, $a;", []>;
}
\r
// 32-bit has a direct PTX instruction
def : Pat<(ctpop Int32Regs:$a), (POPCr32 Int32Regs:$a)>;

// For 64-bit, the result in PTX is actually 32-bit so we zero-extend to 64-bit
// to match the LLVM semantics. Just as with ctlz.i64, we provide a second
// pattern that avoids the type conversion if we're truncating the result to
// i32 anyway.
def : Pat<(ctpop Int64Regs:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
def : Pat<(i32 (trunc (ctpop Int64Regs:$a))), (POPCr64 Int64Regs:$a)>;

// For 16-bit, we zero-extend to 32-bit, then trunc the result back to 16-bits.
// If we know that we're storing into an i32, we can avoid the final trunc.
def : Pat<(ctpop Int16Regs:$a),
          (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE)>;
def : Pat<(i32 (zext (ctpop Int16Regs:$a))),
          (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE))>;
\r
// fpround f32 -> f16
def : Pat<(f16 (fpround Float32Regs:$a)),
          (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(f16 (fpround Float32Regs:$a)),
          (CVT_f16_f32 Float32Regs:$a, CvtRN)>;

// fpround f64 -> f16
def : Pat<(f16 (fpround Float64Regs:$a)),
          (CVT_f16_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(f16 (fpround Float64Regs:$a)),
          (CVT_f16_f64 Float64Regs:$a, CvtRN)>;

// fpround f64 -> f32
def : Pat<(f32 (fpround Float64Regs:$a)),
          (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(f32 (fpround Float64Regs:$a)),
          (CVT_f32_f64 Float64Regs:$a, CvtRN)>;

// fpextend f16 -> f32
def : Pat<(f32 (fpextend Float16Regs:$a)),
          (CVT_f32_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(f32 (fpextend Float16Regs:$a)),
          (CVT_f32_f16 Float16Regs:$a, CvtNONE)>;

// fpextend f16 -> f64
def : Pat<(f64 (fpextend Float16Regs:$a)),
          (CVT_f64_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(f64 (fpextend Float16Regs:$a)),
          (CVT_f64_f16 Float16Regs:$a, CvtNONE)>;

// fpextend f32 -> f64
def : Pat<(f64 (fpextend Float32Regs:$a)),
          (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(f64 (fpextend Float32Regs:$a)),
          (CVT_f64_f32 Float32Regs:$a, CvtNONE)>;
\r
// Function-return node: chained, with an optional incoming glue operand.
def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
                     [SDNPHasChain, SDNPOptInGlue]>;
\r
// fceil, ffloor, fround, ftrunc.

// fceil: cvt with round-toward-plus-infinity-integer (RPI). f16/f32 get an
// FTZ variant under doF32FTZ; the f64 form has no FTZ counterpart.
def : Pat<(fceil Float16Regs:$a),
          (CVT_f16_f16 Float16Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(fceil Float16Regs:$a),
          (CVT_f16_f16 Float16Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
def : Pat<(fceil Float32Regs:$a),
          (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(fceil Float32Regs:$a),
          (CVT_f32_f32 Float32Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
def : Pat<(fceil Float64Regs:$a),
          (CVT_f64_f64 Float64Regs:$a, CvtRPI)>;
\r
// ffloor: cvt with round-toward-minus-infinity-integer (RMI); FTZ variant
// for f16/f32 under doF32FTZ, none for f64.
def : Pat<(ffloor Float16Regs:$a),
          (CVT_f16_f16 Float16Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(ffloor Float16Regs:$a),
          (CVT_f16_f16 Float16Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
def : Pat<(ffloor Float32Regs:$a),
          (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(ffloor Float32Regs:$a),
          (CVT_f32_f32 Float32Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
def : Pat<(ffloor Float64Regs:$a),
          (CVT_f64_f64 Float64Regs:$a, CvtRMI)>;
\r
// fround: cvt with round-to-nearest-integer (RNI); FTZ variant for f16/f32
// under doF32FTZ, none for f64.
// Explicit result-type wrappers added to the FTZ variants so all five
// patterns are written consistently (the register class already fixes the
// type, so this is a no-op for selection).
// NOTE(review): PTX .rni rounds ties to even, while ISD::FROUND rounds ties
// away from zero — this matches the backend's pre-existing behavior, but
// confirm it is acceptable.
def : Pat<(f16 (fround Float16Regs:$a)),
          (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(f16 (fround Float16Regs:$a)),
          (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
def : Pat<(f32 (fround Float32Regs:$a)),
          (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(f32 (fround Float32Regs:$a)),
          (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
def : Pat<(f64 (fround Float64Regs:$a)),
          (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
\r
// ftrunc: cvt with round-toward-zero-integer (RZI); FTZ variant for f16/f32
// under doF32FTZ, none for f64.
def : Pat<(ftrunc Float16Regs:$a),
          (CVT_f16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(ftrunc Float16Regs:$a),
          (CVT_f16_f16 Float16Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
def : Pat<(ftrunc Float32Regs:$a),
          (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(ftrunc Float32Regs:$a),
          (CVT_f32_f32 Float32Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
def : Pat<(ftrunc Float64Regs:$a),
          (CVT_f64_f64 Float64Regs:$a, CvtRZI)>;
\r
// nearbyint and rint are implemented as rounding to nearest even. This isn't
// strictly correct, because it causes us to ignore the rounding mode. But it
// matches what CUDA's "libm" does.

def : Pat<(fnearbyint Float16Regs:$a),
          (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(fnearbyint Float16Regs:$a),
          (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
def : Pat<(fnearbyint Float32Regs:$a),
          (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(fnearbyint Float32Regs:$a),
          (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
def : Pat<(fnearbyint Float64Regs:$a),
          (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
\r
// frint: same lowering as fnearbyint above — round to nearest even (RNI),
// ignoring the dynamic rounding mode, as CUDA's libm does.
def : Pat<(frint Float16Regs:$a),
          (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(frint Float16Regs:$a),
          (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
def : Pat<(frint Float32Regs:$a),
          (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
def : Pat<(frint Float32Regs:$a),
          (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
def : Pat<(frint Float64Regs:$a),
          (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
\r
//-----------------------------------
// Control Flow
//-----------------------------------
\r
// Control-flow terminators. The enclosing 'let' group was never closed in
// this file (missing '}'), which breaks every definition that follows;
// restored here. The branch instructions are also marked isBranch so the
// MI-level branch analysis treats them correctly.
let isTerminator=1 in {
   let isReturn=1, isBarrier=1 in
      def Return : NVPTXInst<(outs), (ins), "ret;", [(retflag)]>;

   let isBranch=1 in
      def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
                              "@$a bra \t$target;",
                              [(brcond Int1Regs:$a, bb:$target)]>;
   // Branches on the *negation* of the predicate. No pattern here; it is
   // selected only through the explicit (setne $a, -1) brcond pattern below.
   let isBranch=1 in
      def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
                                   "@!$a bra \t$target;", []>;

   let isBranch=1, isBarrier=1 in
      def GOTO : NVPTXInst<(outs), (ins brtarget:$target),
                           "bra.uni \t$target;", [(br bb:$target)]>;
}
\r
// brcond on an i32 condition: materialize the i1 predicate by comparing
// the value against zero with setp.ne.u32, then branch on it.
def : Pat<(brcond Int32Regs:$a, bb:$target),
          (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
\r
// SelectionDAGBuilder::visitSwitchCase() will invert the condition of a
// conditional branch if the target block is the next block so that the code
// can fall through to the target block. The inversion is done by 'xor
// condition, 1', which will be translated to (setne condition, -1). Since PTX
// supports '@!pred bra target', we should use it.
\r
// Branch on a negated i1 predicate: map (setne $a, -1) directly onto the
// '@!pred bra' form instead of materializing the inverted predicate.
def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),
          (CBranchOther Int1Regs:$a, bb:$target)>;
\r
// Call-sequence bracketing nodes; both operands are the i32 stack-adjustment
// immediates carried by CALLSEQ_START/CALLSEQ_END.
// Fix: the callseq_end definition was left unterminated — its flag list was
// missing the final 'SDNPSideEffect]>;' line; restored here.
def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
                                            SDTCisVT<1, i32>]>;
def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;

def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
                           [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
                         [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                          SDNPSideEffect]>;
\r
// NVPTXISD::CALL takes a single i32 operand (the callee symbol/address).
def SDT_NVPTXCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
def call : SDNode<"NVPTXISD::CALL", SDT_NVPTXCall,
                  [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def calltarget : Operand<i32>;
// NOTE(review): the literal "(1)" in the asm string looks like a fixed
// placeholder for the return-value list — confirm against the asm printer.
def CALL : NVPTXInst<(outs), (ins calltarget:$dst), "call \t$dst, (1);", []>;
\r
// Direct calls to global-address or external-symbol callees.
def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>;
def : Pat<(call texternalsym:$dst), (CALL texternalsym:$dst)>;
\r
// Pseudo instructions.
// Thin alias over NVPTXInst used to tag pseudo (non-hardware) instructions.
class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
  : NVPTXInst<outs, ins, asmstr, pattern>;
\r
// Pseudo-instructions bracketing a call sequence in the emitted PTX.
// Callseq_Start opens a brace scope and declares temp_param_reg; Callseq_End
// closes it. $amt1/$amt2 are the CALLSEQ immediates, echoed as a PTX comment.
// Fix: the 'def Callseq_End :' header line was missing, leaving a stray
// anonymous NVPTXInst body; restored here.
def Callseq_Start :
  NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
            "\\{ // callseq $amt1, $amt2\n"
            "\t.reg .b32 temp_param_reg;",
            [(callseq_start timm:$amt1, timm:$amt2)]>;
def Callseq_End :
  NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
            "\\} // callseq $amt1",
            [(callseq_end timm:$amt1, timm:$amt2)]>;
\r
// trap instruction
// Aborts the thread; matches the generic ISD::TRAP node.
def trapinst : NVPTXInst<(outs), (ins), "trap;", [(trap)]>;
\r
// Call prototype wrapper
// NOTE(review): presumably emits the prototype declaration an indirect PTX
// call must reference — confirm in the NVPTX asm printer.
def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def CallPrototype :
  SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
\r
// Operand holding the identifier of a call prototype; printed with the
// custom printProtoIdent method.
// Fix: the record body was missing its closing '}', which breaks every
// definition that follows; restored here.
def ProtoIdent : Operand<i32> {
  let PrintMethod = "printProtoIdent";
}
\r
// Emits just the prototype identifier ($ident, an external symbol), printed
// via ProtoIdent's custom print method.
def CALL_PROTOTYPE :
  NVPTXInst<(outs), (ins ProtoIdent:$ident),
            "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
\r
3155 include "NVPTXIntrinsics.td"
\r
//-----------------------------------
// Notes
//-----------------------------------
\r
// BSWAP is currently expanded. The following would be a more efficient
// implementation:
// - for < sm_20, use vector scalar mov, as Tesla supports native 16-bit
//   registers
// - for sm_20, use prmt (use vector scalar mov to get the pack and
\r
3164 // unpack). sm_20 supports native 32-bit register, but not native 16-bit
\r