//===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM VFP instruction set.
//
//===----------------------------------------------------------------------===//
// SelectionDAG type profiles and target-specific DAG nodes used by the VFP
// patterns below.  These mirror the ARMISD node definitions in
// ARMISelLowering.
def SDT_CMPFP0  : SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisVT<1, i32>]>;
def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
                                       SDTCisSameAs<1, 2>]>;
def SDT_VMOVRRD : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
                                       SDTCisVT<2, f64>]>;

def SDT_VMOVSR : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i32>]>;

// FMSTAT/CMPFP produce and consume the implicit FP status flags via glue.
def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInGlue, SDNPOutGlue]>;
def arm_cmpfp  : SDNode<"ARMISD::CMPFP", SDT_ARMFCmp, [SDNPOutGlue]>;
def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
def arm_fmdrr  : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
def arm_fmrrd  : SDNode<"ARMISD::VMOVRRD", SDT_VMOVRRD>;
def arm_vmovsr : SDNode<"ARMISD::VMOVSR", SDT_VMOVSR>;

// Half-precision <-> GPR moves.
def SDT_VMOVhr : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, i32>] >;
def SDT_VMOVrh : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisFP<1>] >;
def arm_vmovhr : SDNode<"ARMISD::VMOVhr", SDT_VMOVhr>;
def arm_vmovrh : SDNode<"ARMISD::VMOVrh", SDT_VMOVrh>;
//===----------------------------------------------------------------------===//
// Operand Definitions.
//===----------------------------------------------------------------------===//

// 8-bit floating-point immediate encodings.
def FPImmOperand : AsmOperandClass {
  let Name = "FPImm";
  let ParserMethod = "parseFPImm";
}

// f16 immediate that is representable in the VFP 8-bit immediate encoding;
// the XForm converts the APFloat into its 8-bit encoded form.
def vfp_f16imm : Operand<f16>,
                 PatLeaf<(f16 fpimm), [{
      return ARM_AM::getFP16Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
       APFloat InVal = N->getValueAPF();
       uint32_t enc = ARM_AM::getFP16Imm(InVal);
       return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
     }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}
// f32 immediate that is representable in the VFP 8-bit immediate encoding.
def vfp_f32imm : Operand<f32>,
                 PatLeaf<(f32 fpimm), [{
      return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
       APFloat InVal = N->getValueAPF();
       uint32_t enc = ARM_AM::getFP32Imm(InVal);
       return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
     }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}
// f64 immediate that is representable in the VFP 8-bit immediate encoding.
def vfp_f64imm : Operand<f64>,
                 PatLeaf<(f64 fpimm), [{
      return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
       APFloat InVal = N->getValueAPF();
       uint32_t enc = ARM_AM::getFP64Imm(InVal);
       return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
     }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}
// Loads/stores with a minimum alignment guarantee, used so VLDR/VSTR are only
// selected for suitably aligned accesses.
def alignedload16 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 2;
}]>;

def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 4;
}]>;

def alignedstore16 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 2;
}]>;

def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 4;
}]>;
// The VCVT to/from fixed-point instructions encode the 'fbits' operand
// (the number of fixed bits) differently than it appears in the assembly
// source. It's encoded as "Size - fbits" where Size is the size of the
// fixed-point representation (32 or 16) and fbits is the value appearing
// in the assembly source, an integer in [0,16] or (0,32], depending on size.
def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
def fbits32 : Operand<i32> {
  let PrintMethod = "printFBits32";
  let ParserMatchClass = fbits32_asm_operand;
}

def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
def fbits16 : Operand<i32> {
  let PrintMethod = "printFBits16";
  let ParserMatchClass = fbits16_asm_operand;
}
//===----------------------------------------------------------------------===//
// Load / store Instructions.
//===----------------------------------------------------------------------===//

let canFoldAsLoad = 1, isReMaterializable = 1 in {

def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
                 IIC_fpLoad64, "vldr", "\t$Dd, $addr",
                 [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>;

def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
                 IIC_fpLoad32, "vldr", "\t$Sd, $addr",
                 [(set SPR:$Sd, (alignedload32 addrmode5:$addr))]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

def VLDRH : AHI5<0b1101, 0b01, (outs HPR:$Sd), (ins addrmode5fp16:$addr),
                 IIC_fpLoad16, "vldr", ".16\t$Sd, $addr",
                 [(set HPR:$Sd, (alignedload16 addrmode5fp16:$addr))]>,
            Requires<[HasFullFP16]>;

} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
                 IIC_fpStore64, "vstr", "\t$Dd, $addr",
                 [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>;

def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
                 IIC_fpStore32, "vstr", "\t$Sd, $addr",
                 [(alignedstore32 SPR:$Sd, addrmode5:$addr)]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

def VSTRH : AHI5<0b1101, 0b00, (outs), (ins HPR:$Sd, addrmode5fp16:$addr),
                 IIC_fpStore16, "vstr", ".16\t$Sd, $addr",
                 [(alignedstore16 HPR:$Sd, addrmode5fp16:$addr)]>,
            Requires<[HasFullFP16]>;
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//===----------------------------------------------------------------------===//

// Emits the D- and S-register load/store-multiple variants (IA, IA!, DB!)
// for either VLDM (L_bit = 1) or VSTM (L_bit = 0).
multiclass vfp_ldst_mult<string asm, bit L_bit,
                         InstrItinClass itin, InstrItinClass itin_upd> {
  // Double Precision
  def DIA :
    AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def DIA_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def DDB_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }

  // Single Precision
  def SIA :
    AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SIA_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SDB_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                                variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
}
let hasSideEffects = 0 in {

let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;

let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;

} // hasSideEffects

// "vldm"/"vstm" with no mode suffix default to increment-after.
def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;
246 //===----------------------------------------------------------------------===//
247 // Lazy load / store multiple Instructions
// Armv8-M Mainline Security Extension lazy FP context preservation
// ("vlldm"/"vlstm"), gated on HasV8MMainline + Has8MSecExt below.
// NOTE(review): this region is a garbled extraction -- original line numbers
// are fused at the start of each line, and the remaining encoding 'let' lines
// and closing braces of both records were dropped. Restore from the upstream
// ARM target description before building; text kept byte-identical here.
250 def VLLDM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
251 IIC_fpLoad_m, "vlldm${p}\t$Rn", "", []>,
252 Requires<[HasV8MMainline, Has8MSecExt]> {
253 let Inst{24-23} = 0b00;
263 def VLSTM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
264 IIC_fpStore_m, "vlstm${p}\t$Rn", "", []>,
265 Requires<[HasV8MMainline, Has8MSecExt]> {
266 let Inst{24-23} = 0b00;
// vpush/vpop are pseudo-mnemonics for VSTMDB/VLDMIA with SP writeback.
def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r), 0>,
                Requires<[HasVFP2]>;
def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r), 0>,
                Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r", (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r), 0>,
                Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r", (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r), 0>,
                Requires<[HasVFP2]>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;
// FLDMX, FSTMX - Load and store multiple unknown precision registers for
// disassembly only.
// These instruction are deprecated so we don't want them to get selected.
// However, there is no UAL syntax for them, so we keep them around for
// (dis)assembly only.
multiclass vfp_ldstx_mult<string asm, bit L_bit> {
  // Unknown precision
  def XIA :
    AXXI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, !strconcat(asm, "iax${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 0;          // No writeback
    let Inst{20}    = L_bit;
  }
  def XIA_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "iax${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01;       // Increment After
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
  def XDB_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10;       // Decrement Before
    let Inst{21}    = 1;          // Writeback
    let Inst{20}    = L_bit;
  }
}

defm FLDM : vfp_ldstx_mult<"fldm", 1>;
defm FSTM : vfp_ldstx_mult<"fstm", 0>;
// Pre-UAL mnemonics for the deprecated X-form load/store multiple.
def : VFP2MnemonicAlias<"fldmeax", "fldmdbx">;
def : VFP2MnemonicAlias<"fldmfdx", "fldmiax">;

def : VFP2MnemonicAlias<"fstmeax", "fstmiax">;
def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;
//===----------------------------------------------------------------------===//
// FP Binary Operations.
//===----------------------------------------------------------------------===//

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VADDD  : ADbI<0b11100, 0b11, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>,
             Sched<[WriteFPALU64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDS  : ASbIn<0b11100, 0b11, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>,
             Sched<[WriteFPALU32]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDH  : AHbI<0b11100, 0b11, 0, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpALU16, "vadd", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fadd HPR:$Sn, HPR:$Sm))]>,
             Sched<[WriteFPALU32]>;
let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VSUBD  : ADbI<0b11100, 0b11, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>,
             Sched<[WriteFPALU64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBS  : ASbIn<0b11100, 0b11, 1, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>,
             Sched<[WriteFPALU32]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBH  : AHbI<0b11100, 0b11, 1, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpALU16, "vsub", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fsub HPR:$Sn, HPR:$Sm))]>,
             Sched<[WriteFPALU32]>;
let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VDIVD  : ADbI<0b11101, 0b00, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>,
             Sched<[WriteFPDIV64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVS  : ASbI<0b11101, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>,
             Sched<[WriteFPDIV32]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVH  : AHbI<0b11101, 0b00, 0, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpDIV16, "vdiv", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fdiv HPR:$Sn, HPR:$Sm))]>,
             Sched<[WriteFPDIV32]>;
let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VMULD  : ADbI<0b11100, 0b10, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>,
             Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULS  : ASbIn<0b11100, 0b10, 0, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
                   [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULH  : AHbI<0b11100, 0b10, 0, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpMUL16, "vmul", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fmul HPR:$Sn, HPR:$Sm))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;
def VNMULD : ADbI<0b11100, 0b10, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>,
             Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;

def VNMULS : ASbI<0b11100, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNMULH : AHbI<0b11100, 0b10, 1, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpMUL16, "vnmul", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fneg (fmul HPR:$Sn, HPR:$Sm)))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;
// VSEL<cc>: conditional select on the APSR flags (Armv8 FP).  Note the Sm/Sn
// operand swap in the pattern: vsel selects Sn when the condition holds.
multiclass vsel_inst<string op, bits<2> opc, int CC> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
      Uses = [CPSR], AddedComplexity = 4 in {
    def H : AHbInp<0b11100, opc, 0,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f16\t$Sd, $Sn, $Sm"),
                   [(set HPR:$Sd, (ARMcmov HPR:$Sm, HPR:$Sn, CC))]>,
                   Requires<[HasFullFP16]>;

    def S : ASbInp<0b11100, opc, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC))]>,
                   Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11100, opc, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC))]>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

// The CC constants here match ARMCC::CondCodes.
defm VSELGT : vsel_inst<"gt", 0b11, 12>;
defm VSELGE : vsel_inst<"ge", 0b10, 10>;
defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
defm VSELVS : vsel_inst<"vs", 0b01, 6>;
// VMAXNM/VMINNM: IEEE-754-2008 maxNum/minNum (Armv8 FP).
multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "" in {
    def H : AHbInp<0b11101, 0b00, opc,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   NoItinerary, !strconcat(op, ".f16\t$Sd, $Sn, $Sm"),
                   [(set HPR:$Sd, (SD HPR:$Sn, HPR:$Sm))]>,
                   Requires<[HasFullFP16]>;

    def S : ASbInp<0b11101, 0b00, opc,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>,
                   Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11101, 0b00, opc,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 DPR:$Dm))))]>,
                   Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

defm VMAXNM : vmaxmin_inst<"vmaxnm", 0, fmaxnum>;
defm VMINNM : vmaxmin_inst<"vminnm", 1, fminnum>;
// Match reassociated forms only if not sign dependent rounding.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
          (VNMULD DPR:$a, DPR:$b)>,
      Requires<[NoHonorSignDependentRounding,HasDPVFP]>;
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
          (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
// These are encoded as unary instructions.
// VCMPE raises Invalid Operation on quiet NaNs (i32 1 = "E" variant);
// VCMP (i32 0) only signals on signaling NaNs.
let Defs = [FPSCR_NZCV] in {
def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
                  [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm), (i32 1))]>;

def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
                  [(arm_cmpfp SPR:$Sd, SPR:$Sm, (i32 1))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPEH : AHuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins HPR:$Sd, HPR:$Sm),
                  IIC_fpCMP16, "vcmpe", ".f16\t$Sd, $Sm",
                  [(arm_cmpfp HPR:$Sd, HPR:$Sm, (i32 1))]>;

def VCMPD  : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
                  [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm), (i32 0))]>;

def VCMPS  : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
                  [(arm_cmpfp SPR:$Sd, SPR:$Sm, (i32 0))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPH  : AHuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins HPR:$Sd, HPR:$Sm),
                  IIC_fpCMP16, "vcmp", ".f16\t$Sd, $Sm",
                  [(arm_cmpfp HPR:$Sd, HPR:$Sm, (i32 0))]>;
} // Defs = [FPSCR_NZCV]
//===----------------------------------------------------------------------===//
// FP Unary Operations.
//===----------------------------------------------------------------------===//

def VABSD  : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;

def VABSS  : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fabs SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}
// Half-precision absolute value (vabs.f16).
// NOTE(review): the pattern/terminator line of this record was dropped by the
// extraction (original line 576 missing) and line numbers are fused into the
// text -- restore from the upstream ARM target description before building.
573 def VABSH : AHuI<0b11101, 0b11, 0b0000, 0b11, 0,
574 (outs SPR:$Sd), (ins SPR:$Sm),
575 IIC_fpUNA16, "vabs", ".f16\t$Sd, $Sm",
// Compare-with-zero forms; the Dm/Sm field (bits 3-0 and 5) is hard-wired
// to zero.  "E" variants (i32 1) raise Invalid Operation on quiet NaNs.
let Defs = [FPSCR_NZCV] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
                   [(arm_cmpfp0 (f64 DPR:$Dd), (i32 1))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
                   [(arm_cmpfp0 SPR:$Sd, (i32 1))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins HPR:$Sd),
                   IIC_fpCMP16, "vcmpe", ".f16\t$Sd, #0",
                   [(arm_cmpfp0 HPR:$Sd, (i32 1))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZD  : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
                   [(arm_cmpfp0 (f64 DPR:$Dd), (i32 0))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}

def VCMPZS  : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
                   [(arm_cmpfp0 SPR:$Sd, (i32 0))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPZH  : AHuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins HPR:$Sd),
                   IIC_fpCMP16, "vcmp", ".f16\t$Sd, #0",
                   [(arm_cmpfp0 HPR:$Sd, (i32 0))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5}   = 0;
}
} // Defs = [FPSCR_NZCV]
// Single -> double precision extension.
def VCVTDS  : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
                   [(set DPR:$Dd, (fpextend SPR:$Sm))]>,
              Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0}   = Sm{4-1};
  let Inst{5}     = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22}    = Dd{4};

  let Predicates = [HasVFP2, HasDPVFP];
}
// Special case encoding: bits 11-8 is 0b1011.
def VCVTSD  : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
                    IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
                    [(set SPR:$Sd, (fpround DPR:$Dm))]>,
              Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};

  let Inst{27-23} = 0b11101;
  let Inst{21-16} = 0b110111;
  let Inst{11-8}  = 0b1011;
  let Inst{7-6}   = 0b11;
  let Inst{4}     = 0;

  let Predicates = [HasVFP2, HasDPVFP];
}
// Between half, single and double-precision.
def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm",
                 [/* Intentionally left blank, see patterns below */]>,
                 Requires<[HasFP16]>,
                 Sched<[WriteFPCVT]>;

def : FullFP16Pat<(f32 (fpextend HPR:$Sm)),
                  (VCVTBHS (COPY_TO_REGCLASS HPR:$Sm, SPR))>;
def : FP16Pat<(f16_to_fp GPR:$a),
              (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
                 [/* Intentionally left blank, see patterns below */]>,
                 Requires<[HasFP16]>,
                 Sched<[WriteFPCVT]>;

def : FullFP16Pat<(f16 (fpround SPR:$Sm)),
                  (COPY_TO_REGCLASS (VCVTBSH SPR:$Sm), HPR)>;
def : FP16Pat<(fp_to_f16 SPR:$a),
              (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;

def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
                 [/* For disassembly only; pattern left blank */]>,
                 Requires<[HasFP16]>,
                 Sched<[WriteFPCVT]>;

def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm",
                 [/* For disassembly only; pattern left blank */]>,
                 Requires<[HasFP16]>,
                 Sched<[WriteFPCVT]>;
// Half -> double precision (bottom half of the source S register).
def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm",
                   [/* Intentionally left blank, see patterns below */]>,
                   Requires<[HasFPARMv8, HasDPVFP]>,
                   Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};
}

def : FullFP16Pat<(f64 (fpextend HPR:$Sm)),
                  (VCVTBHD (COPY_TO_REGCLASS HPR:$Sm, SPR))>,
                  Requires<[HasFPARMv8, HasDPVFP]>;
def : FP16Pat<(f64 (f16_to_fp GPR:$a)),
              (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>,
              Requires<[HasFPARMv8, HasDPVFP]>;
// Double -> half precision (bottom half of the destination S register).
def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
                   (outs SPR:$Sd), (ins DPR:$Dm),
                   NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm",
                   [/* Intentionally left blank, see patterns below */]>,
                   Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
}

def : FullFP16Pat<(f16 (fpround DPR:$Dm)),
                  (COPY_TO_REGCLASS (VCVTBDH DPR:$Dm), HPR)>,
                  Requires<[HasFPARMv8, HasDPVFP]>;
def : FP16Pat<(fp_to_f16 (f64 DPR:$a)),
              (i32 (COPY_TO_REGCLASS (VCVTBDH DPR:$a), GPR))>,
              Requires<[HasFPARMv8, HasDPVFP]>;
// Top-half (vcvtt) half <-> double conversions; disassembly only (no
// selection patterns).
def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5}   = Sm{0};
}

def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
                   (outs SPR:$Sd), (ins DPR:$Dm),
                   NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{15-12} = Sd{4-1};
  let Inst{22}    = Sd{0};
  let Inst{3-0}   = Dm{3-0};
  let Inst{5}     = Dm{4};
}
// VCVT{A,N,P,M}: FP -> integer conversions with an explicit rounding mode
// (bits 17-16 = rm), Armv8 FP / FullFP16.  Emits signed/unsigned variants
// from f16, f32 and f64 sources, plus selection patterns for fp_to_sint /
// fp_to_uint of the rounding node.
// NOTE(review): this multiclass is a garbled extraction -- original line
// numbers are fused into the text and many lines (pattern results, closing
// braces, predicates) were dropped.  Kept byte-identical; restore from the
// upstream ARM target description before building.
784 multiclass vcvt_inst<string opc, bits<2> rm,
785 SDPatternOperator node = null_frag> {
786 let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
787 def SH : AHuInp<0b11101, 0b11, 0b1100, 0b11, 0,
788 (outs SPR:$Sd), (ins HPR:$Sm),
789 NoItinerary, !strconcat("vcvt", opc, ".s32.f16\t$Sd, $Sm"),
791 Requires<[HasFullFP16]> {
792 let Inst{17-16} = rm;
795 def UH : AHuInp<0b11101, 0b11, 0b1100, 0b01, 0,
796 (outs SPR:$Sd), (ins HPR:$Sm),
797 NoItinerary, !strconcat("vcvt", opc, ".u32.f16\t$Sd, $Sm"),
799 Requires<[HasFullFP16]> {
800 let Inst{17-16} = rm;
803 def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
804 (outs SPR:$Sd), (ins SPR:$Sm),
805 NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
807 Requires<[HasFPARMv8]> {
808 let Inst{17-16} = rm;
811 def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
812 (outs SPR:$Sd), (ins SPR:$Sm),
813 NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"),
815 Requires<[HasFPARMv8]> {
816 let Inst{17-16} = rm;
819 def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
820 (outs SPR:$Sd), (ins DPR:$Dm),
821 NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"),
823 Requires<[HasFPARMv8, HasDPVFP]> {
826 let Inst{17-16} = rm;
828 // Encode instruction operands
829 let Inst{3-0} = Dm{3-0};
834 def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
835 (outs SPR:$Sd), (ins DPR:$Dm),
836 NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"),
838 Requires<[HasFPARMv8, HasDPVFP]> {
841 let Inst{17-16} = rm;
843 // Encode instruction operands
844 let Inst{3-0} = Dm{3-0};
850 let Predicates = [HasFPARMv8] in {
851 let Predicates = [HasFullFP16] in {
852 def : Pat<(i32 (fp_to_sint (node HPR:$a))),
854 (!cast<Instruction>(NAME#"SH") HPR:$a),
857 def : Pat<(i32 (fp_to_uint (node HPR:$a))),
859 (!cast<Instruction>(NAME#"UH") HPR:$a),
862 def : Pat<(i32 (fp_to_sint (node SPR:$a))),
864 (!cast<Instruction>(NAME#"SS") SPR:$a),
866 def : Pat<(i32 (fp_to_uint (node SPR:$a))),
868 (!cast<Instruction>(NAME#"US") SPR:$a),
871 let Predicates = [HasFPARMv8, HasDPVFP] in {
872 def : Pat<(i32 (fp_to_sint (node (f64 DPR:$a)))),
874 (!cast<Instruction>(NAME#"SD") DPR:$a),
876 def : Pat<(i32 (fp_to_uint (node (f64 DPR:$a)))),
878 (!cast<Instruction>(NAME#"UD") DPR:$a),
// Rounding modes: a = ties-away, n = ties-even, p = toward +inf,
// m = toward -inf.
defm VCVTA : vcvt_inst<"a", 0b00, fround>;
defm VCVTN : vcvt_inst<"n", 0b01>;
defm VCVTP : vcvt_inst<"p", 0b10, fceil>;
defm VCVTM : vcvt_inst<"m", 0b11, ffloor>;
def VNEGD  : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;

def VNEGS  : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
                   [(set SPR:$Sd, (fneg SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNEGH  : AHuI<0b11101, 0b11, 0b0001, 0b01, 0,
                  (outs HPR:$Sd), (ins HPR:$Sm),
                  IIC_fpUNA16, "vneg", ".f16\t$Sd, $Sm",
                  [(set HPR:$Sd, (fneg HPR:$Sm))]>;
// VRINT{Z,R,X}: round-to-integral within FP, using the mode encoded in
// bits 7 (op2) and 16 (op) rather than an explicit rounding-mode field.
multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
  def H : AHuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs SPR:$Sd), (ins SPR:$Sm),
               NoItinerary, !strconcat("vrint", opc), ".f16\t$Sd, $Sm",
               []>,
           Requires<[HasFullFP16]> {
    let Inst{7}  = op2;
    let Inst{16} = op;
  }

  def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs SPR:$Sd), (ins SPR:$Sm),
               NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm",
               [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
           Requires<[HasFPARMv8]> {
    let Inst{7}  = op2;
    let Inst{16} = op;
  }

  def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs DPR:$Dd), (ins DPR:$Dm),
               NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm",
               [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
           Requires<[HasFPARMv8, HasDPVFP]> {
    let Inst{7}  = op2;
    let Inst{16} = op;
  }

  // Accept the redundant "<type>.<type>" suffix spelling.
  def : InstAlias<!strconcat("vrint", opc, "$p.f16.f16\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"H") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
        Requires<[HasFullFP16]>;
  def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p), 0>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc>;
defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint>;
defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;
// VRINT{A,N,P,M}: round-to-integral with an explicit rounding mode
// (bits 17-16 = rm); unpredicated Armv8 FP encodings.
multiclass vrint_inst_anpm<string opc, bits<2> rm,
                           SDPatternOperator node = null_frag> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
    def H : AHuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   NoItinerary, !strconcat("vrint", opc, ".f16\t$Sd, $Sm"),
                   []>,
            Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }
    def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
                   [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
            Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }
    def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs DPR:$Dd), (ins DPR:$Dm),
                   NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"),
                   [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
            Requires<[HasFPARMv8, HasDPVFP]> {
      let Inst{17-16} = rm;
    }
  }

  // Accept the redundant "<type>.<type>" suffix spelling.
  def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm), 0>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm), 0>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTA : vrint_inst_anpm<"a", 0b00, fround>;
defm VRINTN : vrint_inst_anpm<"n", 0b01, int_arm_neon_vrintn>;
defm VRINTP : vrint_inst_anpm<"p", 0b10, fceil>;
defm VRINTM : vrint_inst_anpm<"m", 0b11, ffloor>;
def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>,
             Sched<[WriteFPSQRT64]>;

def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fsqrt SPR:$Sm))]>,
             Sched<[WriteFPSQRT32]>;
// Half-precision square root (vsqrt.f16).
// NOTE(review): the pattern/terminator line of this record was dropped by the
// extraction (original line 1003 missing) and line numbers are fused into the
// text -- restore from the upstream ARM target description before building.
1000 def VSQRTH : AHuI<0b11101, 0b11, 0b0001, 0b11, 0,
1001 (outs SPR:$Sd), (ins SPR:$Sm),
1002 IIC_fpSQRT16, "vsqrt", ".f16\t$Sd, $Sm",
let hasSideEffects = 0 in {
let isMoveReg = 1 in {
def VMOVD  : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;

def VMOVS  : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
} // isMoveReg

// vmovx/vins operate on the top half of an S register; unpredicated
// FullFP16 encodings.
let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
def VMOVH  : ASuInp<0b11101, 0b11, 0b0000, 0b01, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    IIC_fpUNA16, "vmovx.f16\t$Sd, $Sm", []>,
             Requires<[HasFullFP16]>;

def VINSH  : ASuInp<0b11101, 0b11, 0b0000, 0b11, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    IIC_fpUNA16, "vins.f16\t$Sd, $Sm", []>,
             Requires<[HasFullFP16]>;
} // PostEncoderMethod
} // hasSideEffects
1029 //===----------------------------------------------------------------------===//
1030 // FP <-> GPR Copies. Int <-> FP Conversions.
1033 let isMoveReg = 1 in {
1034 def VMOVRS : AVConv2I<0b11100001, 0b1010,
1035 (outs GPR:$Rt), (ins SPR:$Sn),
1036 IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
1037 [(set GPR:$Rt, (bitconvert SPR:$Sn))]>,
1038 Sched<[WriteFPMOV]> {
1039 // Instruction operands.
1043 // Encode instruction operands.
1044 let Inst{19-16} = Sn{4-1};
1045 let Inst{7} = Sn{0};
1046 let Inst{15-12} = Rt;
1048 let Inst{6-5} = 0b00;
1049 let Inst{3-0} = 0b0000;
1051 // Some single precision VFP instructions may be executed on both NEON and VFP
1053 let D = VFPNeonDomain;
1056 // Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
1057 def VMOVSR : AVConv4I<0b11100000, 0b1010,
1058 (outs SPR:$Sn), (ins GPR:$Rt),
1059 IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
1060 [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
1061 Requires<[HasVFP2, UseVMOVSR]>,
1062 Sched<[WriteFPMOV]> {
1063 // Instruction operands.
1067 // Encode instruction operands.
1068 let Inst{19-16} = Sn{4-1};
1069 let Inst{7} = Sn{0};
1070 let Inst{15-12} = Rt;
1072 let Inst{6-5} = 0b00;
1073 let Inst{3-0} = 0b0000;
1075 // Some single precision VFP instructions may be executed on both NEON and VFP
1077 let D = VFPNeonDomain;
1080 def : Pat<(arm_vmovsr GPR:$Rt), (VMOVSR GPR:$Rt)>, Requires<[HasVFP2, UseVMOVSR]>;
1082 let hasSideEffects = 0 in {
1083 def VMOVRRD : AVConv3I<0b11000101, 0b1011,
1084 (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
1085 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
1086 [(set GPR:$Rt, GPR:$Rt2, (arm_fmrrd DPR:$Dm))]>,
1087 Sched<[WriteFPMOV]> {
1088 // Instruction operands.
1093 // Encode instruction operands.
1094 let Inst{3-0} = Dm{3-0};
1095 let Inst{5} = Dm{4};
1096 let Inst{15-12} = Rt;
1097 let Inst{19-16} = Rt2;
1099 let Inst{7-6} = 0b00;
1101 // Some single precision VFP instructions may be executed on both NEON and VFP
1103 let D = VFPNeonDomain;
1105 // This instruction is equivalent to
1106 // $Rt = EXTRACT_SUBREG $Dm, ssub_0
1107 // $Rt2 = EXTRACT_SUBREG $Dm, ssub_1
1108 let isExtractSubreg = 1;
1111 def VMOVRRS : AVConv3I<0b11000101, 0b1010,
1112 (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
1113 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
1114 [/* For disassembly only; pattern left blank */]>,
1115 Sched<[WriteFPMOV]> {
1120 // Encode instruction operands.
1121 let Inst{3-0} = src1{4-1};
1122 let Inst{5} = src1{0};
1123 let Inst{15-12} = Rt;
1124 let Inst{19-16} = Rt2;
1126 let Inst{7-6} = 0b00;
1128 // Some single precision VFP instructions may be executed on both NEON and VFP
1130 let D = VFPNeonDomain;
1131 let DecoderMethod = "DecodeVMOVRRS";
1135 // FMDHR: GPR -> SPR
1136 // FMDLR: GPR -> SPR
1138 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
1139 (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
1140 IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
1141 [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]>,
1142 Sched<[WriteFPMOV]> {
1143 // Instruction operands.
1148 // Encode instruction operands.
1149 let Inst{3-0} = Dm{3-0};
1150 let Inst{5} = Dm{4};
1151 let Inst{15-12} = Rt;
1152 let Inst{19-16} = Rt2;
1154 let Inst{7-6} = 0b00;
1156 // Some single precision VFP instructions may be executed on both NEON and VFP
1158 let D = VFPNeonDomain;
1160 // This instruction is equivalent to
1161 // $Dm = REG_SEQUENCE $Rt, ssub_0, $Rt2, ssub_1
1162 let isRegSequence = 1;
1165 // Hoist an fabs or a fneg of a value coming from integer registers
1166 // and do the fabs/fneg on the integer value. This is never a lose
1167 // and could enable the conversion to float to be removed completely.
1168 def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1169 (VMOVDRR GPR:$Rl, (BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
1170 Requires<[IsARM, HasV6T2]>;
1171 def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1172 (VMOVDRR GPR:$Rl, (t2BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
1173 Requires<[IsThumb2, HasV6T2]>;
1174 def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1175 (VMOVDRR GPR:$Rl, (EORri GPR:$Rh, (i32 0x80000000)))>,
1177 def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
1178 (VMOVDRR GPR:$Rl, (t2EORri GPR:$Rh, (i32 0x80000000)))>,
1179 Requires<[IsThumb2]>;
1181 let hasSideEffects = 0 in
1182 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
1183 (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
1184 IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
1185 [/* For disassembly only; pattern left blank */]>,
1186 Sched<[WriteFPMOV]> {
1187 // Instruction operands.
1192 // Encode instruction operands.
1193 let Inst{3-0} = dst1{4-1};
1194 let Inst{5} = dst1{0};
1195 let Inst{15-12} = src1;
1196 let Inst{19-16} = src2;
1198 let Inst{7-6} = 0b00;
1200 // Some single precision VFP instructions may be executed on both NEON and VFP
1202 let D = VFPNeonDomain;
1204 let DecoderMethod = "DecodeVMOVSRR";
1207 // Move H->R, clearing top 16 bits
1208 def VMOVRH : AVConv2I<0b11100001, 0b1001,
1209 (outs GPR:$Rt), (ins HPR:$Sn),
1210 IIC_fpMOVSI, "vmov", ".f16\t$Rt, $Sn",
1211 [(set GPR:$Rt, (arm_vmovrh HPR:$Sn))]>,
1212 Requires<[HasFullFP16]>,
1213 Sched<[WriteFPMOV]> {
1214 // Instruction operands.
1218 // Encode instruction operands.
1219 let Inst{19-16} = Sn{4-1};
1220 let Inst{7} = Sn{0};
1221 let Inst{15-12} = Rt;
1223 let Inst{6-5} = 0b00;
1224 let Inst{3-0} = 0b0000;
1227 // Move R->H, clearing top 16 bits
1228 def VMOVHR : AVConv4I<0b11100000, 0b1001,
1229 (outs HPR:$Sn), (ins GPR:$Rt),
1230 IIC_fpMOVIS, "vmov", ".f16\t$Sn, $Rt",
1231 [(set HPR:$Sn, (arm_vmovhr GPR:$Rt))]>,
1232 Requires<[HasFullFP16]>,
1233 Sched<[WriteFPMOV]> {
1234 // Instruction operands.
1238 // Encode instruction operands.
1239 let Inst{19-16} = Sn{4-1};
1240 let Inst{7} = Sn{0};
1241 let Inst{15-12} = Rt;
1243 let Inst{6-5} = 0b00;
1244 let Inst{3-0} = 0b0000;
1247 // FMRDH: SPR -> GPR
1248 // FMRDL: SPR -> GPR
1249 // FMRRS: SPR -> GPR
1250 // FMRX: SPR system reg -> GPR
1251 // FMSRR: GPR -> SPR
1252 // FMXR: GPR -> VFP system reg
1257 class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1258 bits<4> opcod4, dag oops, dag iops,
1259 InstrItinClass itin, string opc, string asm,
1261 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1263 // Instruction operands.
1267 // Encode instruction operands.
1268 let Inst{3-0} = Sm{4-1};
1269 let Inst{5} = Sm{0};
1270 let Inst{15-12} = Dd{3-0};
1271 let Inst{22} = Dd{4};
1273 let Predicates = [HasVFP2, HasDPVFP];
1276 class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1277 bits<4> opcod4, dag oops, dag iops,InstrItinClass itin,
1278 string opc, string asm, list<dag> pattern>
1279 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1281 // Instruction operands.
1285 // Encode instruction operands.
1286 let Inst{3-0} = Sm{4-1};
1287 let Inst{5} = Sm{0};
1288 let Inst{15-12} = Sd{4-1};
1289 let Inst{22} = Sd{0};
1292 class AVConv1IHs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1293 bits<4> opcod4, dag oops, dag iops,
1294 InstrItinClass itin, string opc, string asm,
1296 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1298 // Instruction operands.
1302 // Encode instruction operands.
1303 let Inst{3-0} = Sm{4-1};
1304 let Inst{5} = Sm{0};
1305 let Inst{15-12} = Sd{4-1};
1306 let Inst{22} = Sd{0};
1308 let Predicates = [HasFullFP16];
1311 def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
1312 (outs DPR:$Dd), (ins SPR:$Sm),
1313 IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
1315 Sched<[WriteFPCVT]> {
1316 let Inst{7} = 1; // s32
1319 let Predicates=[HasVFP2, HasDPVFP] in {
1320 def : VFPPat<(f64 (sint_to_fp GPR:$a)),
1321 (VSITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
1323 def : VFPPat<(f64 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1324 (VSITOD (VLDRS addrmode5:$a))>;
1327 def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
1328 (outs SPR:$Sd),(ins SPR:$Sm),
1329 IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
1331 Sched<[WriteFPCVT]> {
1332 let Inst{7} = 1; // s32
1334 // Some single precision VFP instructions may be executed on both NEON and
1335 // VFP pipelines on A8.
1336 let D = VFPNeonA8Domain;
1339 def : VFPNoNEONPat<(f32 (sint_to_fp GPR:$a)),
1340 (VSITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
1342 def : VFPNoNEONPat<(f32 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1343 (VSITOS (VLDRS addrmode5:$a))>;
1345 def VSITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
1346 (outs HPR:$Sd), (ins SPR:$Sm),
1347 IIC_fpCVTIH, "vcvt", ".f16.s32\t$Sd, $Sm",
1349 Sched<[WriteFPCVT]> {
1350 let Inst{7} = 1; // s32
1353 def : VFPNoNEONPat<(f16 (sint_to_fp GPR:$a)),
1354 (VSITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
1356 def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
1357 (outs DPR:$Dd), (ins SPR:$Sm),
1358 IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
1360 Sched<[WriteFPCVT]> {
1361 let Inst{7} = 0; // u32
1364 let Predicates=[HasVFP2, HasDPVFP] in {
1365 def : VFPPat<(f64 (uint_to_fp GPR:$a)),
1366 (VUITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;
1368 def : VFPPat<(f64 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1369 (VUITOD (VLDRS addrmode5:$a))>;
1372 def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
1373 (outs SPR:$Sd), (ins SPR:$Sm),
1374 IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
1376 Sched<[WriteFPCVT]> {
1377 let Inst{7} = 0; // u32
1379 // Some single precision VFP instructions may be executed on both NEON and
1380 // VFP pipelines on A8.
1381 let D = VFPNeonA8Domain;
1384 def : VFPNoNEONPat<(f32 (uint_to_fp GPR:$a)),
1385 (VUITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;
1387 def : VFPNoNEONPat<(f32 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
1388 (VUITOS (VLDRS addrmode5:$a))>;
1390 def VUITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
1391 (outs HPR:$Sd), (ins SPR:$Sm),
1392 IIC_fpCVTIH, "vcvt", ".f16.u32\t$Sd, $Sm",
1394 Sched<[WriteFPCVT]> {
1395 let Inst{7} = 0; // u32
1398 def : VFPNoNEONPat<(f16 (uint_to_fp GPR:$a)),
1399 (VUITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;
1403 class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1404 bits<4> opcod4, dag oops, dag iops,
1405 InstrItinClass itin, string opc, string asm,
1407 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1409 // Instruction operands.
1413 // Encode instruction operands.
1414 let Inst{3-0} = Dm{3-0};
1415 let Inst{5} = Dm{4};
1416 let Inst{15-12} = Sd{4-1};
1417 let Inst{22} = Sd{0};
1419 let Predicates = [HasVFP2, HasDPVFP];
1422 class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1423 bits<4> opcod4, dag oops, dag iops,
1424 InstrItinClass itin, string opc, string asm,
1426 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1428 // Instruction operands.
1432 // Encode instruction operands.
1433 let Inst{3-0} = Sm{4-1};
1434 let Inst{5} = Sm{0};
1435 let Inst{15-12} = Sd{4-1};
1436 let Inst{22} = Sd{0};
1439 class AVConv1IsH_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
1440 bits<4> opcod4, dag oops, dag iops,
1441 InstrItinClass itin, string opc, string asm,
1443 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
1445 // Instruction operands.
1449 // Encode instruction operands.
1450 let Inst{3-0} = Sm{4-1};
1451 let Inst{5} = Sm{0};
1452 let Inst{15-12} = Sd{4-1};
1453 let Inst{22} = Sd{0};
1455 let Predicates = [HasFullFP16];
1458 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
1459 def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
1460 (outs SPR:$Sd), (ins DPR:$Dm),
1461 IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
1463 Sched<[WriteFPCVT]> {
1464 let Inst{7} = 1; // Z bit
1467 let Predicates=[HasVFP2, HasDPVFP] in {
1468 def : VFPPat<(i32 (fp_to_sint (f64 DPR:$a))),
1469 (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;
1471 def : VFPPat<(alignedstore32 (i32 (fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
1472 (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
1475 def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
1476 (outs SPR:$Sd), (ins SPR:$Sm),
1477 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
1479 Sched<[WriteFPCVT]> {
1480 let Inst{7} = 1; // Z bit
1482 // Some single precision VFP instructions may be executed on both NEON and
1483 // VFP pipelines on A8.
1484 let D = VFPNeonA8Domain;
1487 def : VFPNoNEONPat<(i32 (fp_to_sint SPR:$a)),
1488 (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;
1490 def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_sint (f32 SPR:$a))),
1492 (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;
1494 def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
1495 (outs SPR:$Sd), (ins HPR:$Sm),
1496 IIC_fpCVTHI, "vcvt", ".s32.f16\t$Sd, $Sm",
1498 Sched<[WriteFPCVT]> {
1499 let Inst{7} = 1; // Z bit
1502 def : VFPNoNEONPat<(i32 (fp_to_sint HPR:$a)),
1503 (COPY_TO_REGCLASS (VTOSIZH HPR:$a), GPR)>;
1505 def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
1506 (outs SPR:$Sd), (ins DPR:$Dm),
1507 IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
1509 Sched<[WriteFPCVT]> {
1510 let Inst{7} = 1; // Z bit
1513 let Predicates=[HasVFP2, HasDPVFP] in {
1514 def : VFPPat<(i32 (fp_to_uint (f64 DPR:$a))),
1515 (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;
1517 def : VFPPat<(alignedstore32 (i32 (fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
1518 (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
1521 def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
1522 (outs SPR:$Sd), (ins SPR:$Sm),
1523 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
1525 Sched<[WriteFPCVT]> {
1526 let Inst{7} = 1; // Z bit
1528 // Some single precision VFP instructions may be executed on both NEON and
1529 // VFP pipelines on A8.
1530 let D = VFPNeonA8Domain;
1533 def : VFPNoNEONPat<(i32 (fp_to_uint SPR:$a)),
1534 (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;
1536 def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_uint (f32 SPR:$a))),
1538 (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;
1540 def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
1541 (outs SPR:$Sd), (ins HPR:$Sm),
1542 IIC_fpCVTHI, "vcvt", ".u32.f16\t$Sd, $Sm",
1544 Sched<[WriteFPCVT]> {
1545 let Inst{7} = 1; // Z bit
1548 def : VFPNoNEONPat<(i32 (fp_to_uint HPR:$a)),
1549 (COPY_TO_REGCLASS (VTOUIZH HPR:$a), GPR)>;
1551 // And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
1552 let Uses = [FPSCR] in {
1553 def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
1554 (outs SPR:$Sd), (ins DPR:$Dm),
1555 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
1556 [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>,
1557 Sched<[WriteFPCVT]> {
1558 let Inst{7} = 0; // Z bit
1561 def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
1562 (outs SPR:$Sd), (ins SPR:$Sm),
1563 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
1564 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]>,
1565 Sched<[WriteFPCVT]> {
1566 let Inst{7} = 0; // Z bit
1569 def VTOSIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
1570 (outs SPR:$Sd), (ins SPR:$Sm),
1571 IIC_fpCVTHI, "vcvtr", ".s32.f16\t$Sd, $Sm",
1573 Sched<[WriteFPCVT]> {
1574 let Inst{7} = 0; // Z bit
1577 def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
1578 (outs SPR:$Sd), (ins DPR:$Dm),
1579 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
1580 [(set SPR:$Sd, (int_arm_vcvtru(f64 DPR:$Dm)))]>,
1581 Sched<[WriteFPCVT]> {
1582 let Inst{7} = 0; // Z bit
1585 def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
1586 (outs SPR:$Sd), (ins SPR:$Sm),
1587 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
1588 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]>,
1589 Sched<[WriteFPCVT]> {
1590 let Inst{7} = 0; // Z bit
1593 def VTOUIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
1594 (outs SPR:$Sd), (ins SPR:$Sm),
1595 IIC_fpCVTHI, "vcvtr", ".u32.f16\t$Sd, $Sm",
1597 Sched<[WriteFPCVT]> {
1598 let Inst{7} = 0; // Z bit
1602 // v8.3-a Javascript Convert to Signed fixed-point
1603 def VJCVT : AVConv1IsD_Encode<0b11101, 0b11, 0b1001, 0b1011,
1604 (outs SPR:$Sd), (ins DPR:$Dm),
1605 IIC_fpCVTDI, "vjcvt", ".s32.f64\t$Sd, $Dm",
1607 Requires<[HasFPARMv8, HasV8_3a]> {
1608 let Inst{7} = 1; // Z bit
1611 // Convert between floating-point and fixed-point
1612 // Data type for fixed-point naming convention:
1613 // S16 (U=0, sx=0) -> SH
1614 // U16 (U=1, sx=0) -> UH
1615 // S32 (U=0, sx=1) -> SL
1616 // U32 (U=1, sx=1) -> UL
1618 let Constraints = "$a = $dst" in {
1620 // FP to Fixed-Point:
1622 // Single Precision register
1623 class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
1624 bit op5, dag oops, dag iops, InstrItinClass itin,
1625 string opc, string asm, list<dag> pattern>
1626 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
1628 // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
1629 let Inst{22} = dst{0};
1630 let Inst{15-12} = dst{4-1};
1633 // Double Precision register
1634 class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
1635 bit op5, dag oops, dag iops, InstrItinClass itin,
1636 string opc, string asm, list<dag> pattern>
1637 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
1639 // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
1640 let Inst{22} = dst{4};
1641 let Inst{15-12} = dst{3-0};
1643 let Predicates = [HasVFP2, HasDPVFP];
1646 def VTOSHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 0,
1647 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1648 IIC_fpCVTHI, "vcvt", ".s16.f16\t$dst, $a, $fbits", []>,
1649 Requires<[HasFullFP16]>,
1650 Sched<[WriteFPCVT]>;
1652 def VTOUHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 0,
1653 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1654 IIC_fpCVTHI, "vcvt", ".u16.f16\t$dst, $a, $fbits", []>,
1655 Requires<[HasFullFP16]>,
1656 Sched<[WriteFPCVT]>;
1658 def VTOSLH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 1,
1659 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1660 IIC_fpCVTHI, "vcvt", ".s32.f16\t$dst, $a, $fbits", []>,
1661 Requires<[HasFullFP16]>,
1662 Sched<[WriteFPCVT]>;
1664 def VTOULH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 1,
1665 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1666 IIC_fpCVTHI, "vcvt", ".u32.f16\t$dst, $a, $fbits", []>,
1667 Requires<[HasFullFP16]>,
1668 Sched<[WriteFPCVT]>;
1670 def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
1671 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1672 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []>,
1673 Sched<[WriteFPCVT]> {
1674 // Some single precision VFP instructions may be executed on both NEON and
1675 // VFP pipelines on A8.
1676 let D = VFPNeonA8Domain;
1679 def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0,
1680 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1681 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []> {
1682 // Some single precision VFP instructions may be executed on both NEON and
1683 // VFP pipelines on A8.
1684 let D = VFPNeonA8Domain;
1687 def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1,
1688 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1689 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []> {
1690 // Some single precision VFP instructions may be executed on both NEON and
1691 // VFP pipelines on A8.
1692 let D = VFPNeonA8Domain;
1695 def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1,
1696 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1697 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []> {
1698 // Some single precision VFP instructions may be executed on both NEON and
1699 // VFP pipelines on A8.
1700 let D = VFPNeonA8Domain;
1703 def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0,
1704 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1705 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>,
1706 Sched<[WriteFPCVT]>;
1708 def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0,
1709 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1710 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>,
1711 Sched<[WriteFPCVT]>;
1713 def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1,
1714 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1715 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>,
1716 Sched<[WriteFPCVT]>;
1718 def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
1719 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1720 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>,
1721 Sched<[WriteFPCVT]>;
1723 // Fixed-Point to FP:
1725 def VSHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 0,
1726 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1727 IIC_fpCVTIH, "vcvt", ".f16.s16\t$dst, $a, $fbits", []>,
1728 Requires<[HasFullFP16]>,
1729 Sched<[WriteFPCVT]>;
1731 def VUHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 0,
1732 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1733 IIC_fpCVTIH, "vcvt", ".f16.u16\t$dst, $a, $fbits", []>,
1734 Requires<[HasFullFP16]>,
1735 Sched<[WriteFPCVT]>;
1737 def VSLTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 1,
1738 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1739 IIC_fpCVTIH, "vcvt", ".f16.s32\t$dst, $a, $fbits", []>,
1740 Requires<[HasFullFP16]>,
1741 Sched<[WriteFPCVT]>;
1743 def VULTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 1,
1744 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1745 IIC_fpCVTIH, "vcvt", ".f16.u32\t$dst, $a, $fbits", []>,
1746 Requires<[HasFullFP16]>,
1747 Sched<[WriteFPCVT]>;
1749 def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0,
1750 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1751 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []>,
1752 Sched<[WriteFPCVT]> {
1753 // Some single precision VFP instructions may be executed on both NEON and
1754 // VFP pipelines on A8.
1755 let D = VFPNeonA8Domain;
1758 def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0,
1759 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
1760 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []>,
1761 Sched<[WriteFPCVT]> {
1762 // Some single precision VFP instructions may be executed on both NEON and
1763 // VFP pipelines on A8.
1764 let D = VFPNeonA8Domain;
1767 def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1,
1768 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1769 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []>,
1770 Sched<[WriteFPCVT]> {
1771 // Some single precision VFP instructions may be executed on both NEON and
1772 // VFP pipelines on A8.
1773 let D = VFPNeonA8Domain;
1776 def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1,
1777 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
1778 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []>,
1779 Sched<[WriteFPCVT]> {
1780 // Some single precision VFP instructions may be executed on both NEON and
1781 // VFP pipelines on A8.
1782 let D = VFPNeonA8Domain;
1785 def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0,
1786 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1787 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>,
1788 Sched<[WriteFPCVT]>;
1790 def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0,
1791 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
1792 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>,
1793 Sched<[WriteFPCVT]>;
1795 def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1,
1796 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1797 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>,
1798 Sched<[WriteFPCVT]>;
1800 def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1,
1801 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
1802 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>,
1803 Sched<[WriteFPCVT]>;
1805 } // End of 'let Constraints = "$a = $dst" in'
1807 //===----------------------------------------------------------------------===//
1808 // FP Multiply-Accumulate Operations.
1811 def VMLAD : ADbI<0b11100, 0b00, 0, 0,
1812 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1813 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
1814 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
1815 (f64 DPR:$Ddin)))]>,
1816 RegConstraint<"$Ddin = $Dd">,
1817 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
1818 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
1820 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
1821 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1822 IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
1823 [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
1825 RegConstraint<"$Sdin = $Sd">,
1826 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
1827 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
1828 // Some single precision VFP instructions may be executed on both NEON and
1829 // VFP pipelines on A8.
1830 let D = VFPNeonA8Domain;
1833 def VMLAH : AHbI<0b11100, 0b00, 0, 0,
1834 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
1835 IIC_fpMAC16, "vmla", ".f16\t$Sd, $Sn, $Sm",
1836 [(set HPR:$Sd, (fadd_mlx (fmul_su HPR:$Sn, HPR:$Sm),
1838 RegConstraint<"$Sdin = $Sd">,
1839 Requires<[HasFullFP16,UseFPVMLx]>;
1841 def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
1842 (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
1843 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
1844 def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
1845 (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
1846 Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx]>;
1847 def : Pat<(fadd_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
1848 (VMLAH HPR:$dstin, HPR:$a, HPR:$b)>,
1849 Requires<[HasFullFP16,DontUseNEONForFP, UseFPVMLx]>;
1852 def VMLSD : ADbI<0b11100, 0b00, 1, 0,
1853 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1854 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
1855 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
1856 (f64 DPR:$Ddin)))]>,
1857 RegConstraint<"$Ddin = $Dd">,
1858 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
1859 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
1861 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
1862 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1863 IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
1864 [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
1866 RegConstraint<"$Sdin = $Sd">,
1867 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
1868 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
1869 // Some single precision VFP instructions may be executed on both NEON and
1870 // VFP pipelines on A8.
1871 let D = VFPNeonA8Domain;
1874 def VMLSH : AHbI<0b11100, 0b00, 1, 0,
1875 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
1876 IIC_fpMAC16, "vmls", ".f16\t$Sd, $Sn, $Sm",
1877 [(set HPR:$Sd, (fadd_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
1879 RegConstraint<"$Sdin = $Sd">,
1880 Requires<[HasFullFP16,UseFPVMLx]>;
1882 def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
1883 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
1884 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
1885 def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
1886 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
1887 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
1888 def : Pat<(fsub_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
1889 (VMLSH HPR:$dstin, HPR:$a, HPR:$b)>,
1890 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
1892 def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
1893 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1894 IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
1895 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
1896 (f64 DPR:$Ddin)))]>,
1897 RegConstraint<"$Ddin = $Dd">,
1898 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
1899 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
1901 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
1902 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1903 IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
1904 [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
1906 RegConstraint<"$Sdin = $Sd">,
1907 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
1908 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
1909 // Some single precision VFP instructions may be executed on both NEON and
1910 // VFP pipelines on A8.
1911 let D = VFPNeonA8Domain;
1914 def VNMLAH : AHbI<0b11100, 0b01, 1, 0,
1915 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
1916 IIC_fpMAC16, "vnmla", ".f16\t$Sd, $Sn, $Sm",
1917 [(set HPR:$Sd, (fsub_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
1919 RegConstraint<"$Sdin = $Sd">,
1920 Requires<[HasFullFP16,UseFPVMLx]>;
1922 // (-(a * b) - dst) -> -(dst + (a * b))
1923 def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
1924 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
1925 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
1926 def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
1927 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
1928 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
1929 def : Pat<(fsub_mlx (fneg (fmul_su HPR:$a, HPR:$b)), HPR:$dstin),
1930 (VNMLAH HPR:$dstin, HPR:$a, HPR:$b)>,
1931 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
1933 // (-dst - (a * b)) -> -(dst + (a * b))
1934 def : Pat<(fsub_mlx (fneg DPR:$dstin), (fmul_su DPR:$a, (f64 DPR:$b))),
1935 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
1936 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
1937 def : Pat<(fsub_mlx (fneg SPR:$dstin), (fmul_su SPR:$a, SPR:$b)),
1938 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
1939 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
1940 def : Pat<(fsub_mlx (fneg HPR:$dstin), (fmul_su HPR:$a, HPR:$b)),
1941 (VNMLAH HPR:$dstin, HPR:$a, HPR:$b)>,
1942 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
1944 def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
1945 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1946 IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
1947 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
1948 (f64 DPR:$Ddin)))]>,
1949 RegConstraint<"$Ddin = $Dd">,
1950 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>,
1951 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
1953 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
1954 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1955 IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
1956 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
1957 RegConstraint<"$Sdin = $Sd">,
1958 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>,
1959 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
1960 // Some single precision VFP instructions may be executed on both NEON and
1961 // VFP pipelines on A8.
1962 let D = VFPNeonA8Domain;
1965 def VNMLSH : AHbI<0b11100, 0b01, 0, 0,
1966 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
1967 IIC_fpMAC16, "vnmls", ".f16\t$Sd, $Sn, $Sm",
1968 [(set HPR:$Sd, (fsub_mlx (fmul_su HPR:$Sn, HPR:$Sm), HPR:$Sdin))]>,
1969 RegConstraint<"$Sdin = $Sd">,
1970 Requires<[HasFullFP16,UseFPVMLx]>;
1972 def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
1973 (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
1974 Requires<[HasVFP2,HasDPVFP,UseFPVMLx]>;
1975 def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
1976 (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
1977 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx]>;
1978 def : Pat<(fsub_mlx (fmul_su HPR:$a, HPR:$b), HPR:$dstin),
1979 (VNMLSH HPR:$dstin, HPR:$a, HPR:$b)>,
1980 Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx]>;
1982 //===----------------------------------------------------------------------===//
1983 // Fused FP Multiply-Accumulate Operations.
// VFMA (double precision): fused multiply-accumulate, Dd = Dn * Dm + Ddin
// with a single rounding step.  Requires VFPv4; the mul+add DAG patterns are
// additionally gated on the UseFusedMAC tuning predicate.
1985 def VFMAD : ADbI<0b11101, 0b10, 0, 0,
1986 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1987 IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
1988 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
1989 (f64 DPR:$Ddin)))]>,
1990 RegConstraint<"$Ddin = $Dd">,
1991 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
1992 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
// VFMA (single precision): Sd = Sn * Sm + Sdin, fused.
1994 def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
1995 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1996 IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
1997 [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
1999 RegConstraint<"$Sdin = $Sd">,
2000 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2001 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2002 // Some single precision VFP instructions may be executed on both NEON and
// VFMA (half precision): Sd = Sn * Sm + Sdin, fused.
2006 def VFMAH : AHbI<0b11101, 0b10, 0, 0,
2007 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2008 IIC_fpFMAC16, "vfma", ".f16\t$Sd, $Sn, $Sm",
2009 [(set HPR:$Sd, (fadd_mlx (fmul_su HPR:$Sn, HPR:$Sm),
2011 RegConstraint<"$Sdin = $Sd">,
2012 Requires<[HasFullFP16,UseFusedMAC]>,
2013 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
// Fold (dst + a * b) into VFMA when fused contraction is allowed.
2015 def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2016 (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
2017 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2018 def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2019 (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
2020 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2021 def : Pat<(fadd_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
2022 (VFMAH HPR:$dstin, HPR:$a, HPR:$b)>,
2023 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;
2025 // Match @llvm.fma.* intrinsics
2026 // (fma x, y, z) -> (vfma z, x, y)
2027 def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
2028 (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2029 Requires<[HasVFP4,HasDPVFP]>;
2030 def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
2031 (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2032 Requires<[HasVFP4]>;
// VFMS (double precision): fused multiply-subtract, Dd = Ddin - Dn * Dm
// (the product is negated before the fused add).
2034 def VFMSD : ADbI<0b11101, 0b10, 1, 0,
2035 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2036 IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
2037 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2038 (f64 DPR:$Ddin)))]>,
2039 RegConstraint<"$Ddin = $Dd">,
2040 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2041 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
// VFMS (single precision): Sd = Sdin - Sn * Sm, fused.
2043 def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
2044 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2045 IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
2046 [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
2048 RegConstraint<"$Sdin = $Sd">,
2049 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2050 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2051 // Some single precision VFP instructions may be executed on both NEON and
// VFMS (half precision): Sd = Sdin - Sn * Sm, fused.
2055 def VFMSH : AHbI<0b11101, 0b10, 1, 0,
2056 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2057 IIC_fpFMAC16, "vfms", ".f16\t$Sd, $Sn, $Sm",
2058 [(set HPR:$Sd, (fadd_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
2060 RegConstraint<"$Sdin = $Sd">,
2061 Requires<[HasFullFP16,UseFusedMAC]>,
2062 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
// Fold (dst - a * b) into VFMS when fused contraction is allowed.
2064 def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
2065 (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
2066 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2067 def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
2068 (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>,
2069 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2070 def : Pat<(fsub_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
2071 (VFMSH HPR:$dstin, HPR:$a, HPR:$b)>,
2072 Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;
2074 // Match @llvm.fma.* intrinsics
2075 // (fma (fneg x), y, z) -> (vfms z, x, y)
2076 def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
2077 (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2078 Requires<[HasVFP4,HasDPVFP]>;
2079 def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
2080 (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2081 Requires<[HasVFP4]>;
2082 // (fma x, (fneg y), z) -> (vfms z, x, y)
2083 def : Pat<(f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin)),
2084 (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2085 Requires<[HasVFP4,HasDPVFP]>;
2086 def : Pat<(f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin)),
2087 (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2088 Requires<[HasVFP4]>;
// VFNMA (double precision): fused negated multiply-accumulate,
// Dd = -(Dn * Dm) - Ddin.
2090 def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
2091 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2092 IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm",
2093 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
2094 (f64 DPR:$Ddin)))]>,
2095 RegConstraint<"$Ddin = $Dd">,
2096 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2097 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
// VFNMA (single precision): Sd = -(Sn * Sm) - Sdin, fused.
2099 def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
2100 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2101 IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm",
2102 [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
2104 RegConstraint<"$Sdin = $Sd">,
2105 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2106 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2107 // Some single precision VFP instructions may be executed on both NEON and
// VFNMA (half precision): Sd = -(Sn * Sm) - Sdin, fused.
2111 def VFNMAH : AHbI<0b11101, 0b01, 1, 0,
2112 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2113 IIC_fpFMAC16, "vfnma", ".f16\t$Sd, $Sn, $Sm",
2114 [(set HPR:$Sd, (fsub_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
2116 RegConstraint<"$Sdin = $Sd">,
2117 Requires<[HasFullFP16,UseFusedMAC]>,
2118 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
// Fold (-(a * b) - dst) into VFNMA when fused contraction is allowed.
2120 def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
2121 (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
2122 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2123 def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
2124 (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>,
2125 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2127 // Match @llvm.fma.* intrinsics
2128 // (fneg (fma x, y, z)) -> (vfnma z, x, y)
2129 def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
2130 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2131 Requires<[HasVFP4,HasDPVFP]>;
2132 def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
2133 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2134 Requires<[HasVFP4]>;
2135 // (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
2136 def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
2137 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2138 Requires<[HasVFP4,HasDPVFP]>;
2139 def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
2140 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2141 Requires<[HasVFP4]>;
// VFNMS (double precision): fused negated multiply-subtract,
// Dd = Dn * Dm - Ddin.
2143 def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
2144 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
2145 IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm",
2146 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
2147 (f64 DPR:$Ddin)))]>,
2148 RegConstraint<"$Ddin = $Dd">,
2149 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
2150 Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
// VFNMS (single precision): Sd = Sn * Sm - Sdin, fused.
2152 def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
2153 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
2154 IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm",
2155 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
2156 RegConstraint<"$Sdin = $Sd">,
2157 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
2158 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
2159 // Some single precision VFP instructions may be executed on both NEON and
// VFNMS (half precision): Sd = Sn * Sm - Sdin, fused.
2163 def VFNMSH : AHbI<0b11101, 0b01, 0, 0,
2164 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
2165 IIC_fpFMAC16, "vfnms", ".f16\t$Sd, $Sn, $Sm",
2166 [(set HPR:$Sd, (fsub_mlx (fmul_su HPR:$Sn, HPR:$Sm), HPR:$Sdin))]>,
2167 RegConstraint<"$Sdin = $Sd">,
2168 Requires<[HasFullFP16,UseFusedMAC]>,
2169 Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;
// Fold ((a * b) - dst) into VFNMS when fused contraction is allowed.
2171 def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
2172 (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
2173 Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
2174 def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
2175 (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>,
2176 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
2178 // Match @llvm.fma.* intrinsics
2180 // (fma x, y, (fneg z)) -> (vfnms z, x, y)
2181 def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
2182 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2183 Requires<[HasVFP4,HasDPVFP]>;
2184 def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
2185 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2186 Requires<[HasVFP4]>;
2187 // (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
2188 def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
2189 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2190 Requires<[HasVFP4,HasDPVFP]>;
2191 def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
2192 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2193 Requires<[HasVFP4]>;
2194 // (fneg (fma x, (fneg y), z)) -> (vfnms z, x, y)
2195 def : Pat<(fneg (f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin))),
2196 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
2197 Requires<[HasVFP4,HasDPVFP]>;
2198 def : Pat<(fneg (f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin))),
2199 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
2200 Requires<[HasVFP4]>;
2202 //===----------------------------------------------------------------------===//
2203 // FP Conditional moves.
// Conditional-move pseudos: select between two FP registers based on an
// ARMcmov predicate.  These carry no encoding and are expanded to predicated
// moves after instruction selection.
2206 let hasSideEffects = 0 in {
// f64 conditional move; the false operand $Dn is tied to the result.
2207 def VMOVDcc : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, cmovpred:$p),
2209 [(set (f64 DPR:$Dd),
2210 (ARMcmov DPR:$Dn, DPR:$Dm, cmovpred:$p))]>,
2211 RegConstraint<"$Dn = $Dd">, Requires<[HasVFP2,HasDPVFP]>;
// f32 conditional move; same structure as VMOVDcc.
2213 def VMOVScc : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, cmovpred:$p),
2215 [(set (f32 SPR:$Sd),
2216 (ARMcmov SPR:$Sn, SPR:$Sm, cmovpred:$p))]>,
2217 RegConstraint<"$Sn = $Sd">, Requires<[HasVFP2]>;
2220 //===----------------------------------------------------------------------===//
2221 // Move from VFP System Register to ARM core register.
// Common base for the VMRS family: move a VFP system register (selected by
// opc19_16) into an ARM core register.
2224 class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
2226 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {
2228 // Instruction operand.
// Fixed VMRS encoding; opc19_16 selects the source system register and
// Rt is the destination core register.
2231 let Inst{27-20} = 0b11101111;
2232 let Inst{19-16} = opc19_16;
2233 let Inst{15-12} = Rt;
2234 let Inst{11-8} = 0b1010;
2236 let Inst{6-5} = 0b00;
2238 let Inst{3-0} = 0b0000;
2241 // APSR is the application level alias of CPSR. This FPSCR N, Z, C, V flags
// FMSTAT: "vmrs APSR_nzcv, fpscr" — copies the FPSCR condition flags into
// the CPSR (Rt = 0b1111 encodes the APSR_nzcv destination).
2243 let Defs = [CPSR], Uses = [FPSCR_NZCV], Rt = 0b1111 /* apsr_nzcv */ in
2244 def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
2245 "vmrs", "\tAPSR_nzcv, fpscr", [(arm_fmstat)]>;
2247 let DecoderMethod = "DecodeForVMRSandVMSR" in {
2248 // Application level FPSCR -> GPR
2249 let hasSideEffects = 1, Uses = [FPSCR] in
2250 def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPRnopc:$Rt), (ins),
2251 "vmrs", "\t$Rt, fpscr",
2252 [(set GPRnopc:$Rt, (int_arm_get_fpscr))]>;
2254 // System level FPEXC, FPSID -> GPR
2255 let Uses = [FPSCR] in {
2256 def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPRnopc:$Rt), (ins),
2257 "vmrs", "\t$Rt, fpexc", []>;
2258 def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPRnopc:$Rt), (ins),
2259 "vmrs", "\t$Rt, fpsid", []>;
// Media/VFP feature registers (read-only ID registers).
2260 def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPRnopc:$Rt), (ins),
2261 "vmrs", "\t$Rt, mvfr0", []>;
2262 def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPRnopc:$Rt), (ins),
2263 "vmrs", "\t$Rt, mvfr1", []>;
2264 let Predicates = [HasFPARMv8] in {
2265 def VMRS_MVFR2 : MovFromVFP<0b0101 /* mvfr2 */, (outs GPRnopc:$Rt), (ins),
2266 "vmrs", "\t$Rt, mvfr2", []>;
2268 def VMRS_FPINST : MovFromVFP<0b1001 /* fpinst */, (outs GPRnopc:$Rt), (ins),
2269 "vmrs", "\t$Rt, fpinst", []>;
2270 def VMRS_FPINST2 : MovFromVFP<0b1010 /* fpinst2 */, (outs GPRnopc:$Rt),
2271 (ins), "vmrs", "\t$Rt, fpinst2", []>;
2275 //===----------------------------------------------------------------------===//
2276 // Move from ARM core register to VFP System Register.
// Common base for the VMSR family: move an ARM core register into a VFP
// system register (selected by opc19_16).
2279 class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
2281 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {
2283 // Instruction operand.
2286 // Encode instruction operand.
2287 let Inst{15-12} = src;
// Fixed VMSR encoding; differs from VMRS only in Inst{20} (direction bit).
2289 let Inst{27-20} = 0b11101110;
2290 let Inst{19-16} = opc19_16;
2291 let Inst{11-8} = 0b1010;
2296 let DecoderMethod = "DecodeForVMRSandVMSR" in {
// All writes below clobber FPSCR conservatively.
2297 let Defs = [FPSCR] in {
2298 // Application level GPR -> FPSCR
2299 def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPRnopc:$src),
2300 "vmsr", "\tfpscr, $src",
2301 [(int_arm_set_fpscr GPRnopc:$src)]>;
2302 // System level GPR -> FPEXC
2303 def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPRnopc:$src),
2304 "vmsr", "\tfpexc, $src", []>;
2305 // System level GPR -> FPSID
2306 def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPRnopc:$src),
2307 "vmsr", "\tfpsid, $src", []>;
2308 def VMSR_FPINST : MovToVFP<0b1001 /* fpinst */, (outs), (ins GPRnopc:$src),
2309 "vmsr", "\tfpinst, $src", []>;
2310 def VMSR_FPINST2 : MovToVFP<0b1010 /* fpinst2 */, (outs), (ins GPRnopc:$src),
2311 "vmsr", "\tfpinst2, $src", []>;
2315 //===----------------------------------------------------------------------===//
2319 // Materialize FP immediates. VFP3 only.
// FCONST*: materialize an 8-bit encoded VFP floating-point immediate
// (VMOV Rd, #imm).  Marked rematerializable so the register allocator can
// recompute the constant instead of spilling it.  The 8-bit encoding is
// split across the instruction as imm{7-4} -> Inst{19-16} and
// imm{3-0} -> Inst{3-0}.
2320 let isReMaterializable = 1 in {
// f64 immediate; VFPv3 with double-precision support.
2321 def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
2322 VFPMiscFrm, IIC_fpUNA64,
2323 "vmov", ".f64\t$Dd, $imm",
2324 [(set DPR:$Dd, vfp_f64imm:$imm)]>,
2325 Requires<[HasVFP3,HasDPVFP]> {
2329 let Inst{27-23} = 0b11101;
2330 let Inst{22} = Dd{4};
2331 let Inst{21-20} = 0b11;
2332 let Inst{19-16} = imm{7-4};
2333 let Inst{15-12} = Dd{3-0};
2334 let Inst{11-9} = 0b101;
2335 let Inst{8} = 1; // Double precision.
2336 let Inst{7-4} = 0b0000;
2337 let Inst{3-0} = imm{3-0};
// f32 immediate; VFPv3.
2340 def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
2341 VFPMiscFrm, IIC_fpUNA32,
2342 "vmov", ".f32\t$Sd, $imm",
2343 [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
2347 let Inst{27-23} = 0b11101;
2348 let Inst{22} = Sd{0};
2349 let Inst{21-20} = 0b11;
2350 let Inst{19-16} = imm{7-4};
2351 let Inst{15-12} = Sd{4-1};
2352 let Inst{11-9} = 0b101;
2353 let Inst{8} = 0; // Single precision.
2354 let Inst{7-4} = 0b0000;
2355 let Inst{3-0} = imm{3-0};
// f16 immediate; requires the full FP16 extension.
2358 def FCONSTH : VFPAI<(outs HPR:$Sd), (ins vfp_f16imm:$imm),
2359 VFPMiscFrm, IIC_fpUNA16,
2360 "vmov", ".f16\t$Sd, $imm",
2361 [(set HPR:$Sd, vfp_f16imm:$imm)]>,
2362 Requires<[HasFullFP16]> {
2366 let Inst{27-23} = 0b11101;
2367 let Inst{22} = Sd{0};
2368 let Inst{21-20} = 0b11;
2369 let Inst{19-16} = imm{7-4};
2370 let Inst{15-12} = Sd{4-1};
2371 let Inst{11-8} = 0b1001; // Half precision
2372 let Inst{7-4} = 0b0000;
2373 let Inst{3-0} = imm{3-0};
2377 //===----------------------------------------------------------------------===//
2378 // Assembler aliases.
2380 // A few mnemonic aliases for pre-unified syntax. We don't guarantee to
2381 // support them all, but supporting at least some of the basics is
2382 // good to be friendly.
// Loads and stores.
2383 def : VFP2MnemonicAlias<"flds", "vldr">;
2384 def : VFP2MnemonicAlias<"fldd", "vldr">;
// Core-register <-> FP-register moves.
2385 def : VFP2MnemonicAlias<"fmrs", "vmov">;
2386 def : VFP2MnemonicAlias<"fmsr", "vmov">;
2387 def : VFP2MnemonicAlias<"fsqrts", "vsqrt">;
2388 def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">;
// Arithmetic ("s"/"d" suffix selects single/double precision).
2389 def : VFP2MnemonicAlias<"fadds", "vadd.f32">;
2390 def : VFP2MnemonicAlias<"faddd", "vadd.f64">;
2391 def : VFP2MnemonicAlias<"fmrdd", "vmov">;
2392 def : VFP2MnemonicAlias<"fmrds", "vmov">;
2393 def : VFP2MnemonicAlias<"fmrrd", "vmov">;
2394 def : VFP2MnemonicAlias<"fmdrr", "vmov">;
2395 def : VFP2MnemonicAlias<"fmuls", "vmul.f32">;
2396 def : VFP2MnemonicAlias<"fmuld", "vmul.f64">;
2397 def : VFP2MnemonicAlias<"fnegs", "vneg.f32">;
2398 def : VFP2MnemonicAlias<"fnegd", "vneg.f64">;
// Conversions: "z" variants truncate toward zero (vcvt); the others use
// the FPSCR rounding mode (vcvtr).
2399 def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">;
2400 def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">;
2401 def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">;
2402 def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">;
2403 def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">;
2404 def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">;
2405 def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">;
2406 def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">;
2407 def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">;
2408 def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">;
2409 def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">;
2410 def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">;
2411 def : VFP2MnemonicAlias<"fsts", "vstr">;
2412 def : VFP2MnemonicAlias<"fstd", "vstr">;
2413 def : VFP2MnemonicAlias<"fmacd", "vmla.f64">;
2414 def : VFP2MnemonicAlias<"fmacs", "vmla.f32">;
2415 def : VFP2MnemonicAlias<"fcpys", "vmov.f32">;
2416 def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">;
2417 def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">;
2418 def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">;
2419 def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">;
2420 def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">;
// System register access.
2421 def : VFP2MnemonicAlias<"fmrx", "vmrs">;
2422 def : VFP2MnemonicAlias<"fmxr", "vmsr">;
2424 // Be friendly and accept the old form of zero-compare
2425 def : VFP2DPInstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
2426 def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;
// Pre-UAL status-transfer and arithmetic spellings mapped to their UAL
// instructions (predicate suffix ${p} is preserved).
2429 def : VFP2InstAlias<"fmstat${p}", (FMSTAT pred:$p)>;
2430 def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
2431 (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
2432 def : VFP2DPInstAlias<"faddd${p} $Dd, $Dn, $Dm",
2433 (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
2434 def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm",
2435 (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
2436 def : VFP2DPInstAlias<"fsubd${p} $Dd, $Dn, $Dm",
2437 (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
2439 // No need for the size suffix on VSQRT. It's implied by the register classes.
2440 def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>;
2441 def : VFP2DPInstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;
2443 // VLDR/VSTR accept an optional type suffix.
2444 def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr",
2445 (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
2446 def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr",
2447 (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
2448 def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr",
2449 (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
2450 def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr",
2451 (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
2453 // VMOV can accept an optional 32-bit or smaller data type suffix.
// All size suffixes map to the same 32-bit core<->FP register move; the
// suffix carries no encoding information here.
2454 def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn",
2455 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2456 def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn",
2457 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2458 def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn",
2459 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
2460 def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt",
2461 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
2462 def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt",
2463 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
2464 def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt",
2465 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
// Double-register moves between a D register and a GPR pair.
2467 def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn",
2468 (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>;
2469 def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2",
2470 (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;
2472 // VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way
2474 def : VFP2InstAlias<"vmov${p} $Sd, $Sm",
2475 (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>;
2477 // FCONSTD/FCONSTS alias for vmov.f64/vmov.f32
2478 // These aliases provide added functionality over vmov.f instructions by
2479 // allowing users to write assembly containing encoded floating point constants
2480 // (e.g. #0x70 vs #1.0). Without these aliases there is no way for the
2481 // assembler to accept encoded fp constants (but the equivalent fp-literal is
2482 // accepted directly by vmovf).
2483 def : VFP3InstAlias<"fconstd${p} $Dd, $val",
2484 (FCONSTD DPR:$Dd, vfp_f64imm:$val, pred:$p)>;
2485 def : VFP3InstAlias<"fconsts${p} $Sd, $val",
2486 (FCONSTS SPR:$Sd, vfp_f32imm:$val, pred:$p)>;