//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "Utils/WebAssemblyTypeUtilities.h"
#include "Utils/WebAssemblyUtilities.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "wasm-lower"

WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors, where booleans are all-zeros or all-ones.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasReferenceTypes()) {
    addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
    addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Transform loads and stores to pointers in address space 1 to loads and
  // stores to WebAssembly global variables, outside linear memory.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
    setOperationAction(ISD::LOAD, T, Custom);
    setOperationAction(ISD::STORE, T, Custom);
  }
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }
  if (Subtarget->hasReferenceTypes()) {
    // We need custom load and store lowering for externref, funcref, and
    // Other. The MVT::Other here represents tables of reference types.
    for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
      setOperationAction(ISD::LOAD, T, Custom);
      setOperationAction(ISD::STORE, T, Custom);
    }
  }

  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we do that custom.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Note supported floating-point library function operators that otherwise
    // default to expand.
    for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
                    ISD::FRINT, ISD::FROUNDEVEN})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64})
      setOperationAction(Op, T, Expand);
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);
  }
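
  // With the nontrapping-fptoint feature, saturating fp-to-int conversions
  // can be selected directly to the i{32,64}.trunc_sat_f{32,64}_{s,u}
  // instructions instead of being expanded.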
  if (Subtarget->hasNontrappingFPToInt())
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      for (auto T : {MVT::i32, MVT::i64})
        setOperationAction(Op, T, Custom);

  // SIMD-specific configuration
  if (Subtarget->hasSIMD128()) {
    // Combine vector mask reductions into alltrue/anytrue
    setTargetDAGCombine(ISD::SETCC);

    // Convert vector to integer bitcasts to bitmask
    setTargetDAGCombine(ISD::BITCAST);

    // Hoist bitcasts out of shuffles
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

    // Combine extends of extract_subvectors into widening ops
    setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND});

    // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
    // conversion ops
    setTargetDAGCombine({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_EXTEND,
                         ISD::EXTRACT_SUBVECTOR});

    // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
    // into conversion ops
    setTargetDAGCombine({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                         ISD::FP_ROUND, ISD::CONCAT_VECTORS});

    setTargetDAGCombine(ISD::TRUNCATE);

    // Support saturating add for i8x16 and i16x8
    for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
      for (auto T : {MVT::v16i8, MVT::v8i16})
        setOperationAction(Op, T, Legal);

    // Support integer abs
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
      setOperationAction(ISD::ABS, T, Legal);

    // Custom lower BUILD_VECTORs to minimize number of replace_lanes
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::BUILD_VECTOR, T, Custom);

    // We have custom shuffle lowering to expose the shuffle mask
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);

    // Support splatting
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SPLAT_VECTOR, T, Legal);

    // Custom lowering since wasm shifts must have a scalar shift amount
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Custom lower lane accesses to expand out variable indices
    for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                     MVT::v2f64})
        setOperationAction(Op, T, Custom);

    // There is no i8x16.mul instruction
    setOperationAction(ISD::MUL, MVT::v16i8, Expand);

    // There is no vector conditional select instruction
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                   MVT::v2f64})
      setOperationAction(ISD::SELECT_CC, T, Expand);

    // Expand integer operations supported for scalars but not SIMD
    for (auto Op :
         {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Expand);

    // But we do have integer min and max operations
    for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Legal);

    // And we have popcnt for i8x16. It can be used to expand ctlz/cttz.
    setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Expand);
    setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);

    // Custom lower bit counting operations for other types to scalarize them.
    for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP})
      for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
        setOperationAction(Op, T, Custom);

    // Expand float operations supported for scalars but not SIMD
    for (auto Op : {ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                    ISD::FEXP, ISD::FEXP2})
      for (auto T : {MVT::v4f32, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // Unsigned comparison operations are unavailable for i64x2 vectors.
    for (auto CC : {ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE})
      setCondCodeAction(CC, MVT::v2i64, Custom);

    // 64x2 conversions are not in the spec
    for (auto Op :
         {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT})
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(Op, T, Expand);

    // But saturating fp_to_int conversions are
    for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT})
      setOperationAction(Op, MVT::v4i32, Custom);

    // Support vector extending
    for (auto T : MVT::integer_fixedlen_vector_valuetypes()) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, T, Custom);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, T, Custom);
    }
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
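  // Without the sign-extension-operators feature (i32.extend8_s and friends),
  // SIGN_EXTEND_INREG must be expanded to shifts, except that with SIMD a
  // sign-extending lane extract can use extract_lane_s directly.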
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_fixedlen_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::FrameIndex, MVT::i64, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  // - Floating-point extending loads.
  // - Floating-point truncating stores.
  // - i1 extending loads.
  // - truncating SIMD stores and most extending loads
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
    // But some vector extending loads are legal
    for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
    }
    setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Legal);
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
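
  // Atomic operations are available at widths up to 64 bits; anything wider
  // is lowered to __atomic_* libcalls.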
  setMaxAtomicSizeInBitsSupported(64);

  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
  // consistent with the f64 and f128 names.
  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

  // Define the emscripten name for return address helper.
  // TODO: when implementing other Wasm backends, make this generic or only do
  // this on emscripten depending on what they end up doing.
  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");

  // Always convert switches to br_tables unless there is only one case, which
  // is equivalent to a simple branch. This reduces code size for wasm, and we
  // defer possible jump table optimizations to the VM.
  setMinimumJumpTableEntries(2);
}
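
// Reference-typed "pointers" are value types, not integers, so pointers into
// the externref and funcref address spaces lower to the corresponding
// reference MVTs rather than to i32/i64.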
MVT WebAssemblyTargetLowering::getPointerTy(const DataLayout &DL,
                                            uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerTy(DL, AS);
}

MVT WebAssemblyTargetLowering::getPointerMemTy(const DataLayout &DL,
                                               uint32_t AS) const {
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_EXTERNREF)
    return MVT::externref;
  if (AS == WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF)
    return MVT::funcref;
  return TargetLowering::getPointerMemTy(DL, AS);
}

TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
  return AtomicExpansionKind::CmpXChg;
}

bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
  // Implementation copied from X86TargetLowering.
  unsigned Opc = VecOp.getOpcode();

  // Assume target opcodes can't be scalarized.
  // TODO - do we have any exceptions?
  if (Opc >= ISD::BUILTIN_OP_END)
    return false;

  // If the vector op is not supported, try to convert to scalar.
  EVT VecVT = VecOp.getValueType();
  if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
    return true;

  // If the vector op is supported, but the scalar op is not, the transform may
  // not be worthwhile.
  EVT ScalarVT = VecVT.getScalarType();
  return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  Register OutReg = MI.getOperand(0).getReg();
  Register InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
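  // The in-range check compares fabs(input) against the magnitude of the
  // first out-of-range value: -Limit is 2^31 or 2^63 (exactly representable
  // in both float and double), and for unsigned conversions the valid range
  // is twice as large.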
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);
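
  // The result is a diamond: BB performs the range check and branches to
  // TrueMBB when the input is out of range, FalseMBB performs the trapping
  // conversion on the in-range path, TrueMBB materializes the substitute
  // value, and DoneMBB joins the two with a phi.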
  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    Register SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}

static MachineBasicBlock *
LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
                 const WebAssemblySubtarget *Subtarget,
                 const TargetInstrInfo &TII) {
  MachineInstr &CallParams = *CallResults.getPrevNode();
  assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
  assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
         CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);

  bool IsIndirect =
      CallParams.getOperand(0).isReg() || CallParams.getOperand(0).isFI();
  bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;

  bool IsFuncrefCall = false;
  if (IsIndirect && CallParams.getOperand(0).isReg()) {
    Register Reg = CallParams.getOperand(0).getReg();
    const MachineFunction *MF = BB->getParent();
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
    IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
    assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
  }

  unsigned CallOp;
  if (IsIndirect && IsRetCall) {
    CallOp = WebAssembly::RET_CALL_INDIRECT;
  } else if (IsIndirect) {
    CallOp = WebAssembly::CALL_INDIRECT;
  } else if (IsRetCall) {
    CallOp = WebAssembly::RET_CALL;
  } else {
    CallOp = WebAssembly::CALL;
  }

  MachineFunction &MF = *BB->getParent();
  const MCInstrDesc &MCID = TII.get(CallOp);
  MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));

  // See if we must truncate the function pointer.
  // CALL_INDIRECT takes an i32, but in wasm64 we represent function pointers
  // as 64-bit for uniformity with other pointer types.
  // See also: WebAssemblyFastISel::selectCall
  if (IsIndirect && MF.getSubtarget<WebAssemblySubtarget>().hasAddr64()) {
    Register Reg32 =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    auto &FnPtr = CallParams.getOperand(0);
    BuildMI(*BB, CallResults.getIterator(), DL,
            TII.get(WebAssembly::I32_WRAP_I64), Reg32)
        .addReg(FnPtr.getReg());
    FnPtr.setReg(Reg32);
  }

  // Move the function pointer to the end of the arguments for indirect calls
  if (IsIndirect) {
    auto FnPtr = CallParams.getOperand(0);
    CallParams.removeOperand(0);

    // For funcrefs, call_indirect is done through __funcref_call_table and the
    // funcref is always installed in slot 0 of the table, therefore instead of
    // having the function pointer added at the end of the params list, a zero
    // (the index into __funcref_call_table) is added.
    if (IsFuncrefCall) {
      Register RegZero =
          MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
      MachineInstrBuilder MIBC0 =
          BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);

      BB->insert(CallResults.getIterator(), MIBC0);
      MachineInstrBuilder(MF, CallParams).addReg(RegZero);
    } else
      CallParams.addOperand(FnPtr);
  }

  for (auto Def : CallResults.defs())
    MIB.add(Def);

  if (IsIndirect) {
    // Placeholder for the type index.
    MIB.addImm(0);
    // The table into which this call_indirect indexes.
    MCSymbolWasm *Table = IsFuncrefCall
                              ? WebAssembly::getOrCreateFuncrefCallTableSymbol(
                                    MF.getContext(), Subtarget)
                              : WebAssembly::getOrCreateFunctionTableSymbol(
                                    MF.getContext(), Subtarget);
    if (Subtarget->hasReferenceTypes()) {
      MIB.addSym(Table);
    } else {
      // For the MVP there is at most one table whose number is 0, but we can't
      // write a table symbol or issue relocations. Instead we just ensure the
      // table is live and write a zero.
      Table->setNoStrip();
      MIB.addImm(0);
    }
  }

  for (auto Use : CallParams.uses())
    MIB.add(Use);

  BB->insert(CallResults.getIterator(), MIB);
  CallParams.eraseFromParent();
  CallResults.eraseFromParent();

  // If this is a funcref call, to avoid hidden GC roots, we need to clear the
  // table slot with ref.null upon call_indirect return.
  //
  // This generates the following code, which comes right after a call_indirect
  // of a funcref:
  //
  //    i32.const 0
  //    ref.null func
  //    table.set __funcref_call_table
  if (IsIndirect && IsFuncrefCall) {
    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    Register RegZero =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
    MachineInstr *Const0 =
        BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
    BB->insertAfter(MIB.getInstr()->getIterator(), Const0);

    Register RegFuncref =
        MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
    MachineInstr *RefNull =
        BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);
    BB->insertAfter(Const0->getIterator(), RefNull);

    MachineInstr *TableSet =
        BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
            .addSym(Table)
            .addReg(RegZero)
            .addReg(RegFuncref);
    BB->insertAfter(RefNull->getIterator(), TableSet);
  }

  return BB;
}

MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
  case WebAssembly::CALL_RESULTS:
  case WebAssembly::RET_CALL_RESULTS:
    return LowerCallResults(MI, DL, BB, Subtarget, TII);
  }
}

const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
  case WebAssemblyISD::FIRST_MEM_OPCODE:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
#include "WebAssemblyISD.def"
#undef HANDLE_MEM_NODETYPE
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      if (VT.isFloatingPoint() && !VT.isVector()) {
        switch (VT.getSizeInBits()) {
        case 32:
          return std::make_pair(0U, &WebAssembly::F32RegClass);
        case 64:
          return std::make_pair(0U, &WebAssembly::F64RegClass);
        default:
          break;
        }
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode gives us no way to determine if wrapping could be
  // happening, so we approximate this by accepting only non-negative offsets.
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
    MachineMemOperand::Flags /*Flags*/, unsigned *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = 1;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  EVT ExtT = ExtVal.getValueType();
  EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
  return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
         (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
         (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
}

bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // Wasm doesn't support function addresses with offsets
  const GlobalValue *GV = GA->getGlobal();
  return isa<Function>(GV) ? false : TargetLowering::isOffsetFoldingLegal(GA);
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  // So far, all branch instructions in Wasm take an I32 condition.
  // The default TargetLowering::getSetCCResultType returns the pointer size,
  // which would be useful to reduce instruction counts when testing
  // against 64-bit pointers/values if at some point Wasm supports that.
  return EVT::getIntegerVT(C, 32);
}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_memory_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // atomic.notify instruction does not really load the memory specified with
    // this argument, but MachineMemOperand should either be load or store, so
    // we set this to a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_memory_atomic_wait64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  default:
    return false;
  }
}

void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
    const SelectionDAG &DAG, unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Op.getConstantOperandVal(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::wasm_bitmask: {
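      // The bitmask instructions return one bit per input lane in the low
      // bits of an i32, so everything above the lane count is known zero.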
      unsigned BitWidth = Known.getBitWidth();
      EVT VT = Op.getOperand(1).getSimpleValueType();
      unsigned PossibleBits = VT.getVectorNumElements();
      APInt ZeroMask = APInt::getHighBitsSet(BitWidth, BitWidth - PossibleBits);
      Known.Zero |= ZeroMask;
      break;
    }
    }
  }
  }
}

TargetLoweringBase::LegalizeTypeAction
WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
  if (VT.isFixedLengthVector()) {
    MVT EltVT = VT.getVectorElementType();
    // We have legal vector types with these lane types, so widening the
    // vector would let us use some of the lanes directly without having to
    // extend or truncate values.
    if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
        EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
      return TypeWidenVector;
  }

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
    SDValue Op, const TargetLoweringOpt &TLO) const {
  // ISel process runs DAGCombiner after legalization; this step is called
  // SelectionDAG optimization phase. This post-legalization combining process
  // runs DAGCombiner on each node, and if there was a change to be made,
  // re-runs legalization again on it and its user nodes to make sure
  // everything is in a legalized state.
  //
  // The legalization calls lowering routines, and we do our custom lowering
  // for build_vectors (LowerBUILD_VECTOR), which converts undef vector
  // elements into zeros. But there is a set of routines in DAGCombiner that
  // turns unused (= not demanded) nodes into undef, among which
  // SimplifyDemandedVectorElts turns unused vector elements into undefs. But
  // this routine does not work with our custom LowerBUILD_VECTOR, which turns
  // undefs into zeros. This combination can result in an infinite loop, in
  // which undefs are converted to zeros in legalization and back to undefs in
  // combining.
  //
  // So after DAG is legalized, we prevent SimplifyDemandedVectorElts from
  // running for build_vectors.
  if (Op.getOpcode() == ISD::BUILD_VECTOR && TLO.LegalOps && TLO.LegalTys)
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool callingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent, target-independent
  // conventions. We don't yet have a way to annotate calls with properties
  // like "cold", and we don't have any call-clobbered registers, so these are
  // mostly all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS ||
         CallConv == CallingConv::WASM_EmscriptenInvoke ||
         CallConv == CallingConv::Swift;
}

SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    auto NoTail = [&](const char *Msg) {
      if (CLI.CB && CLI.CB->isMustTailCall())
        fail(DL, DAG, Msg);
      CLI.IsTailCall = false;
    };

    if (!Subtarget->hasTailCall())
      NoTail("WebAssembly 'tail-call' feature not enabled");

    // Varargs calls cannot be tail calls because the buffer is on the stack
    if (CLI.IsVarArg)
      NoTail("WebAssembly does not support varargs tail calls");

    // Do not tail call unless caller and callee return types match
    const Function &F = MF.getFunction();
    const TargetMachine &TM = getTargetMachine();
    Type *RetTy = F.getReturnType();
    SmallVector<MVT, 4> CallerRetTys;
    SmallVector<MVT, 4> CalleeRetTys;
    computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
    computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
    bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                      std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                 CalleeRetTys.begin());
    if (!TypesMatch)
      NoTail("WebAssembly tail call requires caller and callee return types to "
             "match");

    // If pointers to local stack values are passed, we cannot tail call
    if (CLI.CB) {
      for (auto &Arg : CLI.CB->args()) {
        Value *Val = Arg.get();
        // Trace the value back through pointer operations
        while (true) {
          Value *Src = Val->stripPointerCastsAndAliases();
          if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
            Src = GEP->getPointerOperand();
          if (Val == Src)
            break;
          Val = Src;
        }
        if (isa<AllocaInst>(Val)) {
          NoTail(
              "WebAssembly does not support tail calling with stack arguments");
          break;
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  bool HasSwiftSelfArg = false;
  bool HasSwiftErrorArg = false;
  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
    HasSwiftErrorArg |= Out.Flags.isSwiftError();
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
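    // Byval arguments have no direct wasm representation; copy the value into
    // a stack object owned by this call and pass a pointer to the copy
    // instead.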
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getNonZeroByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getNonZeroByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // For swiftcc, emit additional swiftself and swifterror arguments
  // if there aren't any. These additional arguments are also added for the
  // callee signature. They are necessary to match callee and caller signature
  // for indirect calls.
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftSelf();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
    if (!HasSwiftErrorArg) {
      NumFixedArgs++;
      ISD::OutputArg Arg;
      Arg.Flags.setSwiftError();
      CLI.Outs.push_back(Arg);
      SDValue ArgVal = DAG.getUNDEF(PtrVT);
      CLI.OutVals.push_back(ArgVal);
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      Align Alignment =
          std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
      unsigned Offset =
          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset)));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't add MO_GOT, which is not needed for direct calls.
    GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  // Lastly, if this is a call to a funcref we need to add an instruction
  // table.set to the chain and transform the call.
  if (CLI.CB && WebAssembly::isWebAssemblyFuncrefType(
                    CLI.CB->getCalledOperand()->getType())) {
    // In the absence of the function references proposal, where a funcref call
    // would be lowered to call_ref, we use reference types to generate a
    // table.set that installs the funcref in a special table used solely for
    // this purpose, followed by a call_indirect. Here we just generate the
    // table.set, and return the SDValue of the table.set so that LowerCall can
    // finalize the lowering by generating the call_indirect.
    SDValue Chain = Ops[0];

    MCSymbolWasm *Table = WebAssembly::getOrCreateFuncrefCallTableSymbol(
        MF.getContext(), Subtarget);
    SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
    SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
    SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
    SDValue TableSet = DAG.getMemIntrinsicNode(
        WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
        MVT::funcref,
        // Machine Mem Operand args
        MachinePointerInfo(
            WebAssembly::WasmAddressSpace::WASM_ADDRESS_SPACE_FUNCREF),
        CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
        MachineMemOperand::MOStore);

    Ops[0] = TableSet; // The new chain is the TableSet itself
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);

  for (size_t I = 0; I < Ins.size(); ++I)
    InVals.push_back(Res.getValue(I));

  // The last value is the chain.
  return Res.getValue(Ins.size());
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can only handle returning tuples with multivalue enabled
  return Subtarget->hasMultivalue() || Outs.size() <= 1;
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
         "MVP WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
  // of the incoming values before they're represented by virtual registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  bool HasSwiftErrorArg = false;
  bool HasSwiftSelfArg = false;
  for (const ISD::InputArg &In : Ins) {
    HasSwiftSelfArg |= In.Flags.isSwiftSelf();
    HasSwiftErrorArg |= In.Flags.isSwiftError();
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // For swiftcc, emit additional swiftself and swifterror arguments
  // if there aren't any. These additional arguments are also added for the
  // callee signature. They are necessary to match callee and caller signature
  // for indirect calls.
  auto PtrVT = getPointerTy(MF.getDataLayout());
  if (CallConv == CallingConv::Swift) {
    if (!HasSwiftSelfArg) {
      MFI->addParam(PtrVT);
    }
    if (!HasSwiftErrorArg) {
      MFI->addParam(PtrVT);
    }
  }
  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), &MF.getFunction(),
                      MF.getFunction(), DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}

void WebAssemblyTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Do not add any results, signifying that N should not be custom lowered
    // after all. This happens because simd128 turns on custom lowering for
    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be
    // an illegal type.
    break;
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    // Do not add any results, signifying that N should not be custom lowered.
    // EXTEND_VECTOR_INREG is implemented for some vectors, but not all.
    break;
  default:
    llvm_unreachable(
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}

//===----------------------------------------------------------------------===//
// Custom lowering hooks.
//===----------------------------------------------------------------------===//

SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:
    return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return LowerEXTEND_VECTOR_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    return LowerFP_TO_INT_SAT(Op, DAG);
  case ISD::LOAD:
    return LowerLoad(Op, DAG);
  case ISD::STORE:
    return LowerStore(Op, DAG);
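  // These bit-counting operations were marked Custom above for the wider
  // vector types so that they can be unrolled to scalar operations here.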
  case ISD::CTPOP:
  case ISD::CTLZ:
  case ISD::CTTZ:
    return DAG.UnrollVectorOp(Op.getNode());
  }
}

static bool IsWebAssemblyGlobal(SDValue Op) {
  if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
    return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());

  return false;
}
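
// Returns the index of the wasm local to which a stack object was assigned,
// if any. Stack objects that cannot live in linear memory (for example,
// allocas of reference types) are lowered to wasm locals instead.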
1478 static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op,
1479 SelectionDAG &DAG) {
1480 const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
1482 return std::nullopt;
1484 auto &MF = DAG.getMachineFunction();
1485 return WebAssemblyFrameLowering::getLocalForStackObject(MF, FI->getIndex());
1488 SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
1489 SelectionDAG &DAG) const {
1491 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
1492 const SDValue &Value = SN->getValue();
1493 const SDValue &Base = SN->getBasePtr();
1494 const SDValue &Offset = SN->getOffset();
1496 if (IsWebAssemblyGlobal(Base)) {
1497 if (!Offset->isUndef())
1498 report_fatal_error("unexpected offset when storing to webassembly global",
1501 SDVTList Tys = DAG.getVTList(MVT::Other);
1502 SDValue Ops[] = {SN->getChain(), Value, Base};
1503 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
1504 SN->getMemoryVT(), SN->getMemOperand());
1507 if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1508 if (!Offset->isUndef())
1509 report_fatal_error("unexpected offset when storing to webassembly local",
1512 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1513 SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
1514 SDValue Ops[] = {SN->getChain(), Idx, Value};
1515 return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
1518 if (WebAssembly::isWasmVarAddressSpace(SN->getAddressSpace()))
1520 "Encountered an unlowerable store to the wasm_var address space",
SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
  const SDValue &Base = LN->getBasePtr();
  const SDValue &Offset = LN->getOffset();

  if (IsWebAssemblyGlobal(Base)) {
    if (!Offset->isUndef())
      report_fatal_error(
          "unexpected offset when loading from webassembly global", false);

    SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
    SDValue Ops[] = {LN->getChain(), Base};
    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
                                   LN->getMemoryVT(), LN->getMemOperand());
  }

  if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
    if (!Offset->isUndef())
      report_fatal_error(
          "unexpected offset when loading from webassembly local", false);

    SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
    EVT LocalVT = LN->getValueType(0);
    SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
                                   {LN->getChain(), Idx});
    SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
    assert(Result->getNumValues() == 2 && "Loads must carry a chain!");
    return Result;
  }

  if (WebAssembly::isWasmVarAddressSpace(LN->getAddressSpace()))
    report_fatal_error(
        "Encountered an unlowerable load from the wasm_var address space",
        false);

  return Op;
}
SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // local.copy between Op and its FI operand.
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    Register Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
                                                   : WebAssembly::COPY_I64,
                                    DL, VT, Src),
                 0);
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy,
                                  Op.getNumOperands() == 4 ? Op.getOperand(3)
                                                           : SDValue());
  }
  return SDValue();
}
SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}
SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);

  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
    fail(DL, DAG,
         "Non-Emscripten WebAssembly hasn't implemented "
         "__builtin_return_address");
    return SDValue();
  }

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = Op.getConstantOperandVal(0);
  MakeLibCallOptions CallOptions;
  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
                     {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
      .first;
}
SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  Register FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}
SDValue
WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);

  MachineFunction &MF = DAG.getMachineFunction();
  if (!MF.getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
    report_fatal_error("cannot use thread-local storage without bulk memory",
                       false);

  const GlobalValue *GV = GA->getGlobal();

  // Currently only Emscripten supports dynamic linking with threads. Therefore,
  // on other targets, if we have thread-local storage, only the local-exec
  // model is possible.
  auto model = Subtarget->getTargetTriple().isOSEmscripten()
                   ? GV->getThreadLocalMode()
                   : GlobalValue::LocalExecTLSModel;

  // Unsupported TLS modes
  assert(model != GlobalValue::NotThreadLocal);
  assert(model != GlobalValue::InitialExecTLSModel);

  if (model == GlobalValue::LocalExecTLSModel ||
      model == GlobalValue::LocalDynamicTLSModel ||
      (model == GlobalValue::GeneralDynamicTLSModel &&
       getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))) {
    // For DSO-local TLS variables we use offset from __tls_base

    MVT PtrVT = getPointerTy(DAG.getDataLayout());
    auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
                                       : WebAssembly::GLOBAL_GET_I32;
    const char *BaseName = MF.createExternalSymbolName("__tls_base");

    SDValue BaseAddr(
        DAG.getMachineNode(GlobalGet, DL, PtrVT,
                           DAG.getTargetExternalSymbol(BaseName, PtrVT)),
        0);

    SDValue TLSOffset = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
    SDValue SymOffset =
        DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset);

    return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymOffset);
  }

  assert(model == GlobalValue::GeneralDynamicTLSModel);

  EVT VT = Op.getValueType();
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
                                                GA->getOffset(),
                                                WebAssemblyII::MO_GOT_TLS));
}
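
// For example (illustrative): under the local-exec model the address of a TLS
// variable @t is materialized roughly as
//   (add (GLOBAL_GET __tls_base),
//        (WrapperREL (targetglobaladdr @t, MO_TLS_BASE_REL)))
// i.e. a link-time-constant offset from the __tls_base global.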
SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(GA->getTargetFlags() == 0 &&
         "Unexpected target flags on generic GlobalAddressSDNode");
  if (!WebAssembly::isValidAddressSpace(GA->getAddressSpace()))
    fail(DL, DAG, "Invalid address space for WebAssembly target");

  unsigned OperandFlags = 0;
  if (isPositionIndependent()) {
    const GlobalValue *GV = GA->getGlobal();
    if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
      MachineFunction &MF = DAG.getMachineFunction();
      MVT PtrVT = getPointerTy(MF.getDataLayout());
      const char *BaseName;
      if (GV->getValueType()->isFunctionTy()) {
        BaseName = MF.createExternalSymbolName("__table_base");
        OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
      } else {
        BaseName = MF.createExternalSymbolName("__memory_base");
        OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
      }

      SDValue BaseAddr =
          DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                      DAG.getTargetExternalSymbol(BaseName, PtrVT));

      SDValue SymAddr = DAG.getNode(
          WebAssemblyISD::WrapperREL, DL, VT,
          DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
                                     OperandFlags));

      return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
    }
    OperandFlags = WebAssemblyII::MO_GOT;
  }

  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
                                                GA->getOffset(), OperandFlags));
}
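
// For example (illustrative): in position-independent code, a DSO-local data
// symbol @g lowers roughly to
//   (add (Wrapper __memory_base),
//        (WrapperREL (targetglobaladdr @g, MO_MEMORY_BASE_REL)))
// while a non-local symbol is instead addressed through its GOT entry via
// MO_GOT.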
SDValue
WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *ES = cast<ExternalSymbolSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(ES->getTargetFlags() == 0 &&
         "Unexpected target flags on generic ExternalSymbolSDNode");
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
}
SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // There's no need for a Wrapper node because we always incorporate a jump
  // table operand into a BR_TABLE instruction, rather than ever
  // materializing it in a register.
  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
                                JT->getTargetFlags());
}
SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  SDValue Index = Op.getOperand(2);
  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Index);

  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;

  // Add an operand for each case.
  for (auto *MBB : MBBs)
    Ops.push_back(DAG.getBasicBlock(MBB));

  // Add the first MBB as a dummy default target for now. This will be replaced
  // with the proper default target (and the preceding range check eliminated)
  // if possible by WebAssemblyFixBrTableDefaults.
  Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}
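
// The resulting BR_TABLE node thus carries, in order: the chain, the index,
// one basic block per jump table entry, and the dummy default block appended
// above, mirroring the operand layout of the wasm `br_table` instruction.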
SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());

  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
                                    MFI->getVarargBufferVreg(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned IntNo;
  switch (Op.getOpcode()) {
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    IntNo = Op.getConstantOperandVal(1);
    break;
  case ISD::INTRINSIC_WO_CHAIN:
    IntNo = Op.getConstantOperandVal(0);
    break;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
  SDLoc DL(Op);

  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.

  case Intrinsic::wasm_lsda: {
    auto PtrVT = getPointerTy(MF.getDataLayout());
    const char *SymName = MF.createExternalSymbolName(
        "GCC_except_table" + std::to_string(MF.getFunctionNumber()));
    if (isPositionIndependent()) {
      SDValue Node = DAG.getTargetExternalSymbol(
          SymName, PtrVT, WebAssemblyII::MO_MEMORY_BASE_REL);
      const char *BaseName = MF.createExternalSymbolName("__memory_base");
      SDValue BaseAddr =
          DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                      DAG.getTargetExternalSymbol(BaseName, PtrVT));
      SDValue SymAddr =
          DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, Node);
      return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
    }
    SDValue Node = DAG.getTargetExternalSymbol(SymName, PtrVT);
    return DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, Node);
  }

  case Intrinsic::wasm_shuffle: {
    // Drop in-chain and replace undefs, but otherwise pass through unchanged
    SDValue Ops[18];
    size_t OpIdx = 0;
    Ops[OpIdx++] = Op.getOperand(1);
    Ops[OpIdx++] = Op.getOperand(2);
    while (OpIdx < 18) {
      const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
      if (MaskIdx.isUndef() ||
          cast<ConstantSDNode>(MaskIdx.getNode())->getZExtValue() >= 32) {
        bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
        Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget);
      } else {
        Ops[OpIdx++] = MaskIdx;
      }
    }
    return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
  }
  }
}
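
// For example (illustrative): an undef or out-of-range (>= 32) lane in an
// @llvm.wasm.shuffle mask is replaced by constant 0 above, so the resulting
// SHUFFLE node always carries sixteen in-range byte indices for i8x16.shuffle.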
SDValue
WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // If sign extension operations are disabled, allow sext_inreg only if operand
  // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
  // extension operations, but allowing sext_inreg in this context lets us have
  // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
  // everywhere would be simpler in this file, but would necessitate large and
  // brittle patterns to undo the expansion and select extract_lane_s
  // instructions.
  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
  if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();

  const SDValue &Extract = Op.getOperand(0);
  MVT VecT = Extract.getOperand(0).getSimpleValueType();
  if (VecT.getVectorElementType().getSizeInBits() > 32)
    return SDValue();
  MVT ExtractedLaneT =
      cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
  MVT ExtractedVecT =
      MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
  if (ExtractedVecT == VecT)
    return SDValue();

  // Bitcast vector to appropriate type to ensure ISel pattern coverage
  const SDNode *Index = Extract.getOperand(1).getNode();
  if (!isa<ConstantSDNode>(Index))
    return SDValue();
  unsigned IndexVal = cast<ConstantSDNode>(Index)->getZExtValue();
  unsigned Scale =
      ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
  assert(Scale > 1);
  SDValue NewIndex =
      DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
  SDValue NewExtract = DAG.getNode(
      ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
      DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
  return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
                     Op.getOperand(1));
}
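
// For example (illustrative): sign-extending an i8 extracted from lane 1 of a
// v4i32 is rewritten to extract lane 4 of the same vector bitcast to v16i8,
// which directly matches the i8x16.extract_lane_s pattern.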
SDValue
WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(SDValue Op,
                                                    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();

  if (SrcVT.getVectorElementType() == MVT::i1 ||
      SrcVT.getVectorElementType() == MVT::i64)
    return SDValue();

  assert(VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits() == 0 &&
         "Unexpected extension factor.");
  unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();

  if (Scale != 2 && Scale != 4 && Scale != 8)
    return SDValue();

  unsigned Ext;
  switch (Op.getOpcode()) {
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    Ext = WebAssemblyISD::EXTEND_LOW_U;
    break;
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    Ext = WebAssemblyISD::EXTEND_LOW_S;
    break;
  }

  SDValue Ret = Src;
  while (Scale != 1) {
    Ret = DAG.getNode(Ext, DL,
                      Ret.getValueType()
                          .widenIntegerVectorElementType(*DAG.getContext())
                          .getHalfNumVectorElementsVT(*DAG.getContext()),
                      Ret);
    Scale /= 2;
  }
  assert(Ret.getValueType() == VT);
  return Ret;
}
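
// For example (illustrative): zero-extending the low four i8 lanes of a v16i8
// to v4i32 has Scale == 4 and emits two EXTEND_LOW_U nodes, stepping through
// v16i8 -> v8i16 -> v4i32.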
static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::v2f64)
    return SDValue();

  auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
                             unsigned &Index) -> bool {
    switch (Op.getOpcode()) {
    case ISD::SINT_TO_FP:
      Opcode = WebAssemblyISD::CONVERT_LOW_S;
      break;
    case ISD::UINT_TO_FP:
      Opcode = WebAssemblyISD::CONVERT_LOW_U;
      break;
    case ISD::FP_EXTEND:
      Opcode = WebAssemblyISD::PROMOTE_LOW;
      break;
    default:
      return false;
    }

    auto ExtractVector = Op.getOperand(0);
    if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return false;

    if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
      return false;

    SrcVec = ExtractVector.getOperand(0);
    Index = ExtractVector.getConstantOperandVal(1);
    return true;
  };

  unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
  SDValue LHSSrcVec, RHSSrcVec;
  if (!GetConvertedLane(Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
      !GetConvertedLane(Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))
    return SDValue();

  if (LHSOpcode != RHSOpcode)
    return SDValue();

  MVT ExpectedSrcVT;
  switch (LHSOpcode) {
  case WebAssemblyISD::CONVERT_LOW_S:
  case WebAssemblyISD::CONVERT_LOW_U:
    ExpectedSrcVT = MVT::v4i32;
    break;
  case WebAssemblyISD::PROMOTE_LOW:
    ExpectedSrcVT = MVT::v4f32;
    break;
  }
  if (LHSSrcVec.getValueType() != ExpectedSrcVT)
    return SDValue();

  auto Src = LHSSrcVec;
  if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
    // Shuffle the source vector so that the converted lanes are the low lanes.
    Src = DAG.getVectorShuffle(
        ExpectedSrcVT, DL, LHSSrcVec, RHSSrcVec,
        {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1});
  }
  return DAG.getNode(LHSOpcode, DL, MVT::v2f64, Src);
}
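
// For example (illustrative): a v2f64 build_vector whose lanes are
//   (sitofp (extract_vector_elt (v4i32 $v), 0)) and
//   (sitofp (extract_vector_elt (v4i32 $v), 1))
// folds to (CONVERT_LOW_S $v), i.e. f64x2.convert_low_i32x4_s.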
SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  if (auto ConvertLow = LowerConvertLow(Op, DAG))
    return ConvertLow;

  SDLoc DL(Op);
  const EVT VecT = Op.getValueType();
  const EVT LaneT = Op.getOperand(0).getValueType();
  const size_t Lanes = Op.getNumOperands();
  bool CanSwizzle = VecT == MVT::v16i8;

  // BUILD_VECTORs are lowered to the instruction that initializes the highest
  // possible number of lanes at once followed by a sequence of replace_lane
  // instructions to individually initialize any remaining lanes.

  // TODO: Tune this. For example, lanewise swizzling is very expensive, so
  // swizzled lanes should be given greater weight.

  // TODO: Investigate looping rather than always extracting/replacing specific
  // lanes to fill gaps.

  auto IsConstant = [](const SDValue &V) {
    return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
  };

  // Returns the source vector and index vector pair if they exist. Checks for:
  //   (extract_vector_elt
  //     $src,
  //     (sign_extend_inreg (extract_vector_elt $indices, $i))
  //   )
  auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
    auto Bail = std::make_pair(SDValue(), SDValue());
    if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return Bail;
    const SDValue &SwizzleSrc = Lane->getOperand(0);
    const SDValue &IndexExt = Lane->getOperand(1);
    if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
      return Bail;
    const SDValue &Index = IndexExt->getOperand(0);
    if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return Bail;
    const SDValue &SwizzleIndices = Index->getOperand(0);
    if (SwizzleSrc.getValueType() != MVT::v16i8 ||
        SwizzleIndices.getValueType() != MVT::v16i8 ||
        Index->getOperand(1)->getOpcode() != ISD::Constant ||
        Index->getConstantOperandVal(1) != I)
      return Bail;
    return std::make_pair(SwizzleSrc, SwizzleIndices);
  };

  // If the lane is extracted from another vector at a constant index, return
  // that vector. The source vector must not have more lanes than the dest
  // because the shufflevector indices are in terms of the destination lanes and
  // would not be able to address the smaller individual source lanes.
  auto GetShuffleSrc = [&](const SDValue &Lane) {
    if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
      return SDValue();
    if (Lane->getOperand(0).getValueType().getVectorNumElements() >
        VecT.getVectorNumElements())
      return SDValue();
    return Lane->getOperand(0);
  };

  using ValueEntry = std::pair<SDValue, size_t>;
  SmallVector<ValueEntry, 16> SplatValueCounts;

  using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
  SmallVector<SwizzleEntry, 16> SwizzleCounts;

  using ShuffleEntry = std::pair<SDValue, size_t>;
  SmallVector<ShuffleEntry, 16> ShuffleCounts;

  auto AddCount = [](auto &Counts, const auto &Val) {
    auto CountIt =
        llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
    if (CountIt == Counts.end()) {
      Counts.emplace_back(Val, 1);
    } else {
      CountIt->second++;
    }
  };

  auto GetMostCommon = [](auto &Counts) {
    auto CommonIt =
        std::max_element(Counts.begin(), Counts.end(), llvm::less_second());
    assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
    return *CommonIt;
  };

  size_t NumConstantLanes = 0;

  // Count eligible lanes for each type of vector creation op
  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
    if (Lane.isUndef())
      continue;

    AddCount(SplatValueCounts, Lane);

    if (IsConstant(Lane))
      NumConstantLanes++;
    if (auto ShuffleSrc = GetShuffleSrc(Lane))
      AddCount(ShuffleCounts, ShuffleSrc);
    if (CanSwizzle) {
      auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
      if (SwizzleSrcs.first)
        AddCount(SwizzleCounts, SwizzleSrcs);
    }
  }

  SDValue SplatValue;
  size_t NumSplatLanes;
  std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);

  SDValue SwizzleSrc;
  SDValue SwizzleIndices;
  size_t NumSwizzleLanes = 0;
  if (SwizzleCounts.size())
    std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
                          NumSwizzleLanes) = GetMostCommon(SwizzleCounts);

  // Shuffles can draw from up to two vectors, so find the two most common
  // sources.
  SDValue ShuffleSrc1, ShuffleSrc2;
  size_t NumShuffleLanes = 0;
  if (ShuffleCounts.size()) {
    std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
    llvm::erase_if(ShuffleCounts,
                   [&](const auto &Pair) { return Pair.first == ShuffleSrc1; });
  }
  if (ShuffleCounts.size()) {
    size_t AdditionalShuffleLanes;
    std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
        GetMostCommon(ShuffleCounts);
    NumShuffleLanes += AdditionalShuffleLanes;
  }

  // Predicate returning true if the lane is properly initialized by the
  // original instruction
  std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
  SDValue Result;
  // Prefer swizzles over shuffles over vector consts over splats
  if (NumSwizzleLanes >= NumShuffleLanes &&
      NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
    Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
                         SwizzleIndices);
    auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
    IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
      return Swizzled == GetSwizzleSrcs(I, Lane);
    };
  } else if (NumShuffleLanes >= NumConstantLanes &&
             NumShuffleLanes >= NumSplatLanes) {
    size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
    size_t DestLaneCount = VecT.getVectorNumElements();
    size_t Scale1 = 1;
    size_t Scale2 = 1;
    SDValue Src1 = ShuffleSrc1;
    SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
    if (Src1.getValueType() != VecT) {
      size_t LaneSize =
          Src1.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
      assert(LaneSize > DestLaneSize);
      Scale1 = LaneSize / DestLaneSize;
      Src1 = DAG.getBitcast(VecT, Src1);
    }
    if (Src2.getValueType() != VecT) {
      size_t LaneSize =
          Src2.getValueType().getVectorElementType().getFixedSizeInBits() / 8;
      assert(LaneSize > DestLaneSize);
      Scale2 = LaneSize / DestLaneSize;
      Src2 = DAG.getBitcast(VecT, Src2);
    }

    int Mask[16];
    assert(DestLaneCount <= 16);
    for (size_t I = 0; I < DestLaneCount; ++I) {
      const SDValue &Lane = Op->getOperand(I);
      SDValue Src = GetShuffleSrc(Lane);
      if (Src == ShuffleSrc1) {
        Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
      } else if (Src && Src == ShuffleSrc2) {
        Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
      } else {
        Mask[I] = -1;
      }
    }
    ArrayRef<int> MaskRef(Mask, DestLaneCount);
    Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
    IsLaneConstructed = [&](size_t, const SDValue &Lane) {
      auto Src = GetShuffleSrc(Lane);
      return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
    };
  } else if (NumConstantLanes >= NumSplatLanes) {
    SmallVector<SDValue, 16> ConstLanes;
    for (const SDValue &Lane : Op->op_values()) {
      if (IsConstant(Lane)) {
        // Values may need to be fixed so that they will sign extend to be
        // within the expected range during ISel. Check whether the value is in
        // bounds based on the lane bit width and if it is out of bounds, lop
        // off the extra bits and subtract 2^n to reflect giving the high bit
        // value -2^(n-1) rather than +2^(n-1). Skip the i64 case because it
        // cannot possibly be out of range.
        auto *Const = dyn_cast<ConstantSDNode>(Lane.getNode());
        int64_t Val = Const ? Const->getSExtValue() : 0;
        uint64_t LaneBits = 128 / Lanes;
        assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) &&
               "Unexpected out of bounds negative value");
        if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) {
          uint64_t Mask = (1ll << LaneBits) - 1;
          auto NewVal = (((uint64_t)Val & Mask) - (1ll << LaneBits)) & Mask;
          ConstLanes.push_back(DAG.getConstant(NewVal, SDLoc(Lane), LaneT));
        } else {
          ConstLanes.push_back(Lane);
        }
      } else if (LaneT.isFloatingPoint()) {
        ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
      } else {
        ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
      }
    }
    Result = DAG.getBuildVector(VecT, DL, ConstLanes);
    IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
      return IsConstant(Lane);
    };
  } else {
    // Use a splat (which might be selected as a load splat)
    Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
    IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
      return Lane == SplatValue;
    };
  }

  assert(Result);
  assert(IsLaneConstructed);

  // Add replace_lane instructions for any unhandled values
  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
    if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
                           DAG.getConstant(I, DL, MVT::i32));
  }

  return Result;
}
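
// For example (illustrative): (build_vector $x, $x, $x, $y) with non-constant
// operands splats $x and then emits a single replace_lane to write $y into
// lane 3.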
SDValue
WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
  MVT VecType = Op.getOperand(0).getSimpleValueType();
  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;

  // Space for two vector args and sixteen mask indices
  SDValue Ops[18];
  size_t OpIdx = 0;
  Ops[OpIdx++] = Op.getOperand(0);
  Ops[OpIdx++] = Op.getOperand(1);

  // Expand mask indices to byte indices and materialize them as operands
  for (int M : Mask) {
    for (size_t J = 0; J < LaneBytes; ++J) {
      // Lower undefs (represented by -1 in mask) to {0..J}, which use a
      // whole lane of vector input, to allow further reduction at VM. E.g.
      // match an 8x16 byte shuffle to an equivalent cheaper 32x4 shuffle.
      uint64_t ByteIndex = M == -1 ? J : (uint64_t)M * LaneBytes + J;
      Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
    }
  }

  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
}
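
// For example (illustrative): a v4i32 shuffle with mask <0, 5, -1, -1> expands
// to the byte mask {0,1,2,3, 20,21,22,23, 0,1,2,3, 0,1,2,3} for i8x16.shuffle,
// where the undef lanes reuse bytes {0..3} of the first input.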
SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // The legalizer does not know how to expand the unsupported comparison modes
  // of i64x2 vectors, so we manually unroll them here.
  assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
  SmallVector<SDValue, 2> LHS, RHS;
  DAG.ExtractVectorElements(Op->getOperand(0), LHS);
  DAG.ExtractVectorElements(Op->getOperand(1), RHS);
  const SDValue &CC = Op->getOperand(2);
  auto MakeLane = [&](unsigned I) {
    return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
                       DAG.getConstant(uint64_t(-1), DL, MVT::i64),
                       DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
  };
  return DAG.getBuildVector(Op->getValueType(0), DL,
                            {MakeLane(0), MakeLane(1)});
}
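
// For example (illustrative): (setcc (v2i64 $a), (v2i64 $b), setult) has no
// corresponding i64x2 SIMD instruction, so each lane becomes a scalar
// select_cc producing an all-ones or all-zeros i64, and the two lanes are
// rebuilt into a v2i64.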
SDValue
WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // Allow constant lane indices, expand variable lane indices
  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
  if (isa<ConstantSDNode>(IdxNode)) {
    // Ensure the index type is i32 to match the tablegen patterns
    uint64_t Idx = cast<ConstantSDNode>(IdxNode)->getZExtValue();
    SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
    Ops[Op.getNumOperands() - 1] =
        DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
    return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Ops);
  }
  // Perform default expansion
  return SDValue();
}
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
  // 32-bit and 64-bit unrolled shifts will have proper semantics
  if (LaneT.bitsGE(MVT::i32))
    return DAG.UnrollVectorOp(Op.getNode());
  // Otherwise mask the shift value to get proper semantics from 32-bit shift
  SDLoc DL(Op);
  size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
  SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
  unsigned ShiftOpcode = Op.getOpcode();
  SmallVector<SDValue, 16> ShiftedElements;
  DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
  SmallVector<SDValue, 16> ShiftElements;
  DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
  SmallVector<SDValue, 16> UnrolledOps;
  for (size_t i = 0; i < NumLanes; ++i) {
    SDValue MaskedShiftValue =
        DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
    SDValue ShiftedValue = ShiftedElements[i];
    if (ShiftOpcode == ISD::SRA)
      ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
                                 ShiftedValue, DAG.getValueType(LaneT));
    UnrolledOps.push_back(
        DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
  }
  return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
}
SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);

  // Only manually lower vector shifts
  assert(Op.getSimpleValueType().isVector());

  uint64_t LaneBits = Op.getValueType().getScalarSizeInBits();
  auto ShiftVal = Op.getOperand(1);

  // Try to skip bitmask operation since it is implied inside shift instruction
  auto SkipImpliedMask = [](SDValue MaskOp, uint64_t MaskBits) {
    if (MaskOp.getOpcode() != ISD::AND)
      return MaskOp;
    SDValue LHS = MaskOp.getOperand(0);
    SDValue RHS = MaskOp.getOperand(1);
    if (MaskOp.getValueType().isVector()) {
      APInt MaskVal;
      if (!ISD::isConstantSplatVector(RHS.getNode(), MaskVal))
        std::swap(LHS, RHS);

      if (ISD::isConstantSplatVector(RHS.getNode(), MaskVal) &&
          MaskVal == MaskBits)
        MaskOp = LHS;
    } else {
      if (!isa<ConstantSDNode>(RHS.getNode()))
        std::swap(LHS, RHS);

      auto ConstantRHS = dyn_cast<ConstantSDNode>(RHS.getNode());
      if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
        MaskOp = LHS;
    }

    return MaskOp;
  };

  // Skip vector and operation
  ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
  ShiftVal = DAG.getSplatValue(ShiftVal);
  if (!ShiftVal)
    return unrollVectorShift(Op, DAG);

  // Skip scalar and operation
  ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
  // Use anyext because none of the high bits can affect the shift
  ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);

  unsigned Opcode;
  switch (Op.getOpcode()) {
  case ISD::SHL:
    Opcode = WebAssemblyISD::VEC_SHL;
    break;
  case ISD::SRA:
    Opcode = WebAssemblyISD::VEC_SHR_S;
    break;
  case ISD::SRL:
    Opcode = WebAssemblyISD::VEC_SHR_U;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
}
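
// For example (illustrative): a v4i32 shift whose amount is
// (splat (and $amt, 31)) drops the masking, since the wasm shift instructions
// already take the count modulo the lane width, and lowers to VEC_SHL /
// VEC_SHR_{S,U} with the scalar amount.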
SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT ResT = Op.getValueType();
  EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

  if ((ResT == MVT::i32 || ResT == MVT::i64) &&
      (SatVT == MVT::i32 || SatVT == MVT::i64))
    return Op;

  if (ResT == MVT::v4i32 && SatVT == MVT::i32)
    return Op;

  return SDValue();
}
//===----------------------------------------------------------------------===//
// Custom DAG combine hooks
//===----------------------------------------------------------------------===//
static SDValue
performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  auto Shuffle = cast<ShuffleVectorSDNode>(N);

  // Hoist vector bitcasts that don't change the number of lanes out of unary
  // shuffles, where they are less likely to get in the way of other combines.
  // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
  // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
  SDValue Bitcast = N->getOperand(0);
  if (Bitcast.getOpcode() != ISD::BITCAST)
    return SDValue();
  if (!N->getOperand(1).isUndef())
    return SDValue();
  SDValue CastOp = Bitcast.getOperand(0);
  MVT SrcType = CastOp.getSimpleValueType();
  MVT DstType = Bitcast.getSimpleValueType();
  if (!SrcType.is128BitVector() ||
      SrcType.getVectorNumElements() != DstType.getVectorNumElements())
    return SDValue();
  SDValue NewShuffle = DAG.getVectorShuffle(
      SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
  return DAG.getBitcast(DstType, NewShuffle);
}
/// Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get
/// split up into scalar instructions during legalization, and the vector
/// extending instructions are selected in performVectorExtendCombine below.
static SDValue
performVectorExtendToFPCombine(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  assert(N->getOpcode() == ISD::UINT_TO_FP ||
         N->getOpcode() == ISD::SINT_TO_FP);

  EVT InVT = N->getOperand(0)->getValueType(0);
  EVT ResVT = N->getValueType(0);
  MVT ExtVT;
  if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
    ExtVT = MVT::v4i32;
  else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
    ExtVT = MVT::v2i32;
  else
    return SDValue();

  unsigned Op =
      N->getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
  SDValue Conv = DAG.getNode(Op, SDLoc(N), ExtVT, N->getOperand(0));
  return DAG.getNode(N->getOpcode(), SDLoc(N), ResVT, Conv);
}
static SDValue
performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  assert(N->getOpcode() == ISD::SIGN_EXTEND ||
         N->getOpcode() == ISD::ZERO_EXTEND);

  // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
  // possible before the extract_subvector can be expanded.
  auto Extract = N->getOperand(0);
  if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return SDValue();
  auto Source = Extract.getOperand(0);
  auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
  if (IndexNode == nullptr)
    return SDValue();
  auto Index = IndexNode->getZExtValue();

  // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
  // extracted subvector is the low or high half of its source.
  EVT ResVT = N->getValueType(0);
  if (ResVT == MVT::v8i16) {
    if (Extract.getValueType() != MVT::v8i8 ||
        Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
      return SDValue();
  } else if (ResVT == MVT::v4i32) {
    if (Extract.getValueType() != MVT::v4i16 ||
        Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
      return SDValue();
  } else if (ResVT == MVT::v2i64) {
    if (Extract.getValueType() != MVT::v2i32 ||
        Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
      return SDValue();
  } else {
    return SDValue();
  }

  bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
  bool IsLow = Index == 0;

  unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
                                : WebAssemblyISD::EXTEND_HIGH_S)
                       : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
                                : WebAssemblyISD::EXTEND_HIGH_U);

  return DAG.getNode(Op, SDLoc(N), ResVT, Source);
}
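
// For example (illustrative): (v8i16 (sext (extract_subvector (v16i8 $x), 8)))
// becomes (EXTEND_HIGH_S $x), i.e. i16x8.extend_high_i8x16_s.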
static SDValue
performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  auto GetWasmConversionOp = [](unsigned Op) {
    switch (Op) {
    case ISD::FP_TO_SINT_SAT:
      return WebAssemblyISD::TRUNC_SAT_ZERO_S;
    case ISD::FP_TO_UINT_SAT:
      return WebAssemblyISD::TRUNC_SAT_ZERO_U;
    case ISD::FP_ROUND:
      return WebAssemblyISD::DEMOTE_ZERO;
    }
    llvm_unreachable("unexpected op");
  };

  auto IsZeroSplat = [](SDValue SplatVal) {
    auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
    APInt SplatValue, SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    return Splat &&
           Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                                  HasAnyUndefs) &&
           SplatValue == 0;
  };

  if (N->getOpcode() == ISD::CONCAT_VECTORS) {
    // Combine this:
    //
    //   (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
    //
    // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
    //
    // Or this:
    //
    //   (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
    //
    // into (f32x4.demote_zero_f64x2 $x).
    EVT ResVT;
    EVT ExpectedConversionType;
    auto Conversion = N->getOperand(0);
    auto ConversionOp = Conversion.getOpcode();
    switch (ConversionOp) {
    case ISD::FP_TO_SINT_SAT:
    case ISD::FP_TO_UINT_SAT:
      ResVT = MVT::v4i32;
      ExpectedConversionType = MVT::v2i32;
      break;
    case ISD::FP_ROUND:
      ResVT = MVT::v4f32;
      ExpectedConversionType = MVT::v2f32;
      break;
    default:
      return SDValue();
    }

    if (N->getValueType(0) != ResVT)
      return SDValue();

    if (Conversion.getValueType() != ExpectedConversionType)
      return SDValue();

    auto Source = Conversion.getOperand(0);
    if (Source.getValueType() != MVT::v2f64)
      return SDValue();

    if (!IsZeroSplat(N->getOperand(1)) ||
        N->getOperand(1).getValueType() != ExpectedConversionType)
      return SDValue();

    unsigned Op = GetWasmConversionOp(ConversionOp);
    return DAG.getNode(Op, SDLoc(N), ResVT, Source);
  }

  // Combine this:
  //
  //   (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
  //
  // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
  //
  // Or this:
  //
  //   (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
  //
  // into (f32x4.demote_zero_f64x2 $x).
  EVT ResVT;
  auto ConversionOp = N->getOpcode();
  switch (ConversionOp) {
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    ResVT = MVT::v4i32;
    break;
  case ISD::FP_ROUND:
    ResVT = MVT::v4f32;
    break;
  default:
    llvm_unreachable("unexpected op");
  }

  if (N->getValueType(0) != ResVT)
    return SDValue();

  auto Concat = N->getOperand(0);
  if (Concat.getValueType() != MVT::v4f64)
    return SDValue();

  auto Source = Concat.getOperand(0);
  if (Source.getValueType() != MVT::v2f64)
    return SDValue();

  if (!IsZeroSplat(Concat.getOperand(1)) ||
      Concat.getOperand(1).getValueType() != MVT::v2f64)
    return SDValue();

  unsigned Op = GetWasmConversionOp(ConversionOp);
  return DAG.getNode(Op, SDLoc(N), ResVT, Source);
}
// Helper to extract VectorWidth bits from Vec, starting from IdxVal.
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
                                const SDLoc &DL, unsigned VectorWidth) {
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits() / VectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements() / Factor);

  // Extract the relevant VectorWidth bits. Generate an EXTRACT_SUBVECTOR
  unsigned ElemsPerChunk = VectorWidth / ElVT.getSizeInBits();
  assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");

  // This is the index of the first element of the VectorWidth-bit chunk
  // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
  IdxVal &= ~(ElemsPerChunk - 1);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getBuildVector(ResultVT, DL,
                              Vec->ops().slice(IdxVal, ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, DL);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, VecIdx);
}
// Helper to recursively truncate vector elements in half with NARROW_U. DstVT
// is the expected destination value type after recursion. In is the initial
// input. Note that the input should have enough leading zero bits to prevent
// NARROW_U from saturating results.
static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL,
                                        SelectionDAG &DAG) {
  EVT SrcVT = In.getValueType();

  // No truncation required, we might get here due to recursive calls.
  if (SrcVT == DstVT)
    return In;

  unsigned SrcSizeInBits = SrcVT.getSizeInBits();
  unsigned NumElems = SrcVT.getVectorNumElements();
  if (!isPowerOf2_32(NumElems))
    return SDValue();
  assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
  assert(SrcSizeInBits > DstVT.getSizeInBits() && "Illegal truncation");

  LLVMContext &Ctx = *DAG.getContext();
  EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);

  // Narrow to the largest type possible:
  // vXi64/vXi32 -> i16x8.narrow_i32x4_u and vXi16 -> i8x16.narrow_i16x8_u.
  EVT InVT = MVT::i16, OutVT = MVT::i8;
  if (SrcVT.getScalarSizeInBits() > 16) {
    InVT = MVT::i32;
    OutVT = MVT::i16;
  }
  unsigned SubSizeInBits = SrcSizeInBits / 2;
  InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
  OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());

  // Split lower/upper subvectors.
  SDValue Lo = extractSubVector(In, 0, DAG, DL, SubSizeInBits);
  SDValue Hi = extractSubVector(In, NumElems / 2, DAG, DL, SubSizeInBits);

  // 256bit -> 128bit truncate - Narrow lower/upper 128-bit subvectors.
  if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
    Lo = DAG.getBitcast(InVT, Lo);
    Hi = DAG.getBitcast(InVT, Hi);
    SDValue Res = DAG.getNode(WebAssemblyISD::NARROW_U, DL, OutVT, Lo, Hi);
    return DAG.getBitcast(DstVT, Res);
  }

  // Recursively narrow lower/upper subvectors, concat result and narrow again.
  EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
  Lo = truncateVectorWithNARROW(PackedVT, Lo, DL, DAG);
  Hi = truncateVectorWithNARROW(PackedVT, Hi, DL, DAG);

  PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
  return truncateVectorWithNARROW(DstVT, Res, DL, DAG);
}
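
// For example (illustrative): truncating a v8i32 whose lanes have already been
// masked down to 16 bits into a v8i16 splits the input into two v4i32 halves
// and emits a single i16x8.narrow_i32x4_u, which cannot saturate on such
// inputs.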
static SDValue performTruncateCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;

  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();
  if (!InVT.isSimple())
    return SDValue();

  EVT OutVT = N->getValueType(0);
  if (!OutVT.isVector())
    return SDValue();

  EVT OutSVT = OutVT.getVectorElementType();
  EVT InSVT = InVT.getVectorElementType();
  // Currently only cover truncate to v16i8 or v8i16.
  if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.is128BitVector()))
    return SDValue();

  SDLoc DL(N);
  APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
                                    OutVT.getScalarSizeInBits());
  In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
  return truncateVectorWithNARROW(OutVT, In, DL, DAG);
}
static SDValue performBitcastCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  SDLoc DL(N);
  SDValue Src = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();

  // bitcast <N x i1> to iN
  //   ==> bitmask
  if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
      SrcVT.isFixedLengthVector() && SrcVT.getScalarType() == MVT::i1) {
    unsigned NumElts = SrcVT.getVectorNumElements();
    if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
      return SDValue();
    EVT Width = MVT::getIntegerVT(128 / NumElts);
    return DAG.getZExtOrTrunc(
        DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
                    {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
                     DAG.getSExtOrTrunc(N->getOperand(0), DL,
                                        SrcVT.changeVectorElementType(Width))}),
        DL, VT);
  }

  return SDValue();
}
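
// For example (illustrative): (i4 (bitcast (v4i1 $m))) is rewritten to
// i32x4.bitmask of $m sign-extended to v4i32, with the i32 result truncated
// back down to i4.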
static SDValue performSETCCCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  auto &DAG = DCI.DAG;
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();

  EVT VT = N->getValueType(0);

  // setcc (iN (bitcast (vNi1 X))), 0, ne
  //   ==> any_true (vNi1 X)
  // setcc (iN (bitcast (vNi1 X))), 0, eq
  //   ==> xor (any_true (vNi1 X)), -1
  // setcc (iN (bitcast (vNi1 X))), -1, eq
  //   ==> all_true (vNi1 X)
  // setcc (iN (bitcast (vNi1 X))), -1, ne
  //   ==> xor (all_true (vNi1 X)), -1
  if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
      (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      (isNullConstant(RHS) || isAllOnesConstant(RHS)) &&
      LHS->getOpcode() == ISD::BITCAST) {
    EVT FromVT = LHS->getOperand(0).getValueType();
    if (FromVT.isFixedLengthVector() &&
        FromVT.getVectorElementType() == MVT::i1) {
      int Intrin = isNullConstant(RHS) ? Intrinsic::wasm_anytrue
                                       : Intrinsic::wasm_alltrue;
      unsigned NumElts = FromVT.getVectorNumElements();
      if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
        return SDValue();
      EVT Width = MVT::getIntegerVT(128 / NumElts);
      SDValue Ret = DAG.getZExtOrTrunc(
          DAG.getNode(
              ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
              {DAG.getConstant(Intrin, DL, MVT::i32),
               DAG.getSExtOrTrunc(LHS->getOperand(0), DL,
                                  FromVT.changeVectorElementType(Width))}),
          DL, MVT::i1);
      if ((isNullConstant(RHS) && (Cond == ISD::SETEQ)) ||
          (isAllOnesConstant(RHS) && (Cond == ISD::SETNE))) {
        Ret = DAG.getNOT(DL, Ret, MVT::i1);
      }
      return DAG.getZExtOrTrunc(Ret, DL, VT);
    }
  }

  return SDValue();
}
SDValue
WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    return SDValue();
  case ISD::BITCAST:
    return performBitcastCombine(N, DCI);
  case ISD::SETCC:
    return performSETCCCombine(N, DCI);
  case ISD::VECTOR_SHUFFLE:
    return performVECTOR_SHUFFLECombine(N, DCI);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
    return performVectorExtendCombine(N, DCI);
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
    return performVectorExtendToFPCombine(N, DCI);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
  case ISD::FP_ROUND:
  case ISD::CONCAT_VECTORS:
    return performVectorTruncZeroCombine(N, DCI);
  case ISD::TRUNCATE:
    return performTruncateCombine(N, DCI);
  }
}